From ca728c2d2e450cce1bb52321827f505360c95cfa Mon Sep 17 00:00:00 2001 From: Muhammad Kumail Date: Thu, 4 Dec 2025 18:17:46 +0000 Subject: [PATCH 01/19] feat: add create / delete team (group) resource actions --- pkg/connector/team.go | 403 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 403 insertions(+) diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 194b0ba2..a55d014a 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -6,7 +6,9 @@ import ( "strconv" "strings" + config "github.com/conductorone/baton-sdk/pb/c1/config/v1" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/actions" "github.com/conductorone/baton-sdk/pkg/annotations" "github.com/conductorone/baton-sdk/pkg/pagination" "github.com/conductorone/baton-sdk/pkg/types/entitlement" @@ -17,6 +19,7 @@ import ( "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.uber.org/zap" "google.golang.org/grpc/codes" + "google.golang.org/protobuf/types/known/structpb" ) const ( @@ -362,6 +365,406 @@ func (o *teamResourceType) Revoke(ctx context.Context, grant *v2.Grant) (annotat return nil, nil } +// Create creates a new team in a GitHub organization. +// The resource must have a parent resource ID that references the organization. +// The team name is taken from the resource's DisplayName field. +// Optional profile fields: +// - description: string - Team description +// - privacy: string - "secret" or "closed" (default: "secret") +// - parent_team_id: int64 - ID of the parent team for nested teams +func (o *teamResourceType) Create(ctx context.Context, resource *v2.Resource) (*v2.Resource, annotations.Annotations, error) { + l := ctxzap.Extract(ctx) + + if resource == nil { + return nil, nil, fmt.Errorf("github-connector: resource cannot be nil") + } + + if resource.Id == nil || resource.Id.ResourceType != resourceTypeTeam.Id { + return nil, nil, fmt.Errorf("github-connector: invalid resource type for team creation") + } + + // Get the parent org resource ID + parentResourceID := resource.GetParentResourceId() + if parentResourceID == nil { + return nil, nil, fmt.Errorf("github-connector: parent organization resource ID is required to create a team") + } + + if parentResourceID.ResourceType != resourceTypeOrg.Id { + return nil, nil, fmt.Errorf("github-connector: parent resource must be an organization, got %s", parentResourceID.ResourceType) + } + + // Get the organization name + orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) + if err != nil { + return nil, nil, fmt.Errorf("github-connector: failed to get organization name: %w", err) + } + + // Get team name from display name + teamName := resource.GetDisplayName() + if teamName == "" { + return nil, nil, fmt.Errorf("github-connector: team name (DisplayName) is required") + } + + l.Info("github-connector: creating team", + zap.String("team_name", teamName), + zap.String("org_name", orgName), + ) + + // Build the NewTeam request + newTeam := github.NewTeam{ + Name: teamName, + } + + // Extract optional fields from the group trait profile if available + groupTrait, err := rType.GetGroupTrait(resource) + if err == nil && groupTrait != nil && groupTrait.Profile != nil { + // Get description if provided + if description, ok := rType.GetProfileStringValue(groupTrait.Profile, "description"); ok && description != "" { + newTeam.Description = github.Ptr(description) + } + + // Get privacy setting if provided ("secret" or "closed") + if privacy, ok := 
rType.GetProfileStringValue(groupTrait.Profile, "privacy"); ok && privacy != "" { + if privacy == "secret" || privacy == "closed" { + newTeam.Privacy = github.Ptr(privacy) + } else { + l.Warn("github-connector: invalid privacy value, using default", + zap.String("provided_privacy", privacy), + ) + } + } + + // Get parent team ID if provided (for nested teams) + if parentTeamID, ok := rType.GetProfileInt64Value(groupTrait.Profile, "parent_team_id"); ok && parentTeamID > 0 { + newTeam.ParentTeamID = github.Ptr(parentTeamID) + } + } + + // Create the team via GitHub API + createdTeam, resp, err := o.client.Teams.CreateTeam(ctx, orgName, newTeam) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("github-connector: failed to create team %s in org %s", teamName, orgName)) + } + + // Extract rate limit data for annotations + var annos annotations.Annotations + if rateLimitData, err := extractRateLimitData(resp); err == nil { + annos.WithRateLimiting(rateLimitData) + } + + l.Info("github-connector: team created successfully", + zap.String("team_name", createdTeam.GetName()), + zap.Int64("team_id", createdTeam.GetID()), + zap.String("team_slug", createdTeam.GetSlug()), + ) + + // Create the resource representation of the newly created team + createdResource, err := teamResource(createdTeam, parentResourceID) + if err != nil { + return nil, annos, fmt.Errorf("github-connector: failed to create resource representation for team: %w", err) + } + + return createdResource, annos, nil +} + +// Delete deletes a team from a GitHub organization. +// The team is identified by its resource ID which contains the GitHub team ID. +func (o *teamResourceType) Delete(ctx context.Context, resourceId *v2.ResourceId) (annotations.Annotations, error) { + l := ctxzap.Extract(ctx) + + if resourceId == nil { + return nil, fmt.Errorf("github-connector: resource ID cannot be nil") + } + + if resourceId.ResourceType != resourceTypeTeam.Id { + return nil, fmt.Errorf("github-connector: invalid resource type %s, expected %s", resourceId.ResourceType, resourceTypeTeam.Id) + } + + // Parse the team ID from the resource + teamID, err := strconv.ParseInt(resourceId.GetResource(), 10, 64) + if err != nil { + return nil, fmt.Errorf("github-connector: invalid team ID %s: %w", resourceId.GetResource(), err) + } + + l.Info("github-connector: deleting team", + zap.Int64("team_id", teamID), + ) + + // We need to find the org that this team belongs to. + // We'll iterate through the organizations in the org cache. 
+ var annos annotations.Annotations + var deleted bool + var lastErr error + var lastResp *github.Response + + // Use the org cache to get the list of organizations + // We need to iterate through the configured organizations + o.orgCache.RLock() + orgIDs := make([]string, 0, len(o.orgCache.orgNames)) + for orgID := range o.orgCache.orgNames { + orgIDs = append(orgIDs, orgID) + } + o.orgCache.RUnlock() + + for _, orgID := range orgIDs { + orgIDInt, err := strconv.ParseInt(orgID, 10, 64) + if err != nil { + continue + } + + // Try to get the team first to verify it exists in this org + _, resp, err := o.client.Teams.GetTeamByID(ctx, orgIDInt, teamID) + if err != nil { + // Team doesn't exist in this org, continue to next + if isNotFoundError(resp) { + continue + } + lastErr = err + lastResp = resp + continue + } + + // Team found in this org, delete it + resp, err = o.client.Teams.DeleteTeamByID(ctx, orgIDInt, teamID) + if err != nil { + lastErr = err + lastResp = resp + continue + } + + // Successfully deleted + deleted = true + if rateLimitData, err := extractRateLimitData(resp); err == nil { + annos.WithRateLimiting(rateLimitData) + } + + l.Info("github-connector: team deleted successfully", + zap.Int64("team_id", teamID), + zap.Int64("org_id", orgIDInt), + ) + break + } + + if !deleted { + if lastErr != nil { + return annos, wrapGitHubError(lastErr, lastResp, fmt.Sprintf("github-connector: failed to delete team %d", teamID)) + } + return annos, fmt.Errorf("github-connector: team %d not found in any accessible organization", teamID) + } + + return annos, nil +} + +// ResourceActions registers the resource actions for the team resource type. +// This implements the ResourceActionProvider interface. +func (o *teamResourceType) ResourceActions(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { + if err := o.registerCreateTeamAction(ctx, registry); err != nil { + return err + } + if err := o.registerDeleteTeamAction(ctx, registry); err != nil { + return err + } + return nil +} + +func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { + return registry.Register(ctx, &v2.ResourceActionSchema{ + Name: "create", + DisplayName: "Create Team", + Description: "Create a new team in a GitHub organization", + ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_CREATE}, + Arguments: []*config.Field{ + { + Name: "name", + DisplayName: "Team Name", + Description: "The name of the team to create", + Field: &config.Field_StringField{}, + IsRequired: true, + }, + { + Name: "parent", + DisplayName: "Parent Organization", + Description: "The organization to create the team in", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + { + Name: "description", + DisplayName: "Description", + Description: "A description of the team", + Field: &config.Field_StringField{}, + }, + { + Name: "privacy", + DisplayName: "Privacy", + Description: "The privacy level: 'secret' or 'closed'", + Field: &config.Field_StringField{}, + }, + }, + ReturnTypes: []*config.Field{ + {Name: "success", Field: &config.Field_BoolField{}}, + {Name: "resource", Field: &config.Field_ResourceField{}}, + }, + }, o.handleCreateTeamAction) +} + +func (o *teamResourceType) registerDeleteTeamAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { + return registry.Register(ctx, &v2.ResourceActionSchema{ + Name: "delete", + DisplayName: "Delete Team", + Description: "Delete a team from a GitHub 
organization", + ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_DELETE}, + Arguments: []*config.Field{ + { + Name: "resource", + DisplayName: "Team Resource", + Description: "The team resource to delete", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + { + Name: "parent", + DisplayName: "Parent Organization", + Description: "The organization the team belongs to", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + }, + ReturnTypes: []*config.Field{ + {Name: "success", Field: &config.Field_BoolField{}}, + }, + }, o.handleDeleteTeamAction) +} + +func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { + l := ctxzap.Extract(ctx) + + // Extract required arguments using SDK helpers + name, err := actions.RequireStringArg(args, "name") + if err != nil { + return nil, nil, err + } + + parentResourceID, err := actions.RequireResourceIDArg(args, "parent") + if err != nil { + return nil, nil, err + } + + // Get the organization name from the parent resource ID + orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get organization name: %w", err) + } + + l.Info("github-connector: creating team via action", + zap.String("team_name", name), + zap.String("org_name", orgName), + ) + + // Build the NewTeam request + newTeam := github.NewTeam{ + Name: name, + } + + // Extract optional fields using SDK helpers + if description, ok := actions.GetStringArg(args, "description"); ok && description != "" { + newTeam.Description = github.Ptr(description) + } + + if privacy, ok := actions.GetStringArg(args, "privacy"); ok && privacy != "" { + if privacy == "secret" || privacy == "closed" { + newTeam.Privacy = github.Ptr(privacy) + } else { + l.Warn("github-connector: invalid privacy value, using default", + zap.String("provided_privacy", privacy), + ) + } + } + + // Create the team via GitHub API + createdTeam, resp, err := o.client.Teams.CreateTeam(ctx, orgName, newTeam) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to create team %s in org %s", name, orgName)) + } + + // Extract rate limit data for annotations + var annos annotations.Annotations + if rateLimitData, err := extractRateLimitData(resp); err == nil { + annos.WithRateLimiting(rateLimitData) + } + + l.Info("github-connector: team created successfully via action", + zap.String("team_name", createdTeam.GetName()), + zap.Int64("team_id", createdTeam.GetID()), + zap.String("team_slug", createdTeam.GetSlug()), + ) + + // Create the resource representation of the newly created team + resource, err := teamResource(createdTeam, parentResourceID) + if err != nil { + return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) + } + + // Build return values using SDK helpers + resourceRv, err := actions.NewResourceReturnField("resource", resource) + if err != nil { + return nil, annos, err + } + + return actions.NewReturnValues(true, resourceRv), annos, nil +} + +func (o *teamResourceType) handleDeleteTeamAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { + l := ctxzap.Extract(ctx) + + // Extract the team resource ID using SDK helper + resourceID, err := actions.RequireResourceIDArg(args, "resource") + if err != nil { + return nil, nil, err + } + + // Extract the parent org resource ID using SDK helper + parentResourceID, err := 
actions.RequireResourceIDArg(args, "parent") + if err != nil { + return nil, nil, err + } + + // Parse the team ID from the resource + teamID, err := strconv.ParseInt(resourceID.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid team ID %s: %w", resourceID.Resource, err) + } + + // Parse the org ID from the parent resource + orgID, err := strconv.ParseInt(parentResourceID.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid org ID %s: %w", parentResourceID.Resource, err) + } + + l.Info("github-connector: deleting team via action", + zap.Int64("team_id", teamID), + zap.Int64("org_id", orgID), + ) + + // Delete the team directly using the provided org ID from parent + resp, err := o.client.Teams.DeleteTeamByID(ctx, orgID, teamID) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to delete team %d in org %d", teamID, orgID)) + } + + var annos annotations.Annotations + if rateLimitData, err := extractRateLimitData(resp); err == nil { + annos.WithRateLimiting(rateLimitData) + } + + l.Info("github-connector: team deleted successfully via action", + zap.Int64("team_id", teamID), + zap.Int64("org_id", orgID), + ) + + return actions.NewReturnValues(true), annos, nil +} + func teamBuilder(client *github.Client, orgCache *orgNameCache) *teamResourceType { return &teamResourceType{ resourceType: resourceTypeTeam, From 100b51afc2e453bd451d8a2affa043b89739357b Mon Sep 17 00:00:00 2001 From: Muhammad Kumail Date: Thu, 4 Dec 2025 18:22:48 +0000 Subject: [PATCH 02/19] rm: resource manager funcs --- pkg/connector/team.go | 190 ------------------------------------------ 1 file changed, 190 deletions(-) diff --git a/pkg/connector/team.go b/pkg/connector/team.go index a55d014a..32bbc995 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -365,196 +365,6 @@ func (o *teamResourceType) Revoke(ctx context.Context, grant *v2.Grant) (annotat return nil, nil } -// Create creates a new team in a GitHub organization. -// The resource must have a parent resource ID that references the organization. -// The team name is taken from the resource's DisplayName field. 
-// Optional profile fields: -// - description: string - Team description -// - privacy: string - "secret" or "closed" (default: "secret") -// - parent_team_id: int64 - ID of the parent team for nested teams -func (o *teamResourceType) Create(ctx context.Context, resource *v2.Resource) (*v2.Resource, annotations.Annotations, error) { - l := ctxzap.Extract(ctx) - - if resource == nil { - return nil, nil, fmt.Errorf("github-connector: resource cannot be nil") - } - - if resource.Id == nil || resource.Id.ResourceType != resourceTypeTeam.Id { - return nil, nil, fmt.Errorf("github-connector: invalid resource type for team creation") - } - - // Get the parent org resource ID - parentResourceID := resource.GetParentResourceId() - if parentResourceID == nil { - return nil, nil, fmt.Errorf("github-connector: parent organization resource ID is required to create a team") - } - - if parentResourceID.ResourceType != resourceTypeOrg.Id { - return nil, nil, fmt.Errorf("github-connector: parent resource must be an organization, got %s", parentResourceID.ResourceType) - } - - // Get the organization name - orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) - if err != nil { - return nil, nil, fmt.Errorf("github-connector: failed to get organization name: %w", err) - } - - // Get team name from display name - teamName := resource.GetDisplayName() - if teamName == "" { - return nil, nil, fmt.Errorf("github-connector: team name (DisplayName) is required") - } - - l.Info("github-connector: creating team", - zap.String("team_name", teamName), - zap.String("org_name", orgName), - ) - - // Build the NewTeam request - newTeam := github.NewTeam{ - Name: teamName, - } - - // Extract optional fields from the group trait profile if available - groupTrait, err := rType.GetGroupTrait(resource) - if err == nil && groupTrait != nil && groupTrait.Profile != nil { - // Get description if provided - if description, ok := rType.GetProfileStringValue(groupTrait.Profile, "description"); ok && description != "" { - newTeam.Description = github.Ptr(description) - } - - // Get privacy setting if provided ("secret" or "closed") - if privacy, ok := rType.GetProfileStringValue(groupTrait.Profile, "privacy"); ok && privacy != "" { - if privacy == "secret" || privacy == "closed" { - newTeam.Privacy = github.Ptr(privacy) - } else { - l.Warn("github-connector: invalid privacy value, using default", - zap.String("provided_privacy", privacy), - ) - } - } - - // Get parent team ID if provided (for nested teams) - if parentTeamID, ok := rType.GetProfileInt64Value(groupTrait.Profile, "parent_team_id"); ok && parentTeamID > 0 { - newTeam.ParentTeamID = github.Ptr(parentTeamID) - } - } - - // Create the team via GitHub API - createdTeam, resp, err := o.client.Teams.CreateTeam(ctx, orgName, newTeam) - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("github-connector: failed to create team %s in org %s", teamName, orgName)) - } - - // Extract rate limit data for annotations - var annos annotations.Annotations - if rateLimitData, err := extractRateLimitData(resp); err == nil { - annos.WithRateLimiting(rateLimitData) - } - - l.Info("github-connector: team created successfully", - zap.String("team_name", createdTeam.GetName()), - zap.Int64("team_id", createdTeam.GetID()), - zap.String("team_slug", createdTeam.GetSlug()), - ) - - // Create the resource representation of the newly created team - createdResource, err := teamResource(createdTeam, parentResourceID) - if err != nil { - return nil, annos, 
fmt.Errorf("github-connector: failed to create resource representation for team: %w", err) - } - - return createdResource, annos, nil -} - -// Delete deletes a team from a GitHub organization. -// The team is identified by its resource ID which contains the GitHub team ID. -func (o *teamResourceType) Delete(ctx context.Context, resourceId *v2.ResourceId) (annotations.Annotations, error) { - l := ctxzap.Extract(ctx) - - if resourceId == nil { - return nil, fmt.Errorf("github-connector: resource ID cannot be nil") - } - - if resourceId.ResourceType != resourceTypeTeam.Id { - return nil, fmt.Errorf("github-connector: invalid resource type %s, expected %s", resourceId.ResourceType, resourceTypeTeam.Id) - } - - // Parse the team ID from the resource - teamID, err := strconv.ParseInt(resourceId.GetResource(), 10, 64) - if err != nil { - return nil, fmt.Errorf("github-connector: invalid team ID %s: %w", resourceId.GetResource(), err) - } - - l.Info("github-connector: deleting team", - zap.Int64("team_id", teamID), - ) - - // We need to find the org that this team belongs to. - // We'll iterate through the organizations in the org cache. - var annos annotations.Annotations - var deleted bool - var lastErr error - var lastResp *github.Response - - // Use the org cache to get the list of organizations - // We need to iterate through the configured organizations - o.orgCache.RLock() - orgIDs := make([]string, 0, len(o.orgCache.orgNames)) - for orgID := range o.orgCache.orgNames { - orgIDs = append(orgIDs, orgID) - } - o.orgCache.RUnlock() - - for _, orgID := range orgIDs { - orgIDInt, err := strconv.ParseInt(orgID, 10, 64) - if err != nil { - continue - } - - // Try to get the team first to verify it exists in this org - _, resp, err := o.client.Teams.GetTeamByID(ctx, orgIDInt, teamID) - if err != nil { - // Team doesn't exist in this org, continue to next - if isNotFoundError(resp) { - continue - } - lastErr = err - lastResp = resp - continue - } - - // Team found in this org, delete it - resp, err = o.client.Teams.DeleteTeamByID(ctx, orgIDInt, teamID) - if err != nil { - lastErr = err - lastResp = resp - continue - } - - // Successfully deleted - deleted = true - if rateLimitData, err := extractRateLimitData(resp); err == nil { - annos.WithRateLimiting(rateLimitData) - } - - l.Info("github-connector: team deleted successfully", - zap.Int64("team_id", teamID), - zap.Int64("org_id", orgIDInt), - ) - break - } - - if !deleted { - if lastErr != nil { - return annos, wrapGitHubError(lastErr, lastResp, fmt.Sprintf("github-connector: failed to delete team %d", teamID)) - } - return annos, fmt.Errorf("github-connector: team %d not found in any accessible organization", teamID) - } - - return annos, nil -} - // ResourceActions registers the resource actions for the team resource type. // This implements the ResourceActionProvider interface. 
func (o *teamResourceType) ResourceActions(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { From 9962207eae46e770024a8660628d40eadfec4e14 Mon Sep 17 00:00:00 2001 From: Muhammad Kumail Date: Fri, 5 Dec 2025 01:00:05 +0000 Subject: [PATCH 03/19] feat: add create / delete repository action --- pkg/connector/repository.go | 363 ++++++++++++++++++++++++++++++++++++ 1 file changed, 363 insertions(+) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index 550acd4d..179cd9b0 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -7,7 +7,9 @@ import ( "strconv" "strings" + config "github.com/conductorone/baton-sdk/pb/c1/config/v1" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/actions" "github.com/conductorone/baton-sdk/pkg/annotations" "github.com/conductorone/baton-sdk/pkg/pagination" "github.com/conductorone/baton-sdk/pkg/types/entitlement" @@ -18,6 +20,7 @@ import ( "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.uber.org/zap" "google.golang.org/grpc/codes" + "google.golang.org/protobuf/types/known/structpb" ) // outside collaborators are given one of these roles too. @@ -433,3 +436,363 @@ func skipGrantsForResourceType(bag *pagination.Bag) (string, error) { } return pageToken, nil } + +// ResourceActions registers the resource actions for the repository resource type. +// This implements the ResourceActionProvider interface. +func (o *repositoryResourceType) ResourceActions(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { + if err := o.registerCreateRepositoryAction(ctx, registry); err != nil { + return err + } + if err := o.registerDeleteRepositoryAction(ctx, registry); err != nil { + return err + } + return nil +} + +func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { + return registry.Register(ctx, &v2.ResourceActionSchema{ + Name: "create", + DisplayName: "Create Repository", + Description: "Create a new repository in a GitHub organization", + ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_CREATE}, + Arguments: []*config.Field{ + { + Name: "name", + DisplayName: "Repository Name", + Description: "The name of the repository to create", + Field: &config.Field_StringField{}, + IsRequired: true, + }, + { + Name: "parent", + DisplayName: "Parent Organization", + Description: "The organization to create the repository in", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + { + Name: "description", + DisplayName: "Description", + Description: "A description of the repository", + Field: &config.Field_StringField{}, + }, + { + Name: "private", + DisplayName: "Private", + Description: "Whether the repository should be private (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "visibility", + DisplayName: "Visibility", + Description: "The visibility level: 'public', 'private', or 'internal'", + Field: &config.Field_StringField{}, + }, + { + Name: "has_issues", + DisplayName: "Has Issues", + Description: "Enable issues for this repository (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "has_projects", + DisplayName: "Has Projects", + Description: "Enable projects for this repository (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "has_wiki", + DisplayName: "Has Wiki", + Description: "Enable wiki for this repository (true/false)", + Field: 
&config.Field_BoolField{}, + }, + { + Name: "has_discussions", + DisplayName: "Has Discussions", + Description: "Enable discussions for this repository (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "auto_init", + DisplayName: "Auto Initialize", + Description: "Create an initial commit with empty README (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "gitignore_template", + DisplayName: "Gitignore Template", + Description: "Gitignore template to apply (e.g., 'Go', 'Python', 'Node')", + Field: &config.Field_StringField{}, + }, + { + Name: "license_template", + DisplayName: "License Template", + Description: "License template to apply (e.g., 'mit', 'apache-2.0', 'gpl-3.0')", + Field: &config.Field_StringField{}, + }, + { + Name: "allow_squash_merge", + DisplayName: "Allow Squash Merge", + Description: "Allow squash-merging pull requests (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "allow_merge_commit", + DisplayName: "Allow Merge Commit", + Description: "Allow merging pull requests with a merge commit (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "allow_rebase_merge", + DisplayName: "Allow Rebase Merge", + Description: "Allow rebase-merging pull requests (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "allow_auto_merge", + DisplayName: "Allow Auto Merge", + Description: "Allow auto-merge on pull requests (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "delete_branch_on_merge", + DisplayName: "Delete Branch on Merge", + Description: "Automatically delete head branches after pull requests are merged (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "is_template", + DisplayName: "Is Template", + Description: "Make this repository available as a template (true/false)", + Field: &config.Field_BoolField{}, + }, + }, + ReturnTypes: []*config.Field{ + {Name: "success", Field: &config.Field_BoolField{}}, + {Name: "resource", Field: &config.Field_ResourceField{}}, + }, + }, o.handleCreateRepositoryAction) +} + +func (o *repositoryResourceType) registerDeleteRepositoryAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { + return registry.Register(ctx, &v2.ResourceActionSchema{ + Name: "delete", + DisplayName: "Delete Repository", + Description: "Delete a repository from a GitHub organization", + ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_DELETE}, + Arguments: []*config.Field{ + { + Name: "resource", + DisplayName: "Repository Resource", + Description: "The repository resource to delete", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + { + Name: "parent", + DisplayName: "Parent Organization", + Description: "The organization the repository belongs to", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + }, + ReturnTypes: []*config.Field{ + {Name: "success", Field: &config.Field_BoolField{}}, + }, + }, o.handleDeleteRepositoryAction) +} + +func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { + l := ctxzap.Extract(ctx) + + // Extract required arguments using SDK helpers + name, err := actions.RequireStringArg(args, "name") + if err != nil { + return nil, nil, err + } + + parentResourceID, err := actions.RequireResourceIDArg(args, "parent") + if err != nil { + return nil, nil, err + } + + // Get the organization name from the parent resource ID + orgName, err := 
o.orgCache.GetOrgName(ctx, parentResourceID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get organization name: %w", err) + } + + l.Info("github-connector: creating repository via action", + zap.String("repo_name", name), + zap.String("org_name", orgName), + ) + + // Build the Repository request + newRepo := &github.Repository{ + Name: github.Ptr(name), + } + + // Extract optional fields using SDK helpers + if description, ok := actions.GetStringArg(args, "description"); ok && description != "" { + newRepo.Description = github.Ptr(description) + } + + if private, ok := actions.GetBoolArg(args, "private"); ok { + newRepo.Private = github.Ptr(private) + } + + if visibility, ok := actions.GetStringArg(args, "visibility"); ok && visibility != "" { + if visibility == "public" || visibility == "private" || visibility == "internal" { + newRepo.Visibility = github.Ptr(visibility) + } else { + l.Warn("github-connector: invalid visibility value, using default", + zap.String("provided_visibility", visibility), + ) + } + } + + if hasIssues, ok := actions.GetBoolArg(args, "has_issues"); ok { + newRepo.HasIssues = github.Ptr(hasIssues) + } + + if hasProjects, ok := actions.GetBoolArg(args, "has_projects"); ok { + newRepo.HasProjects = github.Ptr(hasProjects) + } + + if hasWiki, ok := actions.GetBoolArg(args, "has_wiki"); ok { + newRepo.HasWiki = github.Ptr(hasWiki) + } + + if hasDiscussions, ok := actions.GetBoolArg(args, "has_discussions"); ok { + newRepo.HasDiscussions = github.Ptr(hasDiscussions) + } + + if autoInit, ok := actions.GetBoolArg(args, "auto_init"); ok { + newRepo.AutoInit = github.Ptr(autoInit) + } + + if gitignoreTemplate, ok := actions.GetStringArg(args, "gitignore_template"); ok && gitignoreTemplate != "" { + newRepo.GitignoreTemplate = github.Ptr(gitignoreTemplate) + } + + if licenseTemplate, ok := actions.GetStringArg(args, "license_template"); ok && licenseTemplate != "" { + newRepo.LicenseTemplate = github.Ptr(licenseTemplate) + } + + if allowSquashMerge, ok := actions.GetBoolArg(args, "allow_squash_merge"); ok { + newRepo.AllowSquashMerge = github.Ptr(allowSquashMerge) + } + + if allowMergeCommit, ok := actions.GetBoolArg(args, "allow_merge_commit"); ok { + newRepo.AllowMergeCommit = github.Ptr(allowMergeCommit) + } + + if allowRebaseMerge, ok := actions.GetBoolArg(args, "allow_rebase_merge"); ok { + newRepo.AllowRebaseMerge = github.Ptr(allowRebaseMerge) + } + + if allowAutoMerge, ok := actions.GetBoolArg(args, "allow_auto_merge"); ok { + newRepo.AllowAutoMerge = github.Ptr(allowAutoMerge) + } + + if deleteBranchOnMerge, ok := actions.GetBoolArg(args, "delete_branch_on_merge"); ok { + newRepo.DeleteBranchOnMerge = github.Ptr(deleteBranchOnMerge) + } + + if isTemplate, ok := actions.GetBoolArg(args, "is_template"); ok { + newRepo.IsTemplate = github.Ptr(isTemplate) + } + + // Create the repository via GitHub API + createdRepo, resp, err := o.client.Repositories.Create(ctx, orgName, newRepo) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to create repository %s in org %s", name, orgName)) + } + + // Extract rate limit data for annotations + var annos annotations.Annotations + if rateLimitData, err := extractRateLimitData(resp); err == nil { + annos.WithRateLimiting(rateLimitData) + } + + l.Info("github-connector: repository created successfully via action", + zap.String("repo_name", createdRepo.GetName()), + zap.Int64("repo_id", createdRepo.GetID()), + zap.String("repo_full_name", createdRepo.GetFullName()), + ) + + // Create the 
resource representation of the newly created repository + repoResource, err := repositoryResource(ctx, createdRepo, parentResourceID) + if err != nil { + return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) + } + + // Build return values using SDK helpers + resourceRv, err := actions.NewResourceReturnField("resource", repoResource) + if err != nil { + return nil, annos, err + } + + return actions.NewReturnValues(true, resourceRv), annos, nil +} + +func (o *repositoryResourceType) handleDeleteRepositoryAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { + l := ctxzap.Extract(ctx) + + // Extract the repository resource ID using SDK helper + resourceID, err := actions.RequireResourceIDArg(args, "resource") + if err != nil { + return nil, nil, err + } + + // Extract the parent org resource ID using SDK helper + parentResourceID, err := actions.RequireResourceIDArg(args, "parent") + if err != nil { + return nil, nil, err + } + + // Parse the repo ID from the resource + repoID, err := strconv.ParseInt(resourceID.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid repository ID %s: %w", resourceID.Resource, err) + } + + // Get the organization name from the parent resource ID + orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get organization name: %w", err) + } + + // First, get the repository to find its name (needed for deletion) + repo, resp, err := o.client.Repositories.GetByID(ctx, repoID) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get repository %d", repoID)) + } + + repoName := repo.GetName() + + l.Info("github-connector: deleting repository via action", + zap.Int64("repo_id", repoID), + zap.String("repo_name", repoName), + zap.String("org_name", orgName), + ) + + // Delete the repository via GitHub API + resp, err = o.client.Repositories.Delete(ctx, orgName, repoName) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to delete repository %s in org %s", repoName, orgName)) + } + + var annos annotations.Annotations + if rateLimitData, err := extractRateLimitData(resp); err == nil { + annos.WithRateLimiting(rateLimitData) + } + + l.Info("github-connector: repository deleted successfully via action", + zap.Int64("repo_id", repoID), + zap.String("repo_name", repoName), + zap.String("org_name", orgName), + ) + + return actions.NewReturnValues(true), annos, nil +} From d6cd32d85d83c22a51ace6e575f8aa43e63aaf9a Mon Sep 17 00:00:00 2001 From: Muhammad Kumail Date: Mon, 8 Dec 2025 21:57:45 +0000 Subject: [PATCH 04/19] add: update groups --- pkg/connector/repository.go | 376 +++++++++++++++++++++++++++++++++++- pkg/connector/team.go | 245 ++++++++++++++++++++++- 2 files changed, 606 insertions(+), 15 deletions(-) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index 179cd9b0..199f3aff 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -443,6 +443,9 @@ func (o *repositoryResourceType) ResourceActions(ctx context.Context, registry a if err := o.registerCreateRepositoryAction(ctx, registry); err != nil { return err } + if err := o.registerUpdateRepositoryAction(ctx, registry); err != nil { + return err + } if err := o.registerDeleteRepositoryAction(ctx, registry); err != nil { return err } @@ -450,7 +453,7 @@ func (o *repositoryResourceType) ResourceActions(ctx context.Context, registry a } func (o 
*repositoryResourceType) registerCreateRepositoryAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { - return registry.Register(ctx, &v2.ResourceActionSchema{ + return registry.Register(ctx, &v2.BatonActionSchema{ Name: "create", DisplayName: "Create Repository", Description: "Create a new repository in a GitHub organization", @@ -485,8 +488,16 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont { Name: "visibility", DisplayName: "Visibility", - Description: "The visibility level: 'public', 'private', or 'internal'", - Field: &config.Field_StringField{}, + Description: "The visibility level of the repository", + Field: &config.Field_StringField{ + StringField: &config.StringField{ + Options: []*config.StringFieldOption{ + {Value: "public", DisplayName: "Public"}, + {Value: "private", DisplayName: "Private"}, + {Value: "internal", DisplayName: "Internal (Enterprise only)"}, + }, + }, + }, }, { Name: "has_issues", @@ -521,14 +532,48 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont { Name: "gitignore_template", DisplayName: "Gitignore Template", - Description: "Gitignore template to apply (e.g., 'Go', 'Python', 'Node')", - Field: &config.Field_StringField{}, + Description: "Gitignore template to apply", + Field: &config.Field_StringField{ + StringField: &config.StringField{ + Options: []*config.StringFieldOption{ + {Value: "", DisplayName: "None"}, + {Value: "Go", DisplayName: "Go"}, + {Value: "Python", DisplayName: "Python"}, + {Value: "Node", DisplayName: "Node"}, + {Value: "Java", DisplayName: "Java"}, + {Value: "Ruby", DisplayName: "Ruby"}, + {Value: "Rust", DisplayName: "Rust"}, + {Value: "C++", DisplayName: "C++"}, + {Value: "C", DisplayName: "C"}, + {Value: "Swift", DisplayName: "Swift"}, + {Value: "Kotlin", DisplayName: "Kotlin"}, + {Value: "Scala", DisplayName: "Scala"}, + {Value: "Terraform", DisplayName: "Terraform"}, + }, + }, + }, }, { Name: "license_template", DisplayName: "License Template", - Description: "License template to apply (e.g., 'mit', 'apache-2.0', 'gpl-3.0')", - Field: &config.Field_StringField{}, + Description: "License template to apply", + Field: &config.Field_StringField{ + StringField: &config.StringField{ + Options: []*config.StringFieldOption{ + {Value: "", DisplayName: "None"}, + {Value: "mit", DisplayName: "MIT License"}, + {Value: "apache-2.0", DisplayName: "Apache License 2.0"}, + {Value: "gpl-3.0", DisplayName: "GNU GPLv3"}, + {Value: "gpl-2.0", DisplayName: "GNU GPLv2"}, + {Value: "lgpl-3.0", DisplayName: "GNU LGPLv3"}, + {Value: "bsd-3-clause", DisplayName: "BSD 3-Clause"}, + {Value: "bsd-2-clause", DisplayName: "BSD 2-Clause"}, + {Value: "mpl-2.0", DisplayName: "Mozilla Public License 2.0"}, + {Value: "unlicense", DisplayName: "The Unlicense"}, + {Value: "agpl-3.0", DisplayName: "GNU AGPLv3"}, + }, + }, + }, }, { Name: "allow_squash_merge", @@ -575,7 +620,7 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont } func (o *repositoryResourceType) registerDeleteRepositoryAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { - return registry.Register(ctx, &v2.ResourceActionSchema{ + return registry.Register(ctx, &v2.BatonActionSchema{ Name: "delete", DisplayName: "Delete Repository", Description: "Delete a repository from a GitHub organization", @@ -602,6 +647,145 @@ func (o *repositoryResourceType) registerDeleteRepositoryAction(ctx context.Cont }, o.handleDeleteRepositoryAction) } +func (o 
*repositoryResourceType) registerUpdateRepositoryAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { + return registry.Register(ctx, &v2.BatonActionSchema{ + Name: "update", + DisplayName: "Update Repository", + Description: "Update an existing repository in a GitHub organization", + ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_MUTATE}, + Arguments: []*config.Field{ + { + Name: "resource", + DisplayName: "Repository Resource", + Description: "The repository resource to update", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + { + Name: "parent", + DisplayName: "Parent Organization", + Description: "The organization the repository belongs to", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + { + Name: "name", + DisplayName: "Repository Name", + Description: "The new name of the repository (leave empty to keep current)", + Field: &config.Field_StringField{}, + }, + { + Name: "description", + DisplayName: "Description", + Description: "A description of the repository", + Field: &config.Field_StringField{}, + }, + { + Name: "homepage", + DisplayName: "Homepage", + Description: "A URL with more information about the repository", + Field: &config.Field_StringField{}, + }, + { + Name: "private", + DisplayName: "Private", + Description: "Whether the repository should be private (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "visibility", + DisplayName: "Visibility", + Description: "The visibility level of the repository", + Field: &config.Field_StringField{ + StringField: &config.StringField{ + Options: []*config.StringFieldOption{ + {Value: "public", DisplayName: "Public"}, + {Value: "private", DisplayName: "Private"}, + {Value: "internal", DisplayName: "Internal (Enterprise only)"}, + }, + }, + }, + }, + { + Name: "has_issues", + DisplayName: "Has Issues", + Description: "Enable issues for this repository (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "has_projects", + DisplayName: "Has Projects", + Description: "Enable projects for this repository (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "has_wiki", + DisplayName: "Has Wiki", + Description: "Enable wiki for this repository (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "has_discussions", + DisplayName: "Has Discussions", + Description: "Enable discussions for this repository (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "default_branch", + DisplayName: "Default Branch", + Description: "The default branch of the repository", + Field: &config.Field_StringField{}, + }, + { + Name: "allow_squash_merge", + DisplayName: "Allow Squash Merge", + Description: "Allow squash-merging pull requests (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "allow_merge_commit", + DisplayName: "Allow Merge Commit", + Description: "Allow merging pull requests with a merge commit (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "allow_rebase_merge", + DisplayName: "Allow Rebase Merge", + Description: "Allow rebase-merging pull requests (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "allow_auto_merge", + DisplayName: "Allow Auto Merge", + Description: "Allow auto-merge on pull requests (true/false)", + Field: &config.Field_BoolField{}, + }, + { + Name: "delete_branch_on_merge", + DisplayName: "Delete Branch on Merge", + Description: "Automatically delete head branches after pull requests are merged (true/false)", + 
Field: &config.Field_BoolField{}, + }, + { + Name: "archived", + DisplayName: "Archived", + Description: "Archive the repository (true/false). Note: You cannot unarchive repositories through the API", + Field: &config.Field_BoolField{}, + }, + { + Name: "is_template", + DisplayName: "Is Template", + Description: "Make this repository available as a template (true/false)", + Field: &config.Field_BoolField{}, + }, + }, + ReturnTypes: []*config.Field{ + {Name: "success", Field: &config.Field_BoolField{}}, + {Name: "resource", Field: &config.Field_ResourceField{}}, + }, + }, o.handleUpdateRepositoryAction) +} + func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { l := ctxzap.Extract(ctx) @@ -796,3 +980,179 @@ func (o *repositoryResourceType) handleDeleteRepositoryAction(ctx context.Contex return actions.NewReturnValues(true), annos, nil } + +func (o *repositoryResourceType) handleUpdateRepositoryAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { + l := ctxzap.Extract(ctx) + + // Extract the repository resource ID using SDK helper + resourceID, err := actions.RequireResourceIDArg(args, "resource") + if err != nil { + return nil, nil, err + } + + // Extract the parent org resource ID using SDK helper + parentResourceID, err := actions.RequireResourceIDArg(args, "parent") + if err != nil { + return nil, nil, err + } + + // Parse the repo ID from the resource + repoID, err := strconv.ParseInt(resourceID.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid repository ID %s: %w", resourceID.Resource, err) + } + + // Get the organization name from the parent resource ID + orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get organization name: %w", err) + } + + // First, get the current repository to find its name + repo, resp, err := o.client.Repositories.GetByID(ctx, repoID) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get repository %d", repoID)) + } + + currentRepoName := repo.GetName() + + l.Info("github-connector: updating repository via action", + zap.Int64("repo_id", repoID), + zap.String("repo_name", currentRepoName), + zap.String("org_name", orgName), + ) + + // Build the Repository update request + updateRepo := &github.Repository{} + + // Track if any updates were provided + hasUpdates := false + + // Extract optional fields using SDK helpers + if name, ok := actions.GetStringArg(args, "name"); ok && name != "" { + updateRepo.Name = github.Ptr(name) + hasUpdates = true + } + + if description, ok := actions.GetStringArg(args, "description"); ok { + updateRepo.Description = github.Ptr(description) + hasUpdates = true + } + + if homepage, ok := actions.GetStringArg(args, "homepage"); ok { + updateRepo.Homepage = github.Ptr(homepage) + hasUpdates = true + } + + if private, ok := actions.GetBoolArg(args, "private"); ok { + updateRepo.Private = github.Ptr(private) + hasUpdates = true + } + + if visibility, ok := actions.GetStringArg(args, "visibility"); ok && visibility != "" { + if visibility == "public" || visibility == "private" || visibility == "internal" { + updateRepo.Visibility = github.Ptr(visibility) + hasUpdates = true + } else { + l.Warn("github-connector: invalid visibility value, ignoring", + zap.String("provided_visibility", visibility), + ) + } + } + + if hasIssues, ok := 
actions.GetBoolArg(args, "has_issues"); ok { + updateRepo.HasIssues = github.Ptr(hasIssues) + hasUpdates = true + } + + if hasProjects, ok := actions.GetBoolArg(args, "has_projects"); ok { + updateRepo.HasProjects = github.Ptr(hasProjects) + hasUpdates = true + } + + if hasWiki, ok := actions.GetBoolArg(args, "has_wiki"); ok { + updateRepo.HasWiki = github.Ptr(hasWiki) + hasUpdates = true + } + + if hasDiscussions, ok := actions.GetBoolArg(args, "has_discussions"); ok { + updateRepo.HasDiscussions = github.Ptr(hasDiscussions) + hasUpdates = true + } + + if defaultBranch, ok := actions.GetStringArg(args, "default_branch"); ok && defaultBranch != "" { + updateRepo.DefaultBranch = github.Ptr(defaultBranch) + hasUpdates = true + } + + if allowSquashMerge, ok := actions.GetBoolArg(args, "allow_squash_merge"); ok { + updateRepo.AllowSquashMerge = github.Ptr(allowSquashMerge) + hasUpdates = true + } + + if allowMergeCommit, ok := actions.GetBoolArg(args, "allow_merge_commit"); ok { + updateRepo.AllowMergeCommit = github.Ptr(allowMergeCommit) + hasUpdates = true + } + + if allowRebaseMerge, ok := actions.GetBoolArg(args, "allow_rebase_merge"); ok { + updateRepo.AllowRebaseMerge = github.Ptr(allowRebaseMerge) + hasUpdates = true + } + + if allowAutoMerge, ok := actions.GetBoolArg(args, "allow_auto_merge"); ok { + updateRepo.AllowAutoMerge = github.Ptr(allowAutoMerge) + hasUpdates = true + } + + if deleteBranchOnMerge, ok := actions.GetBoolArg(args, "delete_branch_on_merge"); ok { + updateRepo.DeleteBranchOnMerge = github.Ptr(deleteBranchOnMerge) + hasUpdates = true + } + + if archived, ok := actions.GetBoolArg(args, "archived"); ok { + updateRepo.Archived = github.Ptr(archived) + hasUpdates = true + } + + if isTemplate, ok := actions.GetBoolArg(args, "is_template"); ok { + updateRepo.IsTemplate = github.Ptr(isTemplate) + hasUpdates = true + } + + if !hasUpdates { + return nil, nil, fmt.Errorf("no update fields provided") + } + + // Update the repository via GitHub API + updatedRepo, resp, err := o.client.Repositories.Edit(ctx, orgName, currentRepoName, updateRepo) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to update repository %s in org %s", currentRepoName, orgName)) + } + + // Extract rate limit data for annotations + var annos annotations.Annotations + if rateLimitData, err := extractRateLimitData(resp); err == nil { + annos.WithRateLimiting(rateLimitData) + } + + l.Info("github-connector: repository updated successfully via action", + zap.Int64("repo_id", updatedRepo.GetID()), + zap.String("repo_name", updatedRepo.GetName()), + zap.String("repo_full_name", updatedRepo.GetFullName()), + ) + + // Create the resource representation of the updated repository + repoResource, err := repositoryResource(ctx, updatedRepo, parentResourceID) + if err != nil { + return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) + } + + // Build return values using SDK helpers + resourceRv, err := actions.NewResourceReturnField("resource", repoResource) + if err != nil { + return nil, annos, err + } + + return actions.NewReturnValues(true, resourceRv), annos, nil +} diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 32bbc995..f315355a 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -371,6 +371,9 @@ func (o *teamResourceType) ResourceActions(ctx context.Context, registry actions if err := o.registerCreateTeamAction(ctx, registry); err != nil { return err } + if err := o.registerUpdateTeamAction(ctx, registry); err != nil 
{ + return err + } if err := o.registerDeleteTeamAction(ctx, registry); err != nil { return err } @@ -378,7 +381,7 @@ func (o *teamResourceType) ResourceActions(ctx context.Context, registry actions } func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { - return registry.Register(ctx, &v2.ResourceActionSchema{ + return registry.Register(ctx, &v2.BatonActionSchema{ Name: "create", DisplayName: "Create Team", Description: "Create a new team in a GitHub organization", @@ -410,6 +413,19 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr Description: "The privacy level: 'secret' or 'closed'", Field: &config.Field_StringField{}, }, + { + Name: "notification_setting", + DisplayName: "Notification Setting", + Description: "The notification setting for the team", + Field: &config.Field_StringField{ + StringField: &config.StringField{ + Options: []*config.StringFieldOption{ + {Value: "notifications_enabled", DisplayName: "Enabled"}, + {Value: "notifications_disabled", DisplayName: "Disabled"}, + }, + }, + }, + }, }, ReturnTypes: []*config.Field{ {Name: "success", Field: &config.Field_BoolField{}}, @@ -419,7 +435,7 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr } func (o *teamResourceType) registerDeleteTeamAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { - return registry.Register(ctx, &v2.ResourceActionSchema{ + return registry.Register(ctx, &v2.BatonActionSchema{ Name: "delete", DisplayName: "Delete Team", Description: "Delete a team from a GitHub organization", @@ -446,6 +462,73 @@ func (o *teamResourceType) registerDeleteTeamAction(ctx context.Context, registr }, o.handleDeleteTeamAction) } +func (o *teamResourceType) registerUpdateTeamAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { + return registry.Register(ctx, &v2.BatonActionSchema{ + Name: "update", + DisplayName: "Update Team", + Description: "Update an existing team in a GitHub organization", + ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_MUTATE}, + Arguments: []*config.Field{ + { + Name: "resource", + DisplayName: "Team Resource", + Description: "The team resource to update", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + { + Name: "parent", + DisplayName: "Parent Organization", + Description: "The organization the team belongs to", + Field: &config.Field_ResourceIdField{}, + IsRequired: true, + }, + { + Name: "name", + DisplayName: "Team Name", + Description: "The new name of the team (leave empty to keep current)", + Field: &config.Field_StringField{}, + }, + { + Name: "description", + DisplayName: "Description", + Description: "A description of the team", + Field: &config.Field_StringField{}, + }, + { + Name: "privacy", + DisplayName: "Privacy", + Description: "The privacy level of the team", + Field: &config.Field_StringField{ + StringField: &config.StringField{ + Options: []*config.StringFieldOption{ + {Value: "secret", DisplayName: "Secret (only visible to org owners and team members)"}, + {Value: "closed", DisplayName: "Closed (visible to all org members)"}, + }, + }, + }, + }, + { + Name: "notification_setting", + DisplayName: "Notification Setting", + Description: "The notification setting for the team", + Field: &config.Field_StringField{ + StringField: &config.StringField{ + Options: []*config.StringFieldOption{ + {Value: "notifications_enabled", DisplayName: "Enabled"}, + {Value: 
"notifications_disabled", DisplayName: "Disabled"}, + }, + }, + }, + }, + }, + ReturnTypes: []*config.Field{ + {Name: "success", Field: &config.Field_BoolField{}}, + {Name: "resource", Field: &config.Field_ResourceField{}}, + }, + }, o.handleUpdateTeamAction) +} + func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { l := ctxzap.Extract(ctx) @@ -551,15 +634,30 @@ func (o *teamResourceType) handleDeleteTeamAction(ctx context.Context, args *str return nil, nil, fmt.Errorf("invalid org ID %s: %w", parentResourceID.Resource, err) } + // Get the organization name from the cache + orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get organization name: %w", err) + } + + // Get the team to find its slug + team, resp, err := o.client.Teams.GetTeamByID(ctx, orgID, teamID) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get team %d", teamID)) + } + + teamSlug := team.GetSlug() + l.Info("github-connector: deleting team via action", zap.Int64("team_id", teamID), - zap.Int64("org_id", orgID), + zap.String("team_slug", teamSlug), + zap.String("org_name", orgName), ) - // Delete the team directly using the provided org ID from parent - resp, err := o.client.Teams.DeleteTeamByID(ctx, orgID, teamID) + // Delete the team using slug + resp, err = o.client.Teams.DeleteTeamBySlug(ctx, orgName, teamSlug) if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to delete team %d in org %d", teamID, orgID)) + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to delete team %s in org %s", teamSlug, orgName)) } var annos annotations.Annotations @@ -569,12 +667,145 @@ func (o *teamResourceType) handleDeleteTeamAction(ctx context.Context, args *str l.Info("github-connector: team deleted successfully via action", zap.Int64("team_id", teamID), - zap.Int64("org_id", orgID), + zap.String("team_slug", teamSlug), + zap.String("org_name", orgName), ) return actions.NewReturnValues(true), annos, nil } +func (o *teamResourceType) handleUpdateTeamAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { + l := ctxzap.Extract(ctx) + + // Extract the team resource ID using SDK helper + resourceID, err := actions.RequireResourceIDArg(args, "resource") + if err != nil { + return nil, nil, err + } + + // Extract the parent org resource ID using SDK helper + parentResourceID, err := actions.RequireResourceIDArg(args, "parent") + if err != nil { + return nil, nil, err + } + + // Parse the team ID from the resource + teamID, err := strconv.ParseInt(resourceID.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid team ID %s: %w", resourceID.Resource, err) + } + + // Parse the org ID from the parent resource + orgID, err := strconv.ParseInt(parentResourceID.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid org ID %s: %w", parentResourceID.Resource, err) + } + + // Get the organization name from the cache + orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get organization name: %w", err) + } + + // Get the team to find its slug + team, resp, err := o.client.Teams.GetTeamByID(ctx, orgID, teamID) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get team %d", teamID)) + } + + teamSlug := team.GetSlug() + + 
l.Info("github-connector: updating team via action", + zap.Int64("team_id", teamID), + zap.String("team_slug", teamSlug), + zap.String("org_name", orgName), + ) + + // Build the NewTeam update request + // Note: GitHub API uses NewTeam for both create and edit operations + updateTeam := github.NewTeam{} + + // Track if any updates were provided + hasUpdates := false + + // Extract optional fields using SDK helpers + if name, ok := actions.GetStringArg(args, "name"); ok && name != "" { + updateTeam.Name = name + hasUpdates = true + } + + if description, ok := actions.GetStringArg(args, "description"); ok { + updateTeam.Description = github.Ptr(description) + hasUpdates = true + } + + if privacy, ok := actions.GetStringArg(args, "privacy"); ok && privacy != "" { + if privacy == "secret" || privacy == "closed" { + updateTeam.Privacy = github.Ptr(privacy) + hasUpdates = true + } else { + l.Warn("github-connector: invalid privacy value, ignoring", + zap.String("provided_privacy", privacy), + ) + } + } + + if notificationSetting, ok := actions.GetStringArg(args, "notification_setting"); ok && notificationSetting != "" { + if notificationSetting == "notifications_enabled" || notificationSetting == "notifications_disabled" { + updateTeam.NotificationSetting = github.Ptr(notificationSetting) + hasUpdates = true + } else { + l.Warn("github-connector: invalid notification_setting value, ignoring", + zap.String("provided_notification_setting", notificationSetting), + ) + } + } + + if parentTeamID, ok := actions.GetIntArg(args, "parent_team_id"); ok { + if parentTeamID > 0 { + updateTeam.ParentTeamID = github.Ptr(parentTeamID) + hasUpdates = true + } + // Note: Setting to 0 would remove the parent, but GitHub API requires omitting the field entirely + } + + if !hasUpdates { + return nil, nil, fmt.Errorf("no update fields provided") + } + + // Update the team via GitHub API using slug + updatedTeam, resp, err := o.client.Teams.EditTeamBySlug(ctx, orgName, teamSlug, updateTeam, false) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to update team %s in org %s", teamSlug, orgName)) + } + + // Extract rate limit data for annotations + var annos annotations.Annotations + if rateLimitData, err := extractRateLimitData(resp); err == nil { + annos.WithRateLimiting(rateLimitData) + } + + l.Info("github-connector: team updated successfully via action", + zap.Int64("team_id", updatedTeam.GetID()), + zap.String("team_name", updatedTeam.GetName()), + zap.String("team_slug", updatedTeam.GetSlug()), + ) + + // Create the resource representation of the updated team + resource, err := teamResource(updatedTeam, parentResourceID) + if err != nil { + return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) + } + + // Build return values using SDK helpers + resourceRv, err := actions.NewResourceReturnField("resource", resource) + if err != nil { + return nil, annos, err + } + + return actions.NewReturnValues(true, resourceRv), annos, nil +} + func teamBuilder(client *github.Client, orgCache *orgNameCache) *teamResourceType { return &teamResourceType{ resourceType: resourceTypeTeam, From 450c7f86deb11c5e8c6276e952b35e13ea5ad57a Mon Sep 17 00:00:00 2001 From: Muhammad Kumail Date: Thu, 11 Dec 2025 19:46:11 +0000 Subject: [PATCH 05/19] fix: deprecated action --- pkg/connector/repository.go | 8 ++++---- pkg/connector/team.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index 
199f3aff..c0432824 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -439,7 +439,7 @@ func skipGrantsForResourceType(bag *pagination.Bag) (string, error) { // ResourceActions registers the resource actions for the repository resource type. // This implements the ResourceActionProvider interface. -func (o *repositoryResourceType) ResourceActions(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { +func (o *repositoryResourceType) ResourceActions(ctx context.Context, registry actions.ActionRegistry) error { if err := o.registerCreateRepositoryAction(ctx, registry); err != nil { return err } @@ -452,7 +452,7 @@ func (o *repositoryResourceType) ResourceActions(ctx context.Context, registry a return nil } -func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { +func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Context, registry actions.ActionRegistry) error { return registry.Register(ctx, &v2.BatonActionSchema{ Name: "create", DisplayName: "Create Repository", @@ -619,7 +619,7 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont }, o.handleCreateRepositoryAction) } -func (o *repositoryResourceType) registerDeleteRepositoryAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { +func (o *repositoryResourceType) registerDeleteRepositoryAction(ctx context.Context, registry actions.ActionRegistry) error { return registry.Register(ctx, &v2.BatonActionSchema{ Name: "delete", DisplayName: "Delete Repository", @@ -647,7 +647,7 @@ func (o *repositoryResourceType) registerDeleteRepositoryAction(ctx context.Cont }, o.handleDeleteRepositoryAction) } -func (o *repositoryResourceType) registerUpdateRepositoryAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { +func (o *repositoryResourceType) registerUpdateRepositoryAction(ctx context.Context, registry actions.ActionRegistry) error { return registry.Register(ctx, &v2.BatonActionSchema{ Name: "update", DisplayName: "Update Repository", diff --git a/pkg/connector/team.go b/pkg/connector/team.go index f315355a..37af7a94 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -367,7 +367,7 @@ func (o *teamResourceType) Revoke(ctx context.Context, grant *v2.Grant) (annotat // ResourceActions registers the resource actions for the team resource type. // This implements the ResourceActionProvider interface. 
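A minimal, illustrative sketch of how a resource type wires an action through the renamed actions.ActionRegistry, reusing only the calls visible in these hunks (registry.Register with a v2.BatonActionSchema, a *structpb.Struct handler, actions.NewReturnValues). exampleResourceType, handleNoop, and the "noop" action name are hypothetical, and the exact SDK signatures are assumed to match the usage in registerUpdateTeamAction and handleCreateTeamAction above; this is not part of the patch itself.

package connector

import (
	"context"

	config "github.com/conductorone/baton-sdk/pb/c1/config/v1"
	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/actions"
	"github.com/conductorone/baton-sdk/pkg/annotations"
	"google.golang.org/protobuf/types/known/structpb"
)

// exampleResourceType is a hypothetical resource type used only to illustrate the
// ActionRegistry wiring; the team and repository builders in this series follow the same shape.
type exampleResourceType struct{}

// ResourceActions registers a single argument-less action against the renamed ActionRegistry.
func (o *exampleResourceType) ResourceActions(ctx context.Context, registry actions.ActionRegistry) error {
	return registry.Register(ctx, &v2.BatonActionSchema{
		Name:        "noop",
		DisplayName: "No-op",
		ReturnTypes: []*config.Field{
			{Name: "success", Field: &config.Field_BoolField{}},
		},
	}, o.handleNoop)
}

// handleNoop uses the same handler signature as handleCreateTeamAction and
// handleUpdateTeamAction: it receives the action arguments as a *structpb.Struct
// and reports success via actions.NewReturnValues.
func (o *exampleResourceType) handleNoop(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) {
	return actions.NewReturnValues(true), nil, nil
}

Note that only the registry parameter type changes in the hunks below; the registered schemas and handler signatures are untouched.
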
-func (o *teamResourceType) ResourceActions(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { +func (o *teamResourceType) ResourceActions(ctx context.Context, registry actions.ActionRegistry) error { if err := o.registerCreateTeamAction(ctx, registry); err != nil { return err } @@ -380,7 +380,7 @@ func (o *teamResourceType) ResourceActions(ctx context.Context, registry actions return nil } -func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { +func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registry actions.ActionRegistry) error { return registry.Register(ctx, &v2.BatonActionSchema{ Name: "create", DisplayName: "Create Team", @@ -434,7 +434,7 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr }, o.handleCreateTeamAction) } -func (o *teamResourceType) registerDeleteTeamAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { +func (o *teamResourceType) registerDeleteTeamAction(ctx context.Context, registry actions.ActionRegistry) error { return registry.Register(ctx, &v2.BatonActionSchema{ Name: "delete", DisplayName: "Delete Team", @@ -462,7 +462,7 @@ func (o *teamResourceType) registerDeleteTeamAction(ctx context.Context, registr }, o.handleDeleteTeamAction) } -func (o *teamResourceType) registerUpdateTeamAction(ctx context.Context, registry actions.ResourceTypeActionRegistry) error { +func (o *teamResourceType) registerUpdateTeamAction(ctx context.Context, registry actions.ActionRegistry) error { return registry.Register(ctx, &v2.BatonActionSchema{ Name: "update", DisplayName: "Update Team", From 01d085834251bbb9650f4ebfb327fdb4c6d1db07 Mon Sep 17 00:00:00 2001 From: Justin Gallardo Date: Tue, 30 Dec 2025 09:36:24 -0800 Subject: [PATCH 06/19] Bump baton-sdk to v0.6.9 --- go.mod | 21 +- go.sum | 40 +- pkg/config/conf.gen.go | 28 +- pkg/connector/repository.go | 2 +- pkg/connector/team.go | 2 +- .../Masterminds/semver/v3/.gitignore | 1 + .../Masterminds/semver/v3/.golangci.yml | 27 + .../Masterminds/semver/v3/CHANGELOG.md | 268 + .../semver/v3/LICENSE.txt} | 12 +- .../github.com/Masterminds/semver/v3/Makefile | 31 + .../Masterminds/semver/v3/README.md | 274 + .../Masterminds/semver/v3/SECURITY.md | 19 + .../Masterminds/semver/v3/collection.go | 24 + .../Masterminds/semver/v3/constraints.go | 601 ++ .../github.com/Masterminds/semver/v3/doc.go | 184 + .../Masterminds/semver/v3/version.go | 788 +++ .../baton-sdk/internal/connector/connector.go | 138 +- .../connector/connector_server_unix.go | 6 +- .../c1/c1z/v1/annotation_sync_details.pb.go | 56 +- .../annotation_sync_details_protoopaque.pb.go | 124 + .../baton-sdk/pb/c1/c1z/v1/diff.pb.go | 274 +- .../pb/c1/c1z/v1/diff_protoopaque.pb.go | 491 ++ .../baton-sdk/pb/c1/config/v1/config.pb.go | 2067 +++++-- .../pb/c1/config/v1/config.pb.validate.go | 1199 ++++ .../pb/c1/config/v1/config_protoopaque.pb.go | 2422 ++++++++ .../baton-sdk/pb/c1/config/v1/rules.pb.go | 1134 +++- .../pb/c1/config/v1/rules.pb.validate.go | 202 + .../pb/c1/config/v1/rules_protoopaque.pb.go | 1742 ++++++ .../baton-sdk/pb/c1/connector/v2/action.pb.go | 878 ++- .../pb/c1/connector/v2/action.pb.validate.go | 6 + .../c1/connector/v2/action_protoopaque.pb.go | 1221 ++++ .../c1/connector/v2/annotation_baton_id.pb.go | 162 +- .../v2/annotation_baton_id_protoopaque.pb.go | 323 + .../connector/v2/annotation_entitlement.pb.go | 82 +- .../annotation_entitlement_protoopaque.pb.go | 153 + 
.../pb/c1/connector/v2/annotation_etag.pb.go | 141 +- .../v2/annotation_etag_protoopaque.pb.go | 273 + .../v2/annotation_external_link.pb.go | 61 +- ...annotation_external_link_protoopaque.pb.go | 125 + .../v2/annotation_external_ticket.pb.go | 100 +- ...notation_external_ticket_protoopaque.pb.go | 199 + .../pb/c1/connector/v2/annotation_grant.pb.go | 274 +- .../v2/annotation_grant.pb.validate.go | 102 + .../v2/annotation_grant_protoopaque.pb.go | 454 ++ .../connector/v2/annotation_ratelimit.pb.go | 116 +- .../v2/annotation_ratelimit_protoopaque.pb.go | 239 + .../c1/connector/v2/annotation_raw_id.pb.go | 57 +- .../v2/annotation_raw_id_protoopaque.pb.go | 125 + .../c1/connector/v2/annotation_request.pb.go | 59 +- .../v2/annotation_request_protoopaque.pb.go | 125 + .../v2/annotation_resource_tree.pb.go | 144 +- .../annotation_resource_tree.pb.validate.go | 100 + ...annotation_resource_tree_protoopaque.pb.go | 260 + .../v2/annotation_security_insight.pb.go | 535 ++ ...annotation_security_insight.pb.validate.go | 639 ++ ...otation_security_insight_protoopaque.pb.go | 516 ++ .../c1/connector/v2/annotation_sync_id.pb.go | 130 - .../v2/annotation_sync_id.pb.validate.go | 137 - .../pb/c1/connector/v2/annotation_trait.pb.go | 922 ++- .../v2/annotation_trait_protoopaque.pb.go | 1527 +++++ .../v2/annotation_v1_identifier.pb.go | 58 +- ...annotation_v1_identifier_protoopaque.pb.go | 124 + .../baton-sdk/pb/c1/connector/v2/asset.pb.go | 307 +- .../c1/connector/v2/asset_protoopaque.pb.go | 509 ++ .../baton-sdk/pb/c1/connector/v2/config.pb.go | 646 +- .../c1/connector/v2/config_protoopaque.pb.go | 1080 ++++ .../pb/c1/connector/v2/connector.pb.go | 1677 ++++-- .../c1/connector/v2/connector.pb.validate.go | 286 + .../connector/v2/connector_protoopaque.pb.go | 2318 +++++++ .../pb/c1/connector/v2/entitlement.pb.go | 590 +- .../connector/v2/entitlement.pb.validate.go | 414 ++ .../pb/c1/connector/v2/entitlement_grpc.pb.go | 40 +- .../v2/entitlement_protoopaque.pb.go | 772 +++ .../pb/c1/connector/v2/event_feed.pb.go | 1052 +++- .../connector/v2/event_feed_protoopaque.pb.go | 1663 ++++++ .../baton-sdk/pb/c1/connector/v2/grant.pb.go | 586 +- .../pb/c1/connector/v2/grant.pb.validate.go | 15 + .../c1/connector/v2/grant_protoopaque.pb.go | 939 +++ .../pb/c1/connector/v2/resource.pb.go | 3315 +++++++--- .../c1/connector/v2/resource.pb.validate.go | 401 ++ .../connector/v2/resource_protoopaque.pb.go | 4838 +++++++++++++++ .../baton-sdk/pb/c1/connector/v2/ticket.pb.go | 2181 +++++-- .../c1/connector/v2/ticket_protoopaque.pb.go | 3409 +++++++++++ .../v1/connector_wrapper.pb.go | 136 +- .../v1/connector_wrapper.pb.validate.go | 2 + .../v1/connector_wrapper_protoopaque.pb.go | 200 + .../pb/c1/connectorapi/baton/v1/baton.pb.go | 3568 ++++++++--- .../baton/v1/baton.pb.validate.go | 38 + .../baton/v1/baton_protoopaque.pb.go | 5312 +++++++++++++++++ .../pb/c1/connectorapi/baton/v1/config.pb.go | 340 +- .../baton/v1/config.pb.validate.go | 208 + .../connectorapi/baton/v1/config_grpc.pb.go | 40 +- .../baton/v1/config_protoopaque.pb.go | 502 ++ .../pb/c1/connectorapi/baton/v1/session.pb.go | 1064 +++- .../baton/v1/session.pb.validate.go | 389 +- .../connectorapi/baton/v1/session_grpc.pb.go | 117 +- .../baton/v1/session_protoopaque.pb.go | 1443 +++++ .../pb/c1/ratelimit/v1/ratelimit.pb.go | 774 ++- .../ratelimit/v1/ratelimit_protoopaque.pb.go | 1322 ++++ .../pb/c1/reader/v2/entitlement.pb.go | 136 +- .../reader/v2/entitlement_protoopaque.pb.go | 224 + .../baton-sdk/pb/c1/reader/v2/grant.pb.go | 444 +- 
.../pb/c1/reader/v2/grant_protoopaque.pb.go | 667 +++ .../baton-sdk/pb/c1/reader/v2/resource.pb.go | 247 +- .../c1/reader/v2/resource_protoopaque.pb.go | 392 ++ .../baton-sdk/pb/c1/reader/v2/sync.pb.go | 463 +- .../pb/c1/reader/v2/sync_protoopaque.pb.go | 769 +++ .../pb/c1/transport/v1/transport.pb.go | 207 +- .../pb/c1/transport/v1/transport.proto | 22 - .../transport/v1/transport_protoopaque.pb.go | 335 ++ .../baton-sdk/pb/c1/utls/v1/tls.pb.go | 83 +- .../pb/c1/utls/v1/tls_protoopaque.pb.go | 165 + .../baton-sdk/pkg/actions/actions.go | 519 ++ .../baton-sdk/pkg/actions/args.go | 543 ++ .../baton-sdk/pkg/annotations/annotations.go | 10 - .../conductorone/baton-sdk/pkg/bid/bid.go | 10 +- .../conductorone/baton-sdk/pkg/bid/parser.go | 26 +- .../conductorone/baton-sdk/pkg/cli/cli.go | 26 +- .../baton-sdk/pkg/cli/commands.go | 244 +- .../baton-sdk/pkg/cli/lambda_server__added.go | 146 +- .../pkg/cli/lambda_server_omitted.go | 3 +- .../baton-sdk/pkg/cli/lazy_session.go | 129 + .../baton-sdk/pkg/config/config.go | 122 +- .../baton-sdk/pkg/config/generate.go | 30 +- .../pkg/connectorbuilder/accounts.go | 136 + .../baton-sdk/pkg/connectorbuilder/actions.go | 241 + .../baton-sdk/pkg/connectorbuilder/assets.go | 11 + .../pkg/connectorbuilder/connectorbuilder.go | 1824 ++---- .../pkg/connectorbuilder/credentials.go | 105 + .../baton-sdk/pkg/connectorbuilder/events.go | 163 + .../pkg/connectorbuilder/resource_manager.go | 230 + .../connectorbuilder/resource_provisioner.go | 173 + .../pkg/connectorbuilder/resource_syncer.go | 403 ++ .../pkg/connectorbuilder/session_store.go | 58 + .../baton-sdk/pkg/connectorbuilder/tickets.go | 242 + .../baton-sdk/pkg/connectorrunner/runner.go | 182 +- .../baton-sdk/pkg/crypto/client_secret.go | 2 +- .../baton-sdk/pkg/crypto/crypto.go | 56 +- .../baton-sdk/pkg/crypto/password.go | 8 +- .../baton-sdk/pkg/crypto/providers/jwk/jwk.go | 40 +- .../baton-sdk/pkg/dotc1z/assets.go | 10 +- .../baton-sdk/pkg/dotc1z/c1file.go | 155 +- .../conductorone/baton-sdk/pkg/dotc1z/diff.go | 10 + .../baton-sdk/pkg/dotc1z/entitlements.go | 47 +- .../conductorone/baton-sdk/pkg/dotc1z/file.go | 56 +- .../baton-sdk/pkg/dotc1z/grants.go | 117 +- .../pkg/dotc1z/manager/local/local.go | 36 +- .../baton-sdk/pkg/dotc1z/manager/s3/s3.go | 21 +- .../baton-sdk/pkg/dotc1z/resouce_types.go | 32 +- .../baton-sdk/pkg/dotc1z/resources.go | 41 +- .../baton-sdk/pkg/dotc1z/session_store.go | 428 ++ .../baton-sdk/pkg/dotc1z/sql_helpers.go | 245 +- .../baton-sdk/pkg/dotc1z/sync_runs.go | 75 +- .../baton-sdk/pkg/field/decode_hooks.go | 171 + .../pkg/field/default_relationships.go | 4 + .../baton-sdk/pkg/field/defaults.go | 90 + .../baton-sdk/pkg/field/field_group.go | 26 + .../baton-sdk/pkg/field/field_options.go | 12 +- .../baton-sdk/pkg/field/fields.go | 44 +- .../baton-sdk/pkg/field/marshal.go | 299 +- .../baton-sdk/pkg/field/rule_builders.go | 84 +- .../baton-sdk/pkg/field/struct.go | 27 + .../baton-sdk/pkg/field/validation.go | 145 +- .../baton-sdk/pkg/lambda/grpc/config/sts.go | 8 +- .../baton-sdk/pkg/lambda/grpc/server.go | 4 +- .../baton-sdk/pkg/lambda/grpc/transport.go | 15 +- .../baton-sdk/pkg/lambda/grpc/util.go | 4 +- .../baton-sdk/pkg/provisioner/provisioner.go | 124 +- .../baton-sdk/pkg/ratelimit/grpc.go | 52 +- .../baton-sdk/pkg/ratelimit/http.go | 4 +- .../pkg/ratelimit/mem_ratelimiter.go | 30 +- .../pkg/ratelimit/noop_ratelimiter.go | 10 +- .../baton-sdk/pkg/ratelimit/ratelimit.go | 4 +- .../conductorone/baton-sdk/pkg/retry/retry.go | 4 +- .../baton-sdk/pkg/sdk/empty_connector.go | 48 +- 
.../conductorone/baton-sdk/pkg/sdk/version.go | 2 +- .../baton-sdk/pkg/session/json.go | 136 - .../baton-sdk/pkg/session/json_session.go | 118 + .../baton-sdk/pkg/session/memory.go | 220 - .../baton-sdk/pkg/session/memory_cache.go | 218 + .../baton-sdk/pkg/session/noop_session.go | 52 + .../baton-sdk/pkg/session/session.go | 146 +- .../{grpc_session.go => session_client.go} | 217 +- .../baton-sdk/pkg/session/session_server.go | 215 + .../baton-sdk/pkg/session/typed_session.go | 156 + .../baton-sdk/pkg/sync/client_wrapper.go | 223 - .../baton-sdk/pkg/sync/expand/expander.go | 328 + .../baton-sdk/pkg/sync/expand/graph.go | 58 +- .../conductorone/baton-sdk/pkg/sync/state.go | 45 + .../conductorone/baton-sdk/pkg/sync/syncer.go | 1901 +++--- .../pkg/synccompactor/attached/attached.go | 12 + .../baton-sdk/pkg/synccompactor/compactor.go | 115 +- .../baton-sdk/pkg/tasks/c1api/actions.go | 29 +- .../pkg/tasks/c1api/bulk_create_tickets.go | 8 +- .../pkg/tasks/c1api/bulk_get_tickets.go | 8 +- .../pkg/tasks/c1api/create_account.go | 6 +- .../pkg/tasks/c1api/create_resource.go | 6 +- .../pkg/tasks/c1api/create_ticket.go | 6 +- .../pkg/tasks/c1api/delete_resource.go | 6 +- .../baton-sdk/pkg/tasks/c1api/full_sync.go | 30 +- .../baton-sdk/pkg/tasks/c1api/get_ticket.go | 10 +- .../baton-sdk/pkg/tasks/c1api/grant.go | 10 +- .../baton-sdk/pkg/tasks/c1api/hello.go | 43 +- .../pkg/tasks/c1api/list_ticket_schemas.go | 8 +- .../baton-sdk/pkg/tasks/c1api/manager.go | 49 +- .../baton-sdk/pkg/tasks/c1api/revoke.go | 6 +- .../pkg/tasks/c1api/rotate_credentials.go | 6 +- .../pkg/tasks/c1api/service_client.go | 48 +- .../baton-sdk/pkg/tasks/c1api/task_helpers.go | 12 +- .../baton-sdk/pkg/tasks/local/accounter.go | 6 +- .../pkg/tasks/local/action_invoker.go | 72 +- .../pkg/tasks/local/action_schema_list.go | 77 + .../baton-sdk/pkg/tasks/local/compactor.go | 6 +- .../baton-sdk/pkg/tasks/local/deleter.go | 6 +- .../baton-sdk/pkg/tasks/local/differ.go | 6 +- .../baton-sdk/pkg/tasks/local/event_feed.go | 22 +- .../baton-sdk/pkg/tasks/local/granter.go | 6 +- .../baton-sdk/pkg/tasks/local/revoker.go | 6 +- .../baton-sdk/pkg/tasks/local/rotator.go | 6 +- .../baton-sdk/pkg/tasks/local/syncer.go | 37 +- .../baton-sdk/pkg/tasks/local/ticket.go | 86 +- .../conductorone/baton-sdk/pkg/tasks/tasks.go | 90 +- .../pkg/types/entitlement/entitlement.go | 24 +- .../baton-sdk/pkg/types/grant/grant.go | 32 +- .../baton-sdk/pkg/types/resource/app_trait.go | 12 +- .../pkg/types/resource/group_trait.go | 6 +- .../baton-sdk/pkg/types/resource/resource.go | 57 +- .../pkg/types/resource/role_trait.go | 4 +- .../pkg/types/resource/secret_trait.go | 10 +- .../types/resource/security_insight_trait.go | 287 + .../pkg/types/resource/user_trait.go | 42 +- .../baton-sdk/pkg/types/session_cache.go | 58 - .../baton-sdk/pkg/types/sessions/sessions.go | 77 + .../baton-sdk/pkg/types/tasks/tasks.go | 7 + .../pkg/types/ticket/custom_fields.go | 426 +- .../baton-sdk/pkg/ugrpc/interceptors.go | 118 - .../baton-sdk/pkg/uhttp/dbcache.go | 21 +- .../baton-sdk/pkg/uhttp/gocache.go | 83 +- .../baton-sdk/pkg/uhttp/wrapper.go | 39 +- .../baton-sdk/pkg/uotel/config.go | 2 +- .../conductorone/baton-sdk/pkg/utls/certs.go | 8 +- .../conductorone/baton-sdk/pkg/utls/client.go | 8 +- .../baton-sdk/pkg/utls/listener.go | 8 +- vendor/github.com/dolthub/maphash/.gitignore | 2 - vendor/github.com/dolthub/maphash/README.md | 4 - vendor/github.com/dolthub/maphash/hasher.go | 48 - vendor/github.com/dolthub/maphash/runtime.go | 111 - .../github.com/ebitengine/purego/.gitignore | 1 + 
.../maphash => ebitengine/purego}/LICENSE | 4 +- vendor/github.com/ebitengine/purego/README.md | 113 + .../github.com/ebitengine/purego/abi_amd64.h | 99 + .../github.com/ebitengine/purego/abi_arm64.h | 39 + .../ebitengine/purego/abi_loong64.h | 60 + vendor/github.com/ebitengine/purego/cgo.go | 19 + .../github.com/ebitengine/purego/dlerror.go | 17 + vendor/github.com/ebitengine/purego/dlfcn.go | 99 + .../ebitengine/purego/dlfcn_android.go | 34 + .../ebitengine/purego/dlfcn_darwin.go | 19 + .../ebitengine/purego/dlfcn_freebsd.go | 14 + .../ebitengine/purego/dlfcn_linux.go | 16 + .../ebitengine/purego/dlfcn_netbsd.go | 15 + .../ebitengine/purego/dlfcn_nocgo_freebsd.go | 11 + .../ebitengine/purego/dlfcn_nocgo_linux.go | 19 + .../ebitengine/purego/dlfcn_nocgo_netbsd.go | 9 + .../ebitengine/purego/dlfcn_playground.go | 24 + .../ebitengine/purego/dlfcn_stubs.s | 26 + vendor/github.com/ebitengine/purego/func.go | 489 ++ vendor/github.com/ebitengine/purego/gen.go | 6 + .../ebitengine/purego/go_runtime.go | 13 + .../purego/internal/cgo/dlfcn_cgo_unix.go | 56 + .../ebitengine/purego/internal/cgo/empty.go | 6 + .../purego/internal/cgo/syscall_cgo_unix.go | 55 + .../purego/internal/fakecgo/abi_amd64.h | 99 + .../purego/internal/fakecgo/abi_arm64.h | 39 + .../purego/internal/fakecgo/abi_loong64.h | 60 + .../purego/internal/fakecgo/asm_amd64.s | 39 + .../purego/internal/fakecgo/asm_arm64.s | 36 + .../purego/internal/fakecgo/asm_loong64.s | 40 + .../purego/internal/fakecgo/callbacks.go | 93 + .../ebitengine/purego/internal/fakecgo/doc.go | 32 + .../purego/internal/fakecgo/freebsd.go | 27 + .../internal/fakecgo/go_darwin_amd64.go | 73 + .../internal/fakecgo/go_darwin_arm64.go | 88 + .../internal/fakecgo/go_freebsd_amd64.go | 95 + .../internal/fakecgo/go_freebsd_arm64.go | 98 + .../purego/internal/fakecgo/go_libinit.go | 72 + .../purego/internal/fakecgo/go_linux_amd64.go | 95 + .../purego/internal/fakecgo/go_linux_arm64.go | 98 + .../internal/fakecgo/go_linux_loong64.go | 92 + .../purego/internal/fakecgo/go_netbsd.go | 106 + .../purego/internal/fakecgo/go_setenv.go | 18 + .../purego/internal/fakecgo/go_util.go | 37 + .../purego/internal/fakecgo/iscgo.go | 19 + .../purego/internal/fakecgo/libcgo.go | 39 + .../purego/internal/fakecgo/libcgo_darwin.go | 26 + .../purego/internal/fakecgo/libcgo_freebsd.go | 20 + .../purego/internal/fakecgo/libcgo_linux.go | 20 + .../purego/internal/fakecgo/libcgo_netbsd.go | 26 + .../purego/internal/fakecgo/netbsd.go | 23 + .../purego/internal/fakecgo/setenv.go | 19 + .../purego/internal/fakecgo/symbols.go | 231 + .../purego/internal/fakecgo/symbols_darwin.go | 30 + .../internal/fakecgo/symbols_freebsd.go | 30 + .../purego/internal/fakecgo/symbols_linux.go | 30 + .../purego/internal/fakecgo/symbols_netbsd.go | 30 + .../internal/fakecgo/trampolines_amd64.s | 104 + .../internal/fakecgo/trampolines_arm64.s | 72 + .../internal/fakecgo/trampolines_loong64.s | 71 + .../internal/fakecgo/trampolines_stubs.s | 94 + .../purego/internal/strings/strings.go | 40 + vendor/github.com/ebitengine/purego/is_ios.go | 13 + vendor/github.com/ebitengine/purego/nocgo.go | 25 + .../ebitengine/purego/struct_amd64.go | 264 + .../ebitengine/purego/struct_arm64.go | 286 + .../ebitengine/purego/struct_loong64.go | 190 + .../ebitengine/purego/struct_other.go | 20 + .../github.com/ebitengine/purego/sys_amd64.s | 164 + .../github.com/ebitengine/purego/sys_arm64.s | 92 + .../ebitengine/purego/sys_loong64.s | 96 + .../ebitengine/purego/sys_unix_arm64.s | 70 + .../ebitengine/purego/sys_unix_loong64.s | 75 + 
.../github.com/ebitengine/purego/syscall.go | 56 + .../ebitengine/purego/syscall_cgo_linux.go | 21 + .../ebitengine/purego/syscall_sysv.go | 226 + .../ebitengine/purego/syscall_windows.go | 46 + .../ebitengine/purego/zcallback_amd64.s | 2014 +++++++ .../ebitengine/purego/zcallback_arm64.s | 4014 +++++++++++++ .../ebitengine/purego/zcallback_loong64.s | 4014 +++++++++++++ vendor/github.com/gammazero/deque/.gitignore | 26 - vendor/github.com/gammazero/deque/README.md | 80 - vendor/github.com/gammazero/deque/deque.go | 434 -- vendor/github.com/gammazero/deque/doc.go | 38 - .../github.com/maypok86/otter/.golangci.yml | 103 - vendor/github.com/maypok86/otter/CHANGELOG.md | 98 - vendor/github.com/maypok86/otter/README.md | 191 - vendor/github.com/maypok86/otter/builder.go | 311 - vendor/github.com/maypok86/otter/cache.go | 165 - vendor/github.com/maypok86/otter/entry.go | 82 - vendor/github.com/maypok86/otter/extension.go | 89 - .../maypok86/otter/internal/core/cache.go | 533 -- .../maypok86/otter/internal/core/task.go | 126 - .../maypok86/otter/internal/expiry/fixed.go | 47 - .../maypok86/otter/internal/expiry/queue.go | 89 - .../otter/internal/generated/node/b.go | 144 - .../otter/internal/generated/node/bc.go | 148 - .../otter/internal/generated/node/be.go | 160 - .../otter/internal/generated/node/bec.go | 164 - .../otter/internal/generated/node/manager.go | 143 - .../otter/internal/hashtable/bucket.go | 81 - .../maypok86/otter/internal/hashtable/map.go | 551 -- .../maypok86/otter/internal/lossy/buffer.go | 143 - .../maypok86/otter/internal/queue/growable.go | 135 - .../maypok86/otter/internal/s3fifo/ghost.go | 77 - .../maypok86/otter/internal/s3fifo/main.go | 87 - .../maypok86/otter/internal/s3fifo/policy.go | 106 - .../maypok86/otter/internal/s3fifo/queue.go | 75 - .../maypok86/otter/internal/s3fifo/small.go | 89 - .../maypok86/otter/internal/stats/counter.go | 108 - .../maypok86/otter/internal/stats/stats.go | 143 - .../otter/internal/unixtime/unixtime.go | 93 - vendor/github.com/maypok86/otter/stats.go | 95 - .../maypok86/otter/{ => v2}/.gitignore | 2 + .../maypok86/otter/v2/.golangci.yml | 110 + .../github.com/maypok86/otter/v2/CHANGELOG.md | 252 + .../otter/{ => v2}/CODE_OF_CONDUCT.md | 0 .../maypok86/otter/{ => v2}/CONTRIBUTING.md | 0 .../maypok86/otter/{ => v2}/LICENSE | 2 +- .../maypok86/otter/{ => v2}/Makefile | 19 +- vendor/github.com/maypok86/otter/v2/README.md | 234 + vendor/github.com/maypok86/otter/v2/cache.go | 470 ++ .../maypok86/otter/v2/cache_impl.go | 1868 ++++++ vendor/github.com/maypok86/otter/v2/clock.go | 232 + .../github.com/maypok86/otter/v2/deletion.go | 68 + vendor/github.com/maypok86/otter/v2/doc.go | 28 + vendor/github.com/maypok86/otter/v2/entry.go | 95 + vendor/github.com/maypok86/otter/v2/error.go | 55 + .../maypok86/otter/v2/expiry_calculator.go | 140 + .../otter/v2/internal/deque/linked.go | 231 + .../otter/v2/internal/deque/queue/mpsc.go | 320 + .../internal/expiration}/variable.go | 93 +- .../otter/v2/internal/generated/node/b.go | 163 + .../otter/v2/internal/generated/node/be.go | 180 + .../otter/v2/internal/generated/node/ber.go | 184 + .../otter/v2/internal/generated/node/berw.go | 199 + .../otter/v2/internal/generated/node/bew.go | 195 + .../otter/v2/internal/generated/node/br.go | 168 + .../otter/v2/internal/generated/node/brw.go | 185 + .../otter/v2/internal/generated/node/bs.go | 179 + .../otter/v2/internal/generated/node/bse.go | 193 + .../otter/v2/internal/generated/node/bser.go | 197 + .../otter/v2/internal/generated/node/bsr.go | 183 + 
.../otter/v2/internal/generated/node/bw.go | 181 + .../v2/internal/generated/node/manager.go | 186 + .../maypok86/otter/v2/internal/hashmap/map.go | 631 ++ .../internal/hashmap/node.go} | 27 +- .../maypok86/otter/v2/internal/lossy/ring.go | 131 + .../otter/v2/internal/lossy/striped.go | 235 + .../maypok86/otter/v2/internal/xiter/xiter.go | 63 + .../power.go => v2/internal/xmath/xmath.go} | 34 +- .../internal/xruntime/hasher.go} | 23 +- .../{ => v2}/internal/xruntime/xruntime.go | 17 +- .../maypok86/otter/v2/internal/xsync/adder.go | 91 + vendor/github.com/maypok86/otter/v2/loader.go | 106 + vendor/github.com/maypok86/otter/v2/logger.go | 52 + .../github.com/maypok86/otter/v2/mkdocs.yml | 162 + .../github.com/maypok86/otter/v2/options.go | 224 + .../maypok86/otter/v2/persistence.go | 155 + vendor/github.com/maypok86/otter/v2/policy.go | 542 ++ .../maypok86/otter/v2/refresh_calculator.go | 115 + .../maypok86/otter/v2/singleflight.go | 221 + vendor/github.com/maypok86/otter/v2/sketch.go | 172 + .../maypok86/otter/v2/stats/counter.go | 102 + .../xruntime/runtime.go => v2/stats/doc.go} | 15 +- .../maypok86/otter/v2/stats/recorder.go | 59 + .../maypok86/otter/v2/stats/stats.go | 153 + vendor/github.com/maypok86/otter/v2/task.go | 48 + .../shirou/gopsutil/v3/cpu/cpu_darwin.go | 117 - .../shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go | 111 - .../gopsutil/v3/cpu/cpu_darwin_nocgo.go | 14 - .../shirou/gopsutil/v3/cpu/cpu_windows.go | 229 - .../gopsutil/v3/host/host_darwin_cgo.go | 47 - .../gopsutil/v3/host/host_darwin_nocgo.go | 14 - .../shirou/gopsutil/v3/host/host_fallback.go | 50 - .../shirou/gopsutil/v3/host/smc_darwin.c | 169 - .../shirou/gopsutil/v3/host/smc_darwin.h | 32 - .../gopsutil/v3/internal/common/binary.go | 637 -- .../v3/internal/common/common_darwin.go | 66 - .../gopsutil/v3/internal/common/warnings.go | 30 - .../shirou/gopsutil/v3/mem/mem_darwin.go | 72 - .../shirou/gopsutil/v3/mem/mem_darwin_cgo.go | 58 - .../gopsutil/v3/mem/mem_darwin_nocgo.go | 89 - .../shirou/gopsutil/v3/net/net_fallback.go | 93 - .../shirou/gopsutil/v3/net/net_linux_111.go | 12 - .../shirou/gopsutil/v3/net/net_linux_116.go | 12 - .../shirou/gopsutil/v3/process/process_bsd.go | 76 - .../gopsutil/v3/process/process_darwin.go | 325 - .../gopsutil/v3/process/process_darwin_cgo.go | 222 - .../v3/process/process_darwin_nocgo.go | 134 - .../gopsutil/v3/process/process_fallback.go | 203 - .../v3/process/process_freebsd_amd64.go | 192 - .../gopsutil/v3/process/process_plan9.go | 203 - .../shirou/gopsutil/{v3 => v4}/LICENSE | 0 .../shirou/gopsutil/{v3 => v4}/common/env.go | 16 +- .../shirou/gopsutil/{v3 => v4}/cpu/cpu.go | 6 +- .../shirou/gopsutil/{v3 => v4}/cpu/cpu_aix.go | 2 +- .../gopsutil/{v3 => v4}/cpu/cpu_aix_cgo.go | 2 +- .../gopsutil/{v3 => v4}/cpu/cpu_aix_nocgo.go | 20 +- .../shirou/gopsutil/v4/cpu/cpu_darwin.go | 203 + .../gopsutil/v4/cpu/cpu_darwin_arm64.go | 80 + .../gopsutil/v4/cpu/cpu_darwin_fallback.go | 13 + .../gopsutil/{v3 => v4}/cpu/cpu_dragonfly.go | 18 +- .../{v3 => v4}/cpu/cpu_dragonfly_amd64.go | 1 + .../gopsutil/{v3 => v4}/cpu/cpu_fallback.go | 4 +- .../gopsutil/{v3 => v4}/cpu/cpu_freebsd.go | 22 +- .../{v3 => v4}/cpu/cpu_freebsd_386.go | 1 + .../{v3 => v4}/cpu/cpu_freebsd_amd64.go | 1 + .../{v3 => v4}/cpu/cpu_freebsd_arm.go | 1 + .../{v3 => v4}/cpu/cpu_freebsd_arm64.go | 1 + .../gopsutil/{v3 => v4}/cpu/cpu_linux.go | 36 +- .../gopsutil/{v3 => v4}/cpu/cpu_netbsd.go | 20 +- .../{v3 => v4}/cpu/cpu_netbsd_amd64.go | 1 + .../shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go | 10 + .../{v3 => 
v4}/cpu/cpu_netbsd_arm64.go | 1 + .../gopsutil/{v3 => v4}/cpu/cpu_openbsd.go | 20 +- .../{v3 => v4}/cpu/cpu_openbsd_386.go | 1 + .../{v3 => v4}/cpu/cpu_openbsd_amd64.go | 1 + .../{v3 => v4}/cpu/cpu_openbsd_arm.go | 1 + .../{v3 => v4}/cpu/cpu_openbsd_arm64.go | 1 + .../{v3 => v4}/cpu/cpu_openbsd_riscv64.go | 1 + .../gopsutil/{v3 => v4}/cpu/cpu_plan9.go | 11 +- .../gopsutil/{v3 => v4}/cpu/cpu_solaris.go | 46 +- .../shirou/gopsutil/v4/cpu/cpu_windows.go | 477 ++ .../shirou/gopsutil/{v3 => v4}/host/host.go | 38 +- .../gopsutil/{v3 => v4}/host/host_aix.go | 120 +- .../{v3 => v4}/host/host_aix_ppc64.go | 1 - .../gopsutil/{v3 => v4}/host/host_bsd.go | 4 +- .../gopsutil/{v3 => v4}/host/host_darwin.go | 17 +- .../{v3 => v4}/host/host_darwin_amd64.go | 1 + .../{v3 => v4}/host/host_darwin_arm64.go | 2 +- .../shirou/gopsutil/v4/host/host_fallback.go | 46 + .../gopsutil/{v3 => v4}/host/host_freebsd.go | 22 +- .../{v3 => v4}/host/host_freebsd_386.go | 1 + .../{v3 => v4}/host/host_freebsd_amd64.go | 1 + .../{v3 => v4}/host/host_freebsd_arm.go | 1 + .../{v3 => v4}/host/host_freebsd_arm64.go | 2 +- .../gopsutil/{v3 => v4}/host/host_linux.go | 214 +- .../{v3 => v4}/host/host_linux_386.go | 17 +- .../{v3 => v4}/host/host_linux_amd64.go | 1 + .../{v3 => v4}/host/host_linux_arm.go | 1 + .../{v3 => v4}/host/host_linux_arm64.go | 2 +- .../{v3 => v4}/host/host_linux_loong64.go | 2 +- .../{v3 => v4}/host/host_linux_mips.go | 1 + .../{v3 => v4}/host/host_linux_mips64.go | 1 + .../{v3 => v4}/host/host_linux_mips64le.go | 1 + .../{v3 => v4}/host/host_linux_mipsle.go | 1 + .../{v3 => v4}/host/host_linux_ppc64.go | 2 +- .../{v3 => v4}/host/host_linux_ppc64le.go | 2 +- .../{v3 => v4}/host/host_linux_riscv64.go | 1 + .../{v3 => v4}/host/host_linux_s390x.go | 2 +- .../gopsutil/{v3 => v4}/host/host_netbsd.go | 19 +- .../gopsutil/{v3 => v4}/host/host_openbsd.go | 22 +- .../{v3 => v4}/host/host_openbsd_386.go | 2 +- .../{v3 => v4}/host/host_openbsd_amd64.go | 1 + .../{v3 => v4}/host/host_openbsd_arm.go | 2 +- .../{v3 => v4}/host/host_openbsd_arm64.go | 2 +- .../{v3 => v4}/host/host_openbsd_riscv64.go | 2 +- .../gopsutil/{v3 => v4}/host/host_posix.go | 2 +- .../gopsutil/{v3 => v4}/host/host_solaris.go | 68 +- .../gopsutil/{v3 => v4}/host/host_windows.go | 107 +- .../{v3 => v4}/internal/common/common.go | 56 +- .../v4/internal/common/common_darwin.go | 382 ++ .../internal/common/common_freebsd.go | 19 +- .../internal/common/common_linux.go | 76 +- .../internal/common/common_netbsd.go | 19 +- .../internal/common/common_openbsd.go | 19 +- .../{v3 => v4}/internal/common/common_unix.go | 24 +- .../internal/common/common_windows.go | 25 +- .../{v3 => v4}/internal/common/endian.go | 1 + .../v4/internal/common/readlink_linux.go | 53 + .../{v3 => v4}/internal/common/sleep.go | 1 + .../gopsutil/v4/internal/common/warnings.go | 53 + .../shirou/gopsutil/v4/mem/ex_linux.go | 40 + .../shirou/gopsutil/v4/mem/ex_windows.go | 62 + .../shirou/gopsutil/{v3 => v4}/mem/mem.go | 5 +- .../shirou/gopsutil/{v3 => v4}/mem/mem_aix.go | 8 +- .../gopsutil/{v3 => v4}/mem/mem_aix_cgo.go | 2 +- .../gopsutil/{v3 => v4}/mem/mem_aix_nocgo.go | 4 +- .../shirou/gopsutil/{v3 => v4}/mem/mem_bsd.go | 2 +- .../shirou/gopsutil/v4/mem/mem_darwin.go | 130 + .../gopsutil/{v3 => v4}/mem/mem_fallback.go | 10 +- .../gopsutil/{v3 => v4}/mem/mem_freebsd.go | 17 +- .../gopsutil/{v3 => v4}/mem/mem_linux.go | 62 +- .../gopsutil/{v3 => v4}/mem/mem_netbsd.go | 6 +- .../gopsutil/{v3 => v4}/mem/mem_openbsd.go | 12 +- .../{v3 => v4}/mem/mem_openbsd_386.go | 2 +- .../{v3 => 
v4}/mem/mem_openbsd_amd64.go | 1 + .../{v3 => v4}/mem/mem_openbsd_arm.go | 2 +- .../{v3 => v4}/mem/mem_openbsd_arm64.go | 2 +- .../{v3 => v4}/mem/mem_openbsd_riscv64.go | 2 +- .../gopsutil/{v3 => v4}/mem/mem_plan9.go | 7 +- .../gopsutil/{v3 => v4}/mem/mem_solaris.go | 26 +- .../gopsutil/{v3 => v4}/mem/mem_windows.go | 65 +- .../shirou/gopsutil/{v3 => v4}/net/net.go | 93 +- .../shirou/gopsutil/{v3 => v4}/net/net_aix.go | 138 +- .../gopsutil/{v3 => v4}/net/net_aix_cgo.go | 6 +- .../gopsutil/{v3 => v4}/net/net_aix_nocgo.go | 12 +- .../gopsutil/{v3 => v4}/net/net_darwin.go | 68 +- .../shirou/gopsutil/v4/net/net_fallback.go | 71 + .../gopsutil/{v3 => v4}/net/net_freebsd.go | 44 +- .../gopsutil/{v3 => v4}/net/net_linux.go | 199 +- .../gopsutil/{v3 => v4}/net/net_openbsd.go | 142 +- .../gopsutil/{v3 => v4}/net/net_solaris.go | 73 +- .../gopsutil/{v3 => v4}/net/net_unix.go | 90 +- .../gopsutil/{v3 => v4}/net/net_windows.go | 130 +- .../gopsutil/{v3 => v4}/process/process.go | 72 +- .../shirou/gopsutil/v4/process/process_bsd.go | 76 + .../gopsutil/v4/process/process_darwin.go | 490 ++ .../process/process_darwin_amd64.go | 22 + .../process/process_darwin_arm64.go | 23 +- .../gopsutil/v4/process/process_fallback.go | 203 + .../{v3 => v4}/process/process_freebsd.go | 105 +- .../{v3 => v4}/process/process_freebsd_386.go | 26 + .../v4/process/process_freebsd_amd64.go | 224 + .../{v3 => v4}/process/process_freebsd_arm.go | 26 + .../process/process_freebsd_arm64.go | 24 + .../{v3 => v4}/process/process_linux.go | 128 +- .../{v3 => v4}/process/process_openbsd.go | 81 +- .../{v3 => v4}/process/process_openbsd_386.go | 2 +- .../process/process_openbsd_amd64.go | 1 + .../{v3 => v4}/process/process_openbsd_arm.go | 2 +- .../process/process_openbsd_arm64.go | 2 +- .../process/process_openbsd_riscv64.go | 2 +- .../gopsutil/v4/process/process_plan9.go | 203 + .../{v3 => v4}/process/process_posix.go | 13 +- .../{v3 => v4}/process/process_solaris.go | 73 +- .../{v3 => v4}/process/process_windows.go | 195 +- .../process/process_windows_32bit.go | 76 +- .../process/process_windows_64bit.go | 38 +- .../shoenig/go-m1cpu/.golangci.yaml | 12 - vendor/github.com/shoenig/go-m1cpu/LICENSE | 363 -- vendor/github.com/shoenig/go-m1cpu/Makefile | 12 - vendor/github.com/shoenig/go-m1cpu/README.md | 66 - vendor/github.com/shoenig/go-m1cpu/cpu.go | 213 - .../shoenig/go-m1cpu/incompatible.go | 53 - .../testify/assert/assertion_compare.go | 22 +- .../testify/assert/assertion_format.go | 51 +- .../testify/assert/assertion_forward.go | 102 +- .../testify/assert/assertion_order.go | 2 +- .../stretchr/testify/assert/assertions.go | 367 +- .../github.com/stretchr/testify/assert/doc.go | 4 + .../testify/assert/http_assertions.go | 4 +- .../testify/assert/yaml/yaml_custom.go | 1 - .../testify/assert/yaml/yaml_default.go | 1 - .../stretchr/testify/assert/yaml/yaml_fail.go | 1 - .../stretchr/testify/require/doc.go | 2 + .../stretchr/testify/require/require.go | 108 +- .../testify/require/require_forward.go | 102 +- .../tklauser/go-sysconf/.cirrus.yml | 6 +- .../tklauser/go-sysconf/sysconf_netbsd.go | 20 +- .../github.com/tklauser/numcpus/.cirrus.yml | 6 +- .../tklauser/numcpus/numcpus_linux.go | 10 +- vendor/golang.org/x/sys/cpu/cpu.go | 3 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 20 +- vendor/golang.org/x/sys/cpu/cpu_arm64.s | 19 +- vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 1 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 1 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 2 +- .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 2 +- 
.../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 - vendor/golang.org/x/sys/plan9/pwd_plan9.go | 14 +- .../golang.org/x/sys/unix/affinity_linux.go | 9 +- vendor/golang.org/x/sys/unix/fdset.go | 4 +- vendor/golang.org/x/sys/unix/ifreq_linux.go | 4 +- vendor/golang.org/x/sys/unix/mkall.sh | 1 + vendor/golang.org/x/sys/unix/mkerrors.sh | 5 + .../golang.org/x/sys/unix/syscall_darwin.go | 56 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 10 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 17 + .../golang.org/x/sys/unix/syscall_solaris.go | 2 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 403 +- .../x/sys/unix/zerrors_linux_386.go | 2 + .../x/sys/unix/zerrors_linux_amd64.go | 2 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 2 + .../x/sys/unix/zerrors_linux_loong64.go | 2 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + .../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + .../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../golang.org/x/sys/unix/zsyscall_linux.go | 10 + .../x/sys/unix/zsyscall_solaris_amd64.go | 8 +- .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 109 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 30 +- .../x/sys/unix/ztypes_linux_amd64.go | 28 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 32 +- .../x/sys/unix/ztypes_linux_arm64.go | 28 +- .../x/sys/unix/ztypes_linux_loong64.go | 28 +- .../x/sys/unix/ztypes_linux_mips.go | 30 +- .../x/sys/unix/ztypes_linux_mips64.go | 28 +- .../x/sys/unix/ztypes_linux_mips64le.go | 28 +- .../x/sys/unix/ztypes_linux_mipsle.go | 30 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 32 +- .../x/sys/unix/ztypes_linux_ppc64.go | 28 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 28 +- .../x/sys/unix/ztypes_linux_riscv64.go | 28 +- .../x/sys/unix/ztypes_linux_s390x.go | 28 +- .../x/sys/unix/ztypes_linux_sparc64.go | 28 +- .../sys/windows/registry/zsyscall_windows.go | 16 +- .../x/sys/windows/syscall_windows.go | 17 + .../golang.org/x/sys/windows/types_windows.go | 98 + .../x/sys/windows/zsyscall_windows.go | 1021 ++-- vendor/modules.txt | 84 +- 673 files changed, 102930 insertions(+), 24240 deletions(-) create mode 100644 vendor/github.com/Masterminds/semver/v3/.gitignore create mode 100644 vendor/github.com/Masterminds/semver/v3/.golangci.yml create mode 100644 vendor/github.com/Masterminds/semver/v3/CHANGELOG.md rename vendor/github.com/{gammazero/deque/LICENSE => Masterminds/semver/v3/LICENSE.txt} (86%) create mode 100644 vendor/github.com/Masterminds/semver/v3/Makefile create mode 100644 vendor/github.com/Masterminds/semver/v3/README.md create mode 
100644 vendor/github.com/Masterminds/semver/v3/SECURITY.md create mode 100644 vendor/github.com/Masterminds/semver/v3/collection.go create mode 100644 vendor/github.com/Masterminds/semver/v3/constraints.go create mode 100644 vendor/github.com/Masterminds/semver/v3/doc.go create mode 100644 vendor/github.com/Masterminds/semver/v3/version.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/annotation_sync_details_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/diff_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_baton_id_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_etag_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_link_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_ratelimit_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_request_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.validate.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight_protoopaque.pb.go delete mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_sync_id.pb.go delete mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_sync_id.pb.validate.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_v1_identifier_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/asset_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/config_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant_protoopaque.pb.go create mode 100644 
vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1/connector_wrapper_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1/ratelimit_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/entitlement_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/grant_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/resource_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync_protoopaque.pb.go delete mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport.proto create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/utls/v1/tls_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/actions/args.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/cli/lazy_session.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/actions.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/credentials.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/events.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_syncer.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/session_store.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/tickets.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/session_store.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/field/decode_hooks.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/field/field_group.go delete mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/session/json.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/session/json_session.go delete mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/session/memory.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/session/memory_cache.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/session/noop_session.go rename vendor/github.com/conductorone/baton-sdk/pkg/session/{grpc_session.go => session_client.go} (55%) create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/session/session_server.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/session/typed_session.go 
delete mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/sync/client_wrapper.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/expander.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/action_schema_list.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go delete mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/types/session_cache.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/types/sessions/sessions.go delete mode 100644 vendor/github.com/dolthub/maphash/.gitignore delete mode 100644 vendor/github.com/dolthub/maphash/README.md delete mode 100644 vendor/github.com/dolthub/maphash/hasher.go delete mode 100644 vendor/github.com/dolthub/maphash/runtime.go create mode 100644 vendor/github.com/ebitengine/purego/.gitignore rename vendor/github.com/{dolthub/maphash => ebitengine/purego}/LICENSE (99%) create mode 100644 vendor/github.com/ebitengine/purego/README.md create mode 100644 vendor/github.com/ebitengine/purego/abi_amd64.h create mode 100644 vendor/github.com/ebitengine/purego/abi_arm64.h create mode 100644 vendor/github.com/ebitengine/purego/abi_loong64.h create mode 100644 vendor/github.com/ebitengine/purego/cgo.go create mode 100644 vendor/github.com/ebitengine/purego/dlerror.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_android.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_linux.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_netbsd.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_nocgo_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_nocgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_nocgo_netbsd.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_playground.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_stubs.s create mode 100644 vendor/github.com/ebitengine/purego/func.go create mode 100644 vendor/github.com/ebitengine/purego/gen.go create mode 100644 vendor/github.com/ebitengine/purego/go_runtime.go create mode 100644 vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go create mode 100644 vendor/github.com/ebitengine/purego/internal/cgo/empty.go create mode 100644 vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/abi_loong64.h create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/asm_loong64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go create mode 100644 
vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_loong64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_netbsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_netbsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/netbsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_netbsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_loong64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s create mode 100644 vendor/github.com/ebitengine/purego/internal/strings/strings.go create mode 100644 vendor/github.com/ebitengine/purego/is_ios.go create mode 100644 vendor/github.com/ebitengine/purego/nocgo.go create mode 100644 vendor/github.com/ebitengine/purego/struct_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/struct_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/struct_loong64.go create mode 100644 vendor/github.com/ebitengine/purego/struct_other.go create mode 100644 vendor/github.com/ebitengine/purego/sys_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/sys_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/sys_loong64.s create mode 100644 vendor/github.com/ebitengine/purego/sys_unix_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/sys_unix_loong64.s create mode 100644 vendor/github.com/ebitengine/purego/syscall.go create mode 100644 vendor/github.com/ebitengine/purego/syscall_cgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/syscall_sysv.go create mode 100644 vendor/github.com/ebitengine/purego/syscall_windows.go create mode 100644 vendor/github.com/ebitengine/purego/zcallback_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/zcallback_arm64.s 
create mode 100644 vendor/github.com/ebitengine/purego/zcallback_loong64.s delete mode 100644 vendor/github.com/gammazero/deque/.gitignore delete mode 100644 vendor/github.com/gammazero/deque/README.md delete mode 100644 vendor/github.com/gammazero/deque/deque.go delete mode 100644 vendor/github.com/gammazero/deque/doc.go delete mode 100644 vendor/github.com/maypok86/otter/.golangci.yml delete mode 100644 vendor/github.com/maypok86/otter/CHANGELOG.md delete mode 100644 vendor/github.com/maypok86/otter/README.md delete mode 100644 vendor/github.com/maypok86/otter/builder.go delete mode 100644 vendor/github.com/maypok86/otter/cache.go delete mode 100644 vendor/github.com/maypok86/otter/entry.go delete mode 100644 vendor/github.com/maypok86/otter/extension.go delete mode 100644 vendor/github.com/maypok86/otter/internal/core/cache.go delete mode 100644 vendor/github.com/maypok86/otter/internal/core/task.go delete mode 100644 vendor/github.com/maypok86/otter/internal/expiry/fixed.go delete mode 100644 vendor/github.com/maypok86/otter/internal/expiry/queue.go delete mode 100644 vendor/github.com/maypok86/otter/internal/generated/node/b.go delete mode 100644 vendor/github.com/maypok86/otter/internal/generated/node/bc.go delete mode 100644 vendor/github.com/maypok86/otter/internal/generated/node/be.go delete mode 100644 vendor/github.com/maypok86/otter/internal/generated/node/bec.go delete mode 100644 vendor/github.com/maypok86/otter/internal/generated/node/manager.go delete mode 100644 vendor/github.com/maypok86/otter/internal/hashtable/bucket.go delete mode 100644 vendor/github.com/maypok86/otter/internal/hashtable/map.go delete mode 100644 vendor/github.com/maypok86/otter/internal/lossy/buffer.go delete mode 100644 vendor/github.com/maypok86/otter/internal/queue/growable.go delete mode 100644 vendor/github.com/maypok86/otter/internal/s3fifo/ghost.go delete mode 100644 vendor/github.com/maypok86/otter/internal/s3fifo/main.go delete mode 100644 vendor/github.com/maypok86/otter/internal/s3fifo/policy.go delete mode 100644 vendor/github.com/maypok86/otter/internal/s3fifo/queue.go delete mode 100644 vendor/github.com/maypok86/otter/internal/s3fifo/small.go delete mode 100644 vendor/github.com/maypok86/otter/internal/stats/counter.go delete mode 100644 vendor/github.com/maypok86/otter/internal/stats/stats.go delete mode 100644 vendor/github.com/maypok86/otter/internal/unixtime/unixtime.go delete mode 100644 vendor/github.com/maypok86/otter/stats.go rename vendor/github.com/maypok86/otter/{ => v2}/.gitignore (91%) create mode 100644 vendor/github.com/maypok86/otter/v2/.golangci.yml create mode 100644 vendor/github.com/maypok86/otter/v2/CHANGELOG.md rename vendor/github.com/maypok86/otter/{ => v2}/CODE_OF_CONDUCT.md (100%) rename vendor/github.com/maypok86/otter/{ => v2}/CONTRIBUTING.md (100%) rename vendor/github.com/maypok86/otter/{ => v2}/LICENSE (99%) rename vendor/github.com/maypok86/otter/{ => v2}/Makefile (71%) create mode 100644 vendor/github.com/maypok86/otter/v2/README.md create mode 100644 vendor/github.com/maypok86/otter/v2/cache.go create mode 100644 vendor/github.com/maypok86/otter/v2/cache_impl.go create mode 100644 vendor/github.com/maypok86/otter/v2/clock.go create mode 100644 vendor/github.com/maypok86/otter/v2/deletion.go create mode 100644 vendor/github.com/maypok86/otter/v2/doc.go create mode 100644 vendor/github.com/maypok86/otter/v2/entry.go create mode 100644 vendor/github.com/maypok86/otter/v2/error.go create mode 100644 vendor/github.com/maypok86/otter/v2/expiry_calculator.go 
create mode 100644 vendor/github.com/maypok86/otter/v2/internal/deque/linked.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/deque/queue/mpsc.go rename vendor/github.com/maypok86/otter/{internal/expiry => v2/internal/expiration}/variable.go (57%) create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/b.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/be.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/ber.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/berw.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/bew.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/br.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/brw.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/bs.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/bse.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/bser.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/bsr.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/bw.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/generated/node/manager.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/hashmap/map.go rename vendor/github.com/maypok86/otter/{internal/expiry/disabled.go => v2/internal/hashmap/node.go} (55%) create mode 100644 vendor/github.com/maypok86/otter/v2/internal/lossy/ring.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/lossy/striped.go create mode 100644 vendor/github.com/maypok86/otter/v2/internal/xiter/xiter.go rename vendor/github.com/maypok86/otter/{internal/xmath/power.go => v2/internal/xmath/xmath.go} (65%) rename vendor/github.com/maypok86/otter/{internal/xruntime/runtime_1.22.go => v2/internal/xruntime/hasher.go} (64%) rename vendor/github.com/maypok86/otter/{ => v2}/internal/xruntime/xruntime.go (73%) create mode 100644 vendor/github.com/maypok86/otter/v2/internal/xsync/adder.go create mode 100644 vendor/github.com/maypok86/otter/v2/loader.go create mode 100644 vendor/github.com/maypok86/otter/v2/logger.go create mode 100644 vendor/github.com/maypok86/otter/v2/mkdocs.yml create mode 100644 vendor/github.com/maypok86/otter/v2/options.go create mode 100644 vendor/github.com/maypok86/otter/v2/persistence.go create mode 100644 vendor/github.com/maypok86/otter/v2/policy.go create mode 100644 vendor/github.com/maypok86/otter/v2/refresh_calculator.go create mode 100644 vendor/github.com/maypok86/otter/v2/singleflight.go create mode 100644 vendor/github.com/maypok86/otter/v2/sketch.go create mode 100644 vendor/github.com/maypok86/otter/v2/stats/counter.go rename vendor/github.com/maypok86/otter/{internal/xruntime/runtime.go => v2/stats/doc.go} (73%) create mode 100644 vendor/github.com/maypok86/otter/v2/stats/recorder.go create mode 100644 vendor/github.com/maypok86/otter/v2/stats/stats.go create mode 100644 vendor/github.com/maypok86/otter/v2/task.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go delete mode 100644 
vendor/github.com/shirou/gopsutil/v3/host/host_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/host/host_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c delete mode 100644 vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/LICENSE (100%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/common/env.go (51%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix.go (85%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix_cgo.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix_nocgo.go (85%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_dragonfly.go (88%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_dragonfly_amd64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_fallback.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd.go (89%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_386.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_amd64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_arm.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_arm64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_linux.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd.go (84%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd_amd64.go (71%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd_arm64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd.go (86%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_386.go (73%) rename 
vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_amd64.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_arm.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_arm64.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_riscv64.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_plan9.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_solaris.go (84%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host.go (86%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_aix.go (59%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_aix_ppc64.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_bsd.go (86%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_darwin.go (85%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_darwin_amd64.go (88%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_darwin_arm64.go (89%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/host/host_fallback.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_freebsd.go (81%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_freebsd_386.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_freebsd_amd64.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_freebsd_arm.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_freebsd_arm64.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux.go (59%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_386.go (60%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_amd64.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_arm.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_arm64.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_loong64.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_mips.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_mips64.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_mips64le.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_mipsle.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_ppc64.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_ppc64le.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_riscv64.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_linux_s390x.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_netbsd.go (56%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_openbsd.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_openbsd_386.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_openbsd_amd64.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_openbsd_arm.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_openbsd_arm64.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_openbsd_riscv64.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_posix.go (84%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/host/host_solaris.go (70%) rename vendor/github.com/shirou/gopsutil/{v3 => 
v4}/host/host_windows.go (76%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common.go (88%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_freebsd.go (74%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_linux.go (82%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_netbsd.go (66%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_openbsd.go (66%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_unix.go (61%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_windows.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/endian.go (88%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/readlink_linux.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/sleep.go (89%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix.go (58%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix_cgo.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix_nocgo.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_bsd.go (98%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_fallback.go (62%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_freebsd.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_linux.go (89%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_netbsd.go (90%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd.go (87%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_386.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_amd64.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_arm.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_arm64.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_riscv64.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_plan9.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_solaris.go (90%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_windows.go (67%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net.go (67%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix.go (61%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix_cgo.go (88%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix_nocgo.go (89%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_darwin.go (77%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_freebsd.go (57%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_linux.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_openbsd.go (64%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_solaris.go (58%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_unix.go (56%) rename 
vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_windows.go (80%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process.go (88%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_amd64.go (87%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_arm64.go (85%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd.go (66%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_386.go (83%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_arm.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_arm64.go (88%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_linux.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd.go (75%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_386.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_amd64.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_arm.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_arm64.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_riscv64.go (98%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_posix.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_solaris.go (68%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows_32bit.go (52%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows_64bit.go (68%) delete mode 100644 vendor/github.com/shoenig/go-m1cpu/.golangci.yaml delete mode 100644 vendor/github.com/shoenig/go-m1cpu/LICENSE delete mode 100644 vendor/github.com/shoenig/go-m1cpu/Makefile delete mode 100644 vendor/github.com/shoenig/go-m1cpu/README.md delete mode 100644 vendor/github.com/shoenig/go-m1cpu/cpu.go delete mode 100644 vendor/github.com/shoenig/go-m1cpu/incompatible.go delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go diff --git a/go.mod b/go.mod index 4d01443d..34ae51f5 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module github.com/conductorone/baton-github -go 1.25 +go 1.25.2 require ( - github.com/conductorone/baton-sdk v0.4.2 + github.com/conductorone/baton-sdk v0.6.9 github.com/deckarep/golang-set/v2 v2.8.0 github.com/ennyjfrick/ruleguard-logfatal v0.0.2 github.com/golang-jwt/jwt/v5 v5.2.2 @@ -12,7 +12,7 @@ require ( github.com/migueleliasweb/go-github-mock v1.1.0 github.com/quasilyte/go-ruleguard/dsl v0.3.22 github.com/shurcooL/githubv4 v0.0.0-20240727222349-48295856cce7 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 go.uber.org/zap v1.27.0 golang.org/x/oauth2 v0.29.0 golang.org/x/text v0.24.0 @@ -23,6 +23,7 @@ require ( require ( filippo.io/age v1.2.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/aws/aws-lambda-go v1.47.0 // indirect 
github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect @@ -50,12 +51,11 @@ require ( github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 // indirect github.com/conductorone/dpop/integrations/dpop_oauth2 v0.2.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dolthub/maphash v0.1.0 // indirect github.com/doug-martin/goqu/v9 v9.19.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect + github.com/ebitengine/purego v0.9.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect - github.com/gammazero/deque v1.0.0 // indirect github.com/glebarez/go-sqlite v1.22.0 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -74,7 +74,7 @@ require ( github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect github.com/magiconair/properties v1.8.9 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/maypok86/otter v1.2.4 // indirect + github.com/maypok86/otter/v2 v2.2.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect @@ -87,8 +87,7 @@ require ( github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect - github.com/shirou/gopsutil/v3 v3.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shirou/gopsutil/v4 v4.25.11 // indirect github.com/shurcooL/graphql v0.0.0-20230722043721-ed46e5a46466 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect @@ -97,8 +96,8 @@ require ( github.com/spf13/pflag v1.0.6 // indirect github.com/spf13/viper v1.19.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/tklauser/go-sysconf v0.3.14 // indirect - github.com/tklauser/numcpus v0.9.0 // indirect + github.com/tklauser/go-sysconf v0.3.16 // indirect + github.com/tklauser/numcpus v0.11.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect @@ -119,7 +118,7 @@ require ( golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect golang.org/x/net v0.35.0 // indirect golang.org/x/sync v0.13.0 // indirect - golang.org/x/sys v0.34.0 // indirect + golang.org/x/sys v0.38.0 // indirect golang.org/x/term v0.33.0 // indirect golang.org/x/time v0.8.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect diff --git a/go.sum b/go.sum index 5df91713..65631f28 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,8 @@ filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= github.com/aws/aws-lambda-go v1.47.0/go.mod 
h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= @@ -58,8 +60,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/conductorone/baton-sdk v0.4.2 h1:hj/aXdaz850EVqnErw9DN/+3864LSdVcwa6p3vMrAfI= -github.com/conductorone/baton-sdk v0.4.2/go.mod h1:Csa1C2KrI4TxJAtC3WjQqOn24u0g2f4/5FgiYqZWpN4= +github.com/conductorone/baton-sdk v0.6.9 h1:HckTc+QeoL/K4FAOrvrsTIDb65898ft/m2YIty/YBgk= +github.com/conductorone/baton-sdk v0.6.9/go.mod h1:9S5feBOuIJxlNdGmkv3ObkCNHbVyOHr6foNrIrk+d4Y= github.com/conductorone/dpop v0.2.3 h1:s91U3845GHQ6P6FWrdNr2SEOy1ES/jcFs1JtKSl2S+o= github.com/conductorone/dpop v0.2.3/go.mod h1:gyo8TtzB9SCFCsjsICH4IaLZ7y64CcrDXMOPBwfq/3s= github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 h1:kLMCNIh0Mo2vbvvkCmJ3ixsPbXEJ6HPcW53Ku9yje3s= @@ -74,12 +76,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ= github.com/deckarep/golang-set/v2 v2.8.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ= -github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4= github.com/doug-martin/goqu/v9 v9.19.0 h1:PD7t1X3tRcUiSdc5TEyOFKujZA5gs3VSA7wxSvBx7qo= github.com/doug-martin/goqu/v9 v9.19.0/go.mod h1:nf0Wc2/hV3gYK9LiyqIrzBEVGlI8qW3GuDCEobC4wBQ= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A= +github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/ennyjfrick/ruleguard-logfatal v0.0.2 h1:FlNMe9+h029VZVD8n6YdFzZAQz/aA8y6WSZttg50yBM= github.com/ennyjfrick/ruleguard-logfatal v0.0.2/go.mod h1:Ng4Cc8dzYEo8vzB2xd+IOxsO8X1OqO9mNnY4jbngQac= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -92,8 +94,6 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= -github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo= github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= @@ -176,8 +176,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-sqlite3 v1.14.7/go.mod 
h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/maypok86/otter v1.2.4 h1:HhW1Pq6VdJkmWwcZZq19BlEQkHtI8xgsQzBVXJU0nfc= -github.com/maypok86/otter v1.2.4/go.mod h1:mKLfoI7v1HOmQMwFgX4QkRk23mX6ge3RDvjdHOWG4R4= +github.com/maypok86/otter/v2 v2.2.1 h1:hnGssisMFkdisYcvQ8L019zpYQcdtPse+g0ps2i7cfI= +github.com/maypok86/otter/v2 v2.2.1/go.mod h1:1NKY9bY+kB5jwCXBJfE59u+zAwOt6C7ni1FTlFFMqVs= github.com/migueleliasweb/go-github-mock v1.1.0 h1:GKaOBPsrPGkAKgtfuWY8MclS1xR6MInkx1SexJucMwE= github.com/migueleliasweb/go-github-mock v1.1.0/go.mod h1:pYe/XlGs4BGMfRY4vmeixVsODHnVDDhJ9zoi0qzSMHc= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -219,12 +219,8 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= -github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= -github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shirou/gopsutil/v4 v4.25.11 h1:X53gB7muL9Gnwwo2evPSE+SfOrltMoR6V3xJAXZILTY= +github.com/shirou/gopsutil/v4 v4.25.11/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU= github.com/shurcooL/githubv4 v0.0.0-20240727222349-48295856cce7 h1:cYCy18SHPKRkvclm+pWm1Lk4YrREb4IOIb/YdFO0p2M= github.com/shurcooL/githubv4 v0.0.0-20240727222349-48295856cce7/go.mod h1:zqMwyHmnN/eDOZOdiTohqIUKUrTFX62PNlu7IJdu0q8= github.com/shurcooL/graphql v0.0.0-20230722043721-ed46e5a46466 h1:17JxqqJY66GmZVHkmAsGEkcIu0oCe3AM420QDgGwZx0= @@ -252,14 +248,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= -github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= -github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= -github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= 
+github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= +github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI= +github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw= +github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= @@ -358,8 +354,8 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/pkg/config/conf.gen.go b/pkg/config/conf.gen.go index d6d0b78c..7b3c4536 100644 --- a/pkg/config/conf.gen.go +++ b/pkg/config/conf.gen.go @@ -1,17 +1,17 @@ // Code generated by baton-sdk. DO NOT EDIT!!! package config -import "reflect" +import "reflect" type Github struct { - Token string `mapstructure:"token"` - Orgs []string `mapstructure:"orgs"` - Enterprises []string `mapstructure:"enterprises"` - InstanceUrl string `mapstructure:"instance-url"` - SyncSecrets bool `mapstructure:"sync-secrets"` - OmitArchivedRepositories bool `mapstructure:"omit-archived-repositories"` - AppId string `mapstructure:"app-id"` - AppPrivatekeyPath string `mapstructure:"app-privatekey-path"` + Token string `mapstructure:"token"` + Orgs []string `mapstructure:"orgs"` + Enterprises []string `mapstructure:"enterprises"` + InstanceUrl string `mapstructure:"instance-url"` + SyncSecrets bool `mapstructure:"sync-secrets"` + OmitArchivedRepositories bool `mapstructure:"omit-archived-repositories"` + AppId string `mapstructure:"app-id"` + AppPrivatekeyPath string `mapstructure:"app-privatekey-path"` } func (c *Github) findFieldByTag(tagValue string) (any, bool) { @@ -46,11 +46,13 @@ func (c *Github) GetString(fieldName string) string { if !ok { return "" } - t, ok := v.(string) - if !ok { - panic("wrong type") + if t, ok := v.(string); ok { + return t } - return t + if t, ok := v.([]byte); ok { + return string(t) + } + panic("wrong type") } func (c *Github) GetInt(fieldName string) int { diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index c0432824..1e7cdb97 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -652,7 +652,7 @@ func (o *repositoryResourceType) registerUpdateRepositoryAction(ctx context.Cont Name: "update", DisplayName: "Update Repository", Description: "Update an existing repository in a GitHub organization", - ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_MUTATE}, + ActionType: []v2.ActionType{}, Arguments: []*config.Field{ 
{ Name: "resource", diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 37af7a94..6fc420d2 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -467,7 +467,7 @@ func (o *teamResourceType) registerUpdateTeamAction(ctx context.Context, registr Name: "update", DisplayName: "Update Team", Description: "Update an existing team in a GitHub organization", - ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_MUTATE}, + ActionType: []v2.ActionType{}, Arguments: []*config.Field{ { Name: "resource", diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 00000000..6b061e61 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 00000000..fbc63325 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,27 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - misspell + - govet + - staticcheck + - errcheck + - unparam + - ineffassign + - nakedret + - gocyclo + - dupl + - goimports + - revive + - gosec + - gosimple + - typecheck + - unused + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 600 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 00000000..fabe5e43 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,268 @@ +# Changelog + +## 3.4.0 (2025-06-27) + +### Added + +- #268: Added property to Constraints to include prereleases for Check and Validate + +### Changed + +- #263: Updated Go testing for 1.24, 1.23, and 1.22 +- #269: Updated the error message handling for message case and wrapping errors +- #266: Restore the ability to have leading 0's when parsing with NewVersion. + Opt-out of this by setting CoerceNewVersion to false. + +### Fixed + +- #257: Fixed the CodeQL link (thanks @dmitris) +- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out + of this by setting DetailedNewVersionErrors to false for faster performance. +- #267: Handle pre-releases for an "and" group if one constraint includes them + +## 3.3.1 (2024-11-19) + +### Fixed + +- #253: Fix for allowing some version that were invalid + +## 3.3.0 (2024-08-27) + +### Added + +- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser) +- #213: nil version equality checking (thanks @KnutZuidema) + +### Changed + +- #241: Simplify StrictNewVersion parsing (thanks @grosser) +- Testing support up through Go 1.23 +- Minimum version set to 1.21 as this is what's tested now +- Fuzz testing now supports caching + +## 3.2.1 (2023-04-10) + +### Changed + +- #198: Improved testing around pre-release names +- #200: Improved code scanning with addition of CodeQL +- #201: Testing now includes Go 1.20. Go 1.17 has been dropped +- #202: Migrated Fuzz testing to Go built-in Fuzzing. 
CI runs daily +- #203: Docs updated for security details + +### Fixed + +- #199: Fixed issue with range transformations + +## 3.2.0 (2022-11-28) + +### Added + +- #190: Added text marshaling and unmarshaling +- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg) +- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker) +- #179: Added New() version constructor (thanks @kazhuravlev) + +### Changed + +- #182/#183: Updated CI testing setup + +### Fixed + +- #186: Fixing issue where validation of constraint section gave false positives +- #176: Fix constraints check with *-0 (thanks @mtt0) +- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni) +- #161: Fixed godoc (thanks @afirth) + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. 
+ +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. 
+ +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/gammazero/deque/LICENSE b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt similarity index 86% rename from vendor/github.com/gammazero/deque/LICENSE rename to vendor/github.com/Masterminds/semver/v3/LICENSE.txt index 0566f266..9ff7da9c 100644 --- a/vendor/github.com/gammazero/deque/LICENSE +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -1,6 +1,4 @@ -MIT License - -Copyright (c) 2018 Andrew J. Gillis +Copyright (C) 2014-2019, Matt Butcher and Matt Farina Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,13 +7,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 00000000..9ca87a2c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,31 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go env GOCACHE + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . + +$(GOLANGCI_LINT): + # Install golangci-lint. The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2 diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 00000000..2f56c676 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,274 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +## Package Versions + +Note, import `github.com/Masterminds/semver/v3` to use the latest version. + +There are three major versions of the `semver` package. + +* 3.x.x is the stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the original release. It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue parsing the +version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. Getting the original string is useful if the semantic version was coerced +into a valid form. + +There are package level variables that affect how `NewVersion` handles parsing. + +- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant + versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch + part. This enables the use of CalVer in versions even when not compliant with SemVer. + When set to `false` less coercion work is done. +- `DetailedNewVersionErrors` provides more detailed errors. It only has an effect when + `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true` + it can provide some more insight into why a version is invalid. Setting + `DetailedNewVersionErrors` to `false` is faster but provides less + detailed error messages if a version fails to parse.
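As a minimal sketch of the parsing behaviour described above (not part of the vendored file; it relies only on the `NewVersion`, `StrictNewVersion`, `String`, and `Original` helpers this README names), a loose input such as `v1.2` is coerced by `NewVersion` but rejected by `StrictNewVersion`:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// NewVersion coerces the loose input "v1.2" into the semantic version 1.2.0.
	v, err := semver.NewVersion("v1.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.String())   // 1.2.0
	fmt.Println(v.Original()) // v1.2 (the pre-coercion input)

	// StrictNewVersion accepts only fully formed SemVer, so the same input fails.
	if _, err := semver.StrictNewVersion("v1.2"); err != nil {
		fmt.Println("strict parse rejected v1.2:", err)
	}
}
```

Either constructor returns a `*semver.Version` that can then be sorted, compared, and checked against constraints as shown in the sections below.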
+ +## Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + +```go +raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} +vs := make([]*semver.Version, len(raw)) +for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v +} + +sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses `Constraints`. There are some important +differences to note between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include pre-releases + within the comparison. It will provide an answer that is valid with the + comparison section of the spec at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering pre-releases to be invalid if the + range does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods of checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patterns with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go +c, err := semver.NewConstraint(">= 1.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parsable. +} +// Check if the version meets the constraints. The variable a will be true. +a := c.Check(v) +``` + +### Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of space or comma separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, pre-releases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`.
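A short sketch of that precedence rule (again, not part of the vendored file; it uses only `MustParse` and the `LessThan`/`Compare` methods this README mentions), showing that comparison methods on `Version` always take pre-releases into account:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	pre := semver.MustParse("1.2.3-beta.1")
	rel := semver.MustParse("1.2.3")

	// Per the spec, the pre-release sorts before its associated release.
	fmt.Println(pre.LessThan(rel)) // true
	fmt.Println(pre.Compare(rel))  // -1
}
```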
+ +According to the Semantic Version specification, pre-releases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer's comparisons using constraints without a pre-release comparator will skip +pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +The `Constraints` instance returned from `semver.NewConstraint()` has a property +`IncludePrerelease` that, when set to true, will return prerelease versions when calls +to `Check()` and `Validate()` are made. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphen ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's +parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`. + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as wildcard characters. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts +as the API stability level. This is useful when comparing API versions, as a +major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint.
When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). + +## Security + +Security is an important consideration for this project. The project currently +uses the following tools to help discover security issues: + +* [CodeQL](https://codeql.github.com) +* [gosec](https://github.com/securego/gosec) +* Daily Fuzz testing + +If you believe you have found a security vulnerability you can privately disclose +it through the [GitHub security page](https://github.com/Masterminds/semver/security). diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md new file mode 100644 index 00000000..a30a66b1 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +The following versions of semver are currently supported: + +| Version | Supported | +| ------- | ------------------ | +| 3.x | :white_check_mark: | +| 2.x | :x: | +| 1.x | :x: | + +Fixes are only released for the latest minor version in the form of a patch release. + +## Reporting a Vulnerability + +You can privately disclose a vulnerability through GitHubs +[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories) +mechanism. diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 00000000..a7823589 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 00000000..8b7a10f8 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,601 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. 
+type Constraints struct { + constraints [][]*constraint + containsPre []bool + + // IncludePrerelease specifies if pre-releases should be included in + // the results. Note, if a constraint range has a prerelease than + // prereleases will be included for that AND group even if this is + // set to false. + IncludePrerelease bool +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + lenors := len(ors) + or := make([][]*constraint, lenors) + hasPre := make([]bool, lenors) + for k, v := range ors { + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + // If one of the constraints has a prerelease record this. + // This information is used when checking all in an "and" + // group to ensure they all check for prereleases. + if pc.con.pre != "" { + hasPre[k] = true + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{ + constraints: or, + containsPre: hasPre, + } + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for i, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for i, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. 
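+			// For example, validating "1.3.0-beta.1" against ">= 1.2.3" with
+			// IncludePrerelease unset records a single prerelease error below
+			// instead of running the individual comparisons.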
+ if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (cs *Constraints) UnmarshalText(text []byte) error { + temp, err := NewConstraint(string(text)) + if err != nil { + return err + } + + *cs = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (cs Constraints) MarshalText() ([]byte, error) { + return []byte(cs.String()), nil +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate an segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + // The first time a constraint shows up will look slightly different from + // future times it shows up due to a leading space or comma in a given + // string. + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`, + ops, + cvRegex, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. 
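+	// (That is, con holds the parsed 2.0.0 that the comparison functions
+	// below operate on.)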
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version, includePre bool) (bool, error) { + return constraintOps[c.origfunc](v, c, includePre) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint, includePre bool) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = fmt.Sprintf("0.0.0%s", m[6]) + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint parser error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint parser error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) { + + // The existence of prereleases is checked at the group level and passed in. 
+ // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 is it not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater + // which one of 11.2.1 is greater + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-preleases and can use the + // Compare function to accomplish that. + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
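+	// For a wildcard constraint such as "<= 2.x", con is the zero-filled 2.0.0
+	// with minorDirty set, so the dirty branch below accepts any 2.y.z and
+	// rejects 3.0.0 and above (effectively "< 3").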
+ if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c, includePre) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. 
Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + // ^ when the minor is 0 and minor > 0 is =0.0.z + if c.con.Minor() == 0 && v.Minor() > 0 { + return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 00000000..74f97caa --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + - Parse semantic versions + - Sort semantic versions + - Check if a semantic version fits within a set of constraints + - Optionally work with a `v` prefix + +# Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an optional error can be returned if there is an issue +parsing the version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+b345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. For more details please see the documentation +at https://godoc.org/github.com/Masterminds/semver. + +# Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. 
+For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +# Checking Version Constraints and Comparing Versions + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other is using Constraints. There are some important +differences to notes between these two methods of comparison. + + 1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer valid with the comparison + spec section at https://semver.org/#spec-item-11 + 2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + ranges does not include on. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. + 3. Constraint ranges can have some complex rules including the shorthard use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods or checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns which PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patters with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parsable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parsable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +# Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma or space separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. This can also be written as +`">= 1.2, < 3.0.0 || >= 4.2.3"` + +The basic comparisons are: + + - `=`: equal (aliased to no operator) + - `!=`: not equal + - `>`: greater than + - `<`: less than + - `>=`: greater than or equal to + - `<=`: less than or equal to + +# Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +# Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the tilde operation. 
For example, + + - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + - `>= 1.2.x` is equivalent to `>= 1.2.0` + - `<= 2.x` is equivalent to `<= 3` + - `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` + - `~1` is equivalent to `>= 1, < 2` + - `~2.3` is equivalent to `>= 2.3 < 2.4` + - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + - `~1.x` is equivalent to `>= 1 < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts +as the API stability level. This is useful when comparisons of API versions as a +major change is API breaking. For example, + + - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + - `^2.3` is equivalent to `>= 2.3, < 3` + - `^2.x` is equivalent to `>= 2.0.0, < 3` + - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` + - `^0.2` is equivalent to `>=0.2.0 <0.3.0` + - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` + - `^0.0` is equivalent to `>=0.0.0 <0.1.0` + - `^0` is equivalent to `>=0.0.0 <1.0.0` + +# Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. + a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go new file mode 100644 index 00000000..7a3ba738 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -0,0 +1,788 @@ +package semver + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp +var looseVersionRegex *regexp.Regexp + +// CoerceNewVersion sets if leading 0's are allowd in the version part. Leading 0's are +// not allowed in a valid semantic version. When set to true, NewVersion will coerce +// leading 0's into a valid version. +var CoerceNewVersion = true + +// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion +// function. This is used when CoerceNewVersion is set to false. If set to false +// ErrInvalidSemVer is returned for an invalid version. This does not apply to +// StrictNewVersion. Setting this function to false returns errors more quickly. +var DetailedNewVersionErrors = true + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("invalid semantic version") + + // ErrEmptyString is returned when an empty string is passed in for parsing. 
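+	// Like the other sentinel errors in this block it can be matched with
+	// errors.Is.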
+ ErrEmptyString = errors.New("version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. + ErrSegmentStartsZero = errors.New("version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("invalid metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("invalid prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +// This is not the official regex from the semver spec. It has been modified to allow for loose handling +// where versions like 2.1 are detected. +const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` + + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` + +// looseSemVerRegex is a regular expression that lets invalid semver expressions through +// with enough detail that certain errors can be checked for. +const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") + looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$") +} + +const ( + num string = "0123456789" + allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num +) + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x +// releases of semver did, use the NewVersion() function. +func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // Extract build metadata + if strings.Contains(parts[2], "+") { + extra := strings.SplitN(parts[2], "+", 2) + sv.metadata = extra[1] + parts[2] = extra[0] + if err := validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + // Extract build prerelease + if strings.Contains(parts[2], "-") { + extra := strings.SplitN(parts[2], "-", 2) + sv.pre = extra[1] + parts[2] = extra[0] + if err := validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. 
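+	// For example, "1.02.3" is rejected with ErrSegmentStartsZero and "1.a.3"
+	// with ErrInvalidCharacters.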
+ for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract major, minor, and patch + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + if CoerceNewVersion { + return coerceNewVersion(v) + } + m := versionRegex.FindStringSubmatch(v) + if m == nil { + + // Disabling detailed errors is first so that it is in the fast path. + if !DetailedNewVersionErrors { + return nil, ErrInvalidSemVer + } + + // Check for specific errors with the semver string and return a more detailed + // error. + m = looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + err := validateVersion(m) + if err != nil { + return nil, err + } + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[5], + pre: m[4], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +func coerceNewVersion(v string) (*Version, error) { + m := looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. 
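+	// For example, "1.2.3-01" passes the loose regex but is rejected here
+	// because its numeric prerelease identifier has a leading zero.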
+ + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// New creates a new instance of Version with each of the parts passed in as +// arguments instead of parsing a version string. +func New(major, minor, patch uint64, pre, metadata string) *Version { + v := Version{ + major: major, + minor: minor, + patch: patch, + pre: pre, + metadata: metadata, + original: "", + } + + v.original = v.String() + + return &v +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. 
+// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// LessThanEqual tests if one version is less or equal than another one. +func (v *Version) LessThanEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// GreaterThanEqual tests if one version is greater or equal than another one. +func (v *Version) GreaterThanEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + if v == o { + return true + } + if v == nil || o == nil { + return false + } + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. 
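+//
+// A rough usage sketch (the struct and payload are illustrative only):
+//
+//	var release struct {
+//		Version *Version `json:"version"`
+//	}
+//	err := json.Unmarshal([]byte(`{"version":"1.2.3-beta.1+b345"}`), &release)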
+func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(text []byte) error { + temp, err := NewVersion(string(text)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (v Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. 
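+	// For example, the identifiers "99" and "103" compare numerically here
+	// (99 < 103), while "11" sorts before "beta" since numeric identifiers
+	// compare lower than alphanumeric ones.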
+ + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if p == "" { + return ErrInvalidPrerelease + } else if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." +func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if p == "" { + return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} + +// validateVersion checks for common validation issues but may not catch all errors +func validateVersion(m []string) error { + var err error + var v string + if m[1] != "" { + if len(m[1]) > 1 && m[1][0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[2] != "" { + v = strings.TrimPrefix(m[2], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[3] != "" { + v = strings.TrimPrefix(m[3], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[5] != "" { + if err = validatePrerelease(m[5]); err != nil { + return err + } + } + + if m[8] != "" { + if err = validateMetadata(m[8]); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/internal/connector/connector.go b/vendor/github.com/conductorone/baton-sdk/internal/connector/connector.go index c5898902..ab6da92c 100644 --- a/vendor/github.com/conductorone/baton-sdk/internal/connector/connector.go +++ b/vendor/github.com/conductorone/baton-sdk/internal/connector/connector.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "net" "os" "os/exec" "sync" @@ -24,8 +25,11 @@ import ( connectorwrapperV1 "github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1" ratelimitV1 "github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1" tlsV1 "github.com/conductorone/baton-sdk/pb/c1/utls/v1" + "github.com/conductorone/baton-sdk/pkg/bid" 
ratelimit2 "github.com/conductorone/baton-sdk/pkg/ratelimit" + "github.com/conductorone/baton-sdk/pkg/session" "github.com/conductorone/baton-sdk/pkg/types" + "github.com/conductorone/baton-sdk/pkg/types/sessions" "github.com/conductorone/baton-sdk/pkg/ugrpc" utls2 "github.com/conductorone/baton-sdk/pkg/utls" ) @@ -49,6 +53,28 @@ type connectorClient struct { connectorV2.EventServiceClient connectorV2.TicketsServiceClient connectorV2.ActionServiceClient + + sessionStoreSetter sessions.SetSessionStore // this is the session store server +} + +var _ sessions.SetSessionStore = (*connectorClient)(nil) +var _ SetSessionStoreSetter = (*connectorClient)(nil) + +type SetSessionStoreSetter interface { + SetSessionStoreSetter(setsessionStoreSetter sessions.SetSessionStore) +} + +func (c *connectorClient) SetSessionStoreSetter(sessionStoreSetter sessions.SetSessionStore) { + c.sessionStoreSetter = sessionStoreSetter +} + +func (c *connectorClient) SetSessionStore(ctx context.Context, store sessions.SessionStore) { + if c.sessionStoreSetter == nil { + l := ctxzap.Extract(ctx) + l.Warn("connectorClient's session store is nil") + return + } + c.sessionStoreSetter.SetSessionStore(ctx, store) } var ErrConnectorNotImplemented = errors.New("client does not implement connector connectorV2") @@ -56,24 +82,35 @@ var ErrConnectorNotImplemented = errors.New("client does not implement connector type wrapper struct { mtx sync.RWMutex - server types.ConnectorServer - client types.ConnectorClient - serverStdin io.WriteCloser - conn *grpc.ClientConn - provisioningEnabled bool - ticketingEnabled bool - fullSyncDisabled bool - targetedSyncResourceIDs []string + server types.ConnectorServer + client types.ConnectorClient + serverStdin io.WriteCloser + conn *grpc.ClientConn + provisioningEnabled bool + ticketingEnabled bool + fullSyncDisabled bool + targetedSyncResources []*connectorV2.Resource + sessionStoreEnabled bool + syncResourceTypeIDs []string rateLimiter ratelimitV1.RateLimiterServiceServer rlCfg *ratelimitV1.RateLimiterConfig rlDescriptors []*ratelimitV1.RateLimitDescriptors_Entry now func() time.Time + + SessionServer sessions.SetSessionStore } type Option func(ctx context.Context, w *wrapper) error +func WithSessionStoreEnabled() Option { + return func(ctx context.Context, w *wrapper) error { + w.sessionStoreEnabled = true + return nil + } +} + func WithRateLimiterConfig(cfg *ratelimitV1.RateLimiterConfig) Option { return func(ctx context.Context, w *wrapper) error { if cfg != nil { @@ -117,9 +154,24 @@ func WithTicketingEnabled() Option { } } -func WithTargetedSyncResourceIDs(resourceIDs []string) Option { +func WithTargetedSyncResources(resourceIDs []string) Option { + return func(ctx context.Context, w *wrapper) error { + resources := make([]*connectorV2.Resource, 0, len(resourceIDs)) + for _, resourceId := range resourceIDs { + r, err := bid.ParseResourceBid(resourceId) + if err != nil { + return err + } + resources = append(resources, r) + } + w.targetedSyncResources = resources + return nil + } +} + +func WithSyncResourceTypeIDs(resourceTypeIDs []string) Option { return func(ctx context.Context, w *wrapper) error { - w.targetedSyncResourceIDs = resourceIDs + w.syncResourceTypeIDs = resourceTypeIDs return nil } } @@ -154,7 +206,7 @@ func (cw *wrapper) Run(ctx context.Context, serverCfg *connectorwrapperV1.Server return err } - tlsConfig, err := utls2.ListenerConfig(ctx, serverCfg.Credential) + tlsConfig, err := utls2.ListenerConfig(ctx, serverCfg.GetCredential()) if err != nil { return err } @@ -175,7 
+227,7 @@ func (cw *wrapper) Run(ctx context.Context, serverCfg *connectorwrapperV1.Server )), ) - rl, err := ratelimit2.NewLimiter(ctx, cw.now, serverCfg.RateLimiterConfig) + rl, err := ratelimit2.NewLimiter(ctx, cw.now, serverCfg.GetRateLimiterConfig()) if err != nil { return err } @@ -198,14 +250,55 @@ func (cw *wrapper) runServer(ctx context.Context, serverCred *tlsV1.Credential) listenPort, listener, err := cw.setupListener(ctx) if err != nil { - return 0, err + return 0, fmt.Errorf("failed to setup listener: %w", err) + } + var sessionListenerPort uint32 + if cw.sessionStoreEnabled { + var sessionListenerFile *os.File + sessionListenerPort, sessionListenerFile, err = cw.setupListener(ctx) + if err != nil { + return 0, fmt.Errorf("failed to setup session listener: %w", err) + } + + if sessionListenerFile == nil { + return 0, fmt.Errorf("session listener file is nil") + } + + // Start the session cache server on the cache listener + sessionListener, err := net.FileListener(sessionListenerFile) + if err != nil { + _ = sessionListenerFile.Close() + return 0, fmt.Errorf("failed to create session listener: %w", err) + } + tlsConfig, err := utls2.ListenerConfig(ctx, serverCred) + if err != nil { + _ = sessionListenerFile.Close() + return 0, fmt.Errorf("failed to create session listener config: %w", err) + } + + // TODO(kans): block until we send a request or something/error handling in general. + l.Info("starting session store server") + server := session.NewGRPCSessionServer() + cw.SessionServer = server + go func() { + defer sessionListenerFile.Close() + serverErr := session.StartGRPCSessionServerWithOptions(ctx, sessionListener, server, + grpc.Creds(credentials.NewTLS(tlsConfig)), + grpc.ChainUnaryInterceptor(ugrpc.UnaryServerInterceptor(ctx)...), + ) + if serverErr != nil { + l.Error("failed to create session store server", zap.Error(serverErr)) + return + } + }() } - serverCfg, err := proto.Marshal(&connectorwrapperV1.ServerConfig{ - Credential: serverCred, - RateLimiterConfig: cw.rlCfg, - ListenPort: listenPort, - }) + serverCfg, err := proto.Marshal(connectorwrapperV1.ServerConfig_builder{ + Credential: serverCred, + RateLimiterConfig: cw.rlCfg, + ListenPort: listenPort, + SessionStoreListenPort: sessionListenerPort, + }.Build()) if err != nil { return 0, err } @@ -331,8 +424,11 @@ func (cw *wrapper) C(ctx context.Context) (types.ConnectorClient, error) { } cw.conn = conn - cw.client = NewConnectorClient(ctx, cw.conn) - return cw.client, nil + client := NewConnectorClient(ctx, cw.conn) + client.SetSessionStoreSetter(cw.SessionServer) + cw.client = client + + return client, nil } // Close shuts down the grpc server and closes the connection. @@ -414,7 +510,7 @@ func Register(ctx context.Context, s grpc.ServiceRegistrar, srv types.ConnectorS // NewConnectorClient takes a grpc.ClientConnInterface and returns an implementation of the ConnectorClient interface. // It does not check that the connection actually supports the services. 
-func NewConnectorClient(ctx context.Context, cc grpc.ClientConnInterface) types.ConnectorClient { +func NewConnectorClient(_ context.Context, cc grpc.ClientConnInterface) *connectorClient { return &connectorClient{ ResourceTypesServiceClient: connectorV2.NewResourceTypesServiceClient(cc), ResourcesServiceClient: connectorV2.NewResourcesServiceClient(cc), diff --git a/vendor/github.com/conductorone/baton-sdk/internal/connector/connector_server_unix.go b/vendor/github.com/conductorone/baton-sdk/internal/connector/connector_server_unix.go index ab7f1214..bac831a5 100644 --- a/vendor/github.com/conductorone/baton-sdk/internal/connector/connector_server_unix.go +++ b/vendor/github.com/conductorone/baton-sdk/internal/connector/connector_server_unix.go @@ -44,7 +44,7 @@ func (cw *wrapper) setupListener(ctx context.Context) (uint32, *os.File, error) func (cw *wrapper) getListener(ctx context.Context, serverCfg *connectorwrapperV1.ServerConfig) (net.Listener, error) { l := ctxzap.Extract(ctx) - l.Debug("starting listener with fd", zap.Uint32("expected_listen_port", serverCfg.ListenPort)) + l.Debug("starting listener with fd", zap.Uint32("expected_listen_port", serverCfg.GetListenPort())) listenerFd := os.Getenv(listenerFdEnv) if listenerFd == "" { @@ -64,8 +64,8 @@ func (cw *wrapper) getListener(ctx context.Context, serverCfg *connectorwrapperV } listenPort := getPort(listener) - if listenPort != serverCfg.ListenPort { - return nil, fmt.Errorf("listen port mismatch: %d != %d", listenPort, serverCfg.ListenPort) + if listenPort != serverCfg.GetListenPort() { + return nil, fmt.Errorf("listen port mismatch: %d != %d", listenPort, serverCfg.GetListenPort()) } l.Debug("listener started", zap.Uint32("listen_port", listenPort)) diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/annotation_sync_details.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/annotation_sync_details.pb.go index 71b4ddb2..35212c03 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/annotation_sync_details.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/annotation_sync_details.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/c1z/v1/annotation_sync_details.proto +//go:build !protoopaque + package v1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -22,7 +23,7 @@ const ( ) type SyncDetails struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -53,11 +54,6 @@ func (x *SyncDetails) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SyncDetails.ProtoReflect.Descriptor instead. 
-func (*SyncDetails) Descriptor() ([]byte, []int) { - return file_c1_c1z_v1_annotation_sync_details_proto_rawDescGZIP(), []int{0} -} - func (x *SyncDetails) GetId() string { if x != nil { return x.Id @@ -65,32 +61,32 @@ func (x *SyncDetails) GetId() string { return "" } -var File_c1_c1z_v1_annotation_sync_details_proto protoreflect.FileDescriptor +func (x *SyncDetails) SetId(v string) { + x.Id = v +} -var file_c1_c1z_v1_annotation_sync_details_proto_rawDesc = string([]byte{ - 0x0a, 0x27, 0x63, 0x31, 0x2f, 0x63, 0x31, 0x7a, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x63, 0x31, 0x2e, 0x63, 0x31, - 0x7a, 0x2e, 0x76, 0x31, 0x22, 0x1d, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, - 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, - 0x31, 0x7a, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_c1z_v1_annotation_sync_details_proto_rawDescOnce sync.Once - file_c1_c1z_v1_annotation_sync_details_proto_rawDescData []byte -) +type SyncDetails_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. -func file_c1_c1z_v1_annotation_sync_details_proto_rawDescGZIP() []byte { - file_c1_c1z_v1_annotation_sync_details_proto_rawDescOnce.Do(func() { - file_c1_c1z_v1_annotation_sync_details_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_c1z_v1_annotation_sync_details_proto_rawDesc), len(file_c1_c1z_v1_annotation_sync_details_proto_rawDesc))) - }) - return file_c1_c1z_v1_annotation_sync_details_proto_rawDescData + Id string } +func (b0 SyncDetails_builder) Build() *SyncDetails { + m0 := &SyncDetails{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + return m0 +} + +var File_c1_c1z_v1_annotation_sync_details_proto protoreflect.FileDescriptor + +const file_c1_c1z_v1_annotation_sync_details_proto_rawDesc = "" + + "\n" + + "'c1/c1z/v1/annotation_sync_details.proto\x12\tc1.c1z.v1\"\x1d\n" + + "\vSyncDetails\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02idB0Z.github.com/conductorone/baton-sdk/pb/c1/c1z/v1b\x06proto3" + var file_c1_c1z_v1_annotation_sync_details_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_c1_c1z_v1_annotation_sync_details_proto_goTypes = []any{ (*SyncDetails)(nil), // 0: c1.c1z.v1.SyncDetails diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/annotation_sync_details_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/annotation_sync_details_protoopaque.pb.go new file mode 100644 index 00000000..13a7e990 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/annotation_sync_details_protoopaque.pb.go @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/c1z/v1/annotation_sync_details.proto + +//go:build protoopaque + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SyncDetails struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncDetails) Reset() { + *x = SyncDetails{} + mi := &file_c1_c1z_v1_annotation_sync_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncDetails) ProtoMessage() {} + +func (x *SyncDetails) ProtoReflect() protoreflect.Message { + mi := &file_c1_c1z_v1_annotation_sync_details_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SyncDetails) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *SyncDetails) SetId(v string) { + x.xxx_hidden_Id = v +} + +type SyncDetails_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string +} + +func (b0 SyncDetails_builder) Build() *SyncDetails { + m0 := &SyncDetails{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + return m0 +} + +var File_c1_c1z_v1_annotation_sync_details_proto protoreflect.FileDescriptor + +const file_c1_c1z_v1_annotation_sync_details_proto_rawDesc = "" + + "\n" + + "'c1/c1z/v1/annotation_sync_details.proto\x12\tc1.c1z.v1\"\x1d\n" + + "\vSyncDetails\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02idB0Z.github.com/conductorone/baton-sdk/pb/c1/c1z/v1b\x06proto3" + +var file_c1_c1z_v1_annotation_sync_details_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_c1z_v1_annotation_sync_details_proto_goTypes = []any{ + (*SyncDetails)(nil), // 0: c1.c1z.v1.SyncDetails +} +var file_c1_c1z_v1_annotation_sync_details_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_c1z_v1_annotation_sync_details_proto_init() } +func file_c1_c1z_v1_annotation_sync_details_proto_init() { + if File_c1_c1z_v1_annotation_sync_details_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_c1z_v1_annotation_sync_details_proto_rawDesc), len(file_c1_c1z_v1_annotation_sync_details_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_c1z_v1_annotation_sync_details_proto_goTypes, + DependencyIndexes: file_c1_c1z_v1_annotation_sync_details_proto_depIdxs, + MessageInfos: 
file_c1_c1z_v1_annotation_sync_details_proto_msgTypes, + }.Build() + File_c1_c1z_v1_annotation_sync_details_proto = out.File + file_c1_c1z_v1_annotation_sync_details_proto_goTypes = nil + file_c1_c1z_v1_annotation_sync_details_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/diff.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/diff.pb.go index ac849400..1f319756 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/diff.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/diff.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/c1z/v1/diff.proto +//go:build !protoopaque + package v1 import ( @@ -11,7 +13,6 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( ) type ResourceDiff struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Created []*v2.Resource `protobuf:"bytes,1,rep,name=created,proto3" json:"created,omitempty"` Deleted []*v2.Resource `protobuf:"bytes,2,rep,name=deleted,proto3" json:"deleted,omitempty"` Modified []*v2.Resource `protobuf:"bytes,3,rep,name=modified,proto3" json:"modified,omitempty"` @@ -56,11 +57,6 @@ func (x *ResourceDiff) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ResourceDiff.ProtoReflect.Descriptor instead. -func (*ResourceDiff) Descriptor() ([]byte, []int) { - return file_c1_c1z_v1_diff_proto_rawDescGZIP(), []int{0} -} - func (x *ResourceDiff) GetCreated() []*v2.Resource { if x != nil { return x.Created @@ -82,8 +78,38 @@ func (x *ResourceDiff) GetModified() []*v2.Resource { return nil } +func (x *ResourceDiff) SetCreated(v []*v2.Resource) { + x.Created = v +} + +func (x *ResourceDiff) SetDeleted(v []*v2.Resource) { + x.Deleted = v +} + +func (x *ResourceDiff) SetModified(v []*v2.Resource) { + x.Modified = v +} + +type ResourceDiff_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Created []*v2.Resource + Deleted []*v2.Resource + Modified []*v2.Resource +} + +func (b0 ResourceDiff_builder) Build() *ResourceDiff { + m0 := &ResourceDiff{} + b, x := &b0, m0 + _, _ = b, x + x.Created = b.Created + x.Deleted = b.Deleted + x.Modified = b.Modified + return m0 +} + type EntitlementDiff struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Created []*v2.Entitlement `protobuf:"bytes,1,rep,name=created,proto3" json:"created,omitempty"` Deleted []*v2.Entitlement `protobuf:"bytes,2,rep,name=deleted,proto3" json:"deleted,omitempty"` Modified []*v2.Entitlement `protobuf:"bytes,3,rep,name=modified,proto3" json:"modified,omitempty"` @@ -116,11 +142,6 @@ func (x *EntitlementDiff) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EntitlementDiff.ProtoReflect.Descriptor instead. 
-func (*EntitlementDiff) Descriptor() ([]byte, []int) { - return file_c1_c1z_v1_diff_proto_rawDescGZIP(), []int{1} -} - func (x *EntitlementDiff) GetCreated() []*v2.Entitlement { if x != nil { return x.Created @@ -142,8 +163,38 @@ func (x *EntitlementDiff) GetModified() []*v2.Entitlement { return nil } +func (x *EntitlementDiff) SetCreated(v []*v2.Entitlement) { + x.Created = v +} + +func (x *EntitlementDiff) SetDeleted(v []*v2.Entitlement) { + x.Deleted = v +} + +func (x *EntitlementDiff) SetModified(v []*v2.Entitlement) { + x.Modified = v +} + +type EntitlementDiff_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Created []*v2.Entitlement + Deleted []*v2.Entitlement + Modified []*v2.Entitlement +} + +func (b0 EntitlementDiff_builder) Build() *EntitlementDiff { + m0 := &EntitlementDiff{} + b, x := &b0, m0 + _, _ = b, x + x.Created = b.Created + x.Deleted = b.Deleted + x.Modified = b.Modified + return m0 +} + type GrantDiff struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Created []*v2.Grant `protobuf:"bytes,1,rep,name=created,proto3" json:"created,omitempty"` Deleted []*v2.Grant `protobuf:"bytes,2,rep,name=deleted,proto3" json:"deleted,omitempty"` Modified []*v2.Grant `protobuf:"bytes,3,rep,name=modified,proto3" json:"modified,omitempty"` @@ -176,11 +227,6 @@ func (x *GrantDiff) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantDiff.ProtoReflect.Descriptor instead. -func (*GrantDiff) Descriptor() ([]byte, []int) { - return file_c1_c1z_v1_diff_proto_rawDescGZIP(), []int{2} -} - func (x *GrantDiff) GetCreated() []*v2.Grant { if x != nil { return x.Created @@ -202,8 +248,38 @@ func (x *GrantDiff) GetModified() []*v2.Grant { return nil } +func (x *GrantDiff) SetCreated(v []*v2.Grant) { + x.Created = v +} + +func (x *GrantDiff) SetDeleted(v []*v2.Grant) { + x.Deleted = v +} + +func (x *GrantDiff) SetModified(v []*v2.Grant) { + x.Modified = v +} + +type GrantDiff_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Created []*v2.Grant + Deleted []*v2.Grant + Modified []*v2.Grant +} + +func (b0 GrantDiff_builder) Build() *GrantDiff { + m0 := &GrantDiff{} + b, x := &b0, m0 + _, _ = b, x + x.Created = b.Created + x.Deleted = b.Deleted + x.Modified = b.Modified + return m0 +} + type C1ZDiffOutput struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resources *ResourceDiff `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources,omitempty"` Entitlements *EntitlementDiff `protobuf:"bytes,2,opt,name=entitlements,proto3" json:"entitlements,omitempty"` Grants *GrantDiff `protobuf:"bytes,3,opt,name=grants,proto3" json:"grants,omitempty"` @@ -236,11 +312,6 @@ func (x *C1ZDiffOutput) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use C1ZDiffOutput.ProtoReflect.Descriptor instead. 
-func (*C1ZDiffOutput) Descriptor() ([]byte, []int) { - return file_c1_c1z_v1_diff_proto_rawDescGZIP(), []int{3} -} - func (x *C1ZDiffOutput) GetResources() *ResourceDiff { if x != nil { return x.Resources @@ -262,80 +333,91 @@ func (x *C1ZDiffOutput) GetGrants() *GrantDiff { return nil } -var File_c1_c1z_v1_diff_proto protoreflect.FileDescriptor +func (x *C1ZDiffOutput) SetResources(v *ResourceDiff) { + x.Resources = v +} -var file_c1_c1z_v1_diff_proto_rawDesc = string([]byte{ - 0x0a, 0x14, 0x63, 0x31, 0x2f, 0x63, 0x31, 0x7a, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x69, 0x66, 0x66, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x63, 0x31, 0x2e, 0x63, 0x31, 0x7a, 0x2e, 0x76, - 0x31, 0x1a, 0x21, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, - 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, - 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x69, - 0x66, 0x66, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x07, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x35, 0x0a, 0x08, - 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x69, 0x66, - 0x69, 0x65, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x44, 0x69, 0x66, 0x66, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x36, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x07, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x6f, 0x64, 0x69, 0x66, - 0x69, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, - 0x64, 0x22, 0xa3, 0x01, 0x0a, 
0x09, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x44, 0x69, 0x66, 0x66, 0x12, - 0x30, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x30, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x08, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x08, 0x6d, - 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x22, 0xb4, 0x01, 0x0a, 0x0d, 0x43, 0x31, 0x5a, 0x44, - 0x69, 0x66, 0x66, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x31, 0x7a, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x44, 0x69, 0x66, 0x66, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x12, 0x3e, 0x0a, 0x0c, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x31, 0x7a, 0x2e, - 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x69, - 0x66, 0x66, 0x52, 0x0c, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x2c, 0x0a, 0x06, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x31, 0x7a, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x61, - 0x6e, 0x74, 0x44, 0x69, 0x66, 0x66, 0x52, 0x06, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x42, 0x30, - 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, - 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x31, 0x7a, 0x2f, 0x76, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_c1z_v1_diff_proto_rawDescOnce sync.Once - file_c1_c1z_v1_diff_proto_rawDescData []byte -) +func (x *C1ZDiffOutput) SetEntitlements(v *EntitlementDiff) { + x.Entitlements = v +} + +func (x *C1ZDiffOutput) SetGrants(v *GrantDiff) { + x.Grants = v +} + +func (x *C1ZDiffOutput) HasResources() bool { + if x == nil { + return false + } + return x.Resources != nil +} + +func (x *C1ZDiffOutput) HasEntitlements() bool { + if x == nil { + return false + } + return x.Entitlements != nil +} + +func (x *C1ZDiffOutput) HasGrants() bool { + if x == nil { + return false + } + return x.Grants != nil +} + +func (x *C1ZDiffOutput) ClearResources() { + x.Resources = nil +} + +func (x *C1ZDiffOutput) ClearEntitlements() { + x.Entitlements = nil +} -func file_c1_c1z_v1_diff_proto_rawDescGZIP() []byte { - file_c1_c1z_v1_diff_proto_rawDescOnce.Do(func() { - file_c1_c1z_v1_diff_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_c1z_v1_diff_proto_rawDesc), len(file_c1_c1z_v1_diff_proto_rawDesc))) - }) - return 
file_c1_c1z_v1_diff_proto_rawDescData +func (x *C1ZDiffOutput) ClearGrants() { + x.Grants = nil } +type C1ZDiffOutput_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resources *ResourceDiff + Entitlements *EntitlementDiff + Grants *GrantDiff +} + +func (b0 C1ZDiffOutput_builder) Build() *C1ZDiffOutput { + m0 := &C1ZDiffOutput{} + b, x := &b0, m0 + _, _ = b, x + x.Resources = b.Resources + x.Entitlements = b.Entitlements + x.Grants = b.Grants + return m0 +} + +var File_c1_c1z_v1_diff_proto protoreflect.FileDescriptor + +const file_c1_c1z_v1_diff_proto_rawDesc = "" + + "\n" + + "\x14c1/c1z/v1/diff.proto\x12\tc1.c1z.v1\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\"\xaf\x01\n" + + "\fResourceDiff\x123\n" + + "\acreated\x18\x01 \x03(\v2\x19.c1.connector.v2.ResourceR\acreated\x123\n" + + "\adeleted\x18\x02 \x03(\v2\x19.c1.connector.v2.ResourceR\adeleted\x125\n" + + "\bmodified\x18\x03 \x03(\v2\x19.c1.connector.v2.ResourceR\bmodified\"\xbb\x01\n" + + "\x0fEntitlementDiff\x126\n" + + "\acreated\x18\x01 \x03(\v2\x1c.c1.connector.v2.EntitlementR\acreated\x126\n" + + "\adeleted\x18\x02 \x03(\v2\x1c.c1.connector.v2.EntitlementR\adeleted\x128\n" + + "\bmodified\x18\x03 \x03(\v2\x1c.c1.connector.v2.EntitlementR\bmodified\"\xa3\x01\n" + + "\tGrantDiff\x120\n" + + "\acreated\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\acreated\x120\n" + + "\adeleted\x18\x02 \x03(\v2\x16.c1.connector.v2.GrantR\adeleted\x122\n" + + "\bmodified\x18\x03 \x03(\v2\x16.c1.connector.v2.GrantR\bmodified\"\xb4\x01\n" + + "\rC1ZDiffOutput\x125\n" + + "\tresources\x18\x01 \x01(\v2\x17.c1.c1z.v1.ResourceDiffR\tresources\x12>\n" + + "\fentitlements\x18\x02 \x01(\v2\x1a.c1.c1z.v1.EntitlementDiffR\fentitlements\x12,\n" + + "\x06grants\x18\x03 \x01(\v2\x14.c1.c1z.v1.GrantDiffR\x06grantsB0Z.github.com/conductorone/baton-sdk/pb/c1/c1z/v1b\x06proto3" + var file_c1_c1z_v1_diff_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_c1_c1z_v1_diff_proto_goTypes = []any{ (*ResourceDiff)(nil), // 0: c1.c1z.v1.ResourceDiff diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/diff_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/diff_protoopaque.pb.go new file mode 100644 index 00000000..f0ae6188 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/c1z/v1/diff_protoopaque.pb.go @@ -0,0 +1,491 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/c1z/v1/diff.proto + +//go:build protoopaque + +package v1 + +import ( + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceDiff struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Created *[]*v2.Resource `protobuf:"bytes,1,rep,name=created,proto3"` + xxx_hidden_Deleted *[]*v2.Resource `protobuf:"bytes,2,rep,name=deleted,proto3"` + xxx_hidden_Modified *[]*v2.Resource `protobuf:"bytes,3,rep,name=modified,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceDiff) Reset() { + *x = ResourceDiff{} + mi := &file_c1_c1z_v1_diff_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceDiff) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDiff) ProtoMessage() {} + +func (x *ResourceDiff) ProtoReflect() protoreflect.Message { + mi := &file_c1_c1z_v1_diff_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceDiff) GetCreated() []*v2.Resource { + if x != nil { + if x.xxx_hidden_Created != nil { + return *x.xxx_hidden_Created + } + } + return nil +} + +func (x *ResourceDiff) GetDeleted() []*v2.Resource { + if x != nil { + if x.xxx_hidden_Deleted != nil { + return *x.xxx_hidden_Deleted + } + } + return nil +} + +func (x *ResourceDiff) GetModified() []*v2.Resource { + if x != nil { + if x.xxx_hidden_Modified != nil { + return *x.xxx_hidden_Modified + } + } + return nil +} + +func (x *ResourceDiff) SetCreated(v []*v2.Resource) { + x.xxx_hidden_Created = &v +} + +func (x *ResourceDiff) SetDeleted(v []*v2.Resource) { + x.xxx_hidden_Deleted = &v +} + +func (x *ResourceDiff) SetModified(v []*v2.Resource) { + x.xxx_hidden_Modified = &v +} + +type ResourceDiff_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Created []*v2.Resource + Deleted []*v2.Resource + Modified []*v2.Resource +} + +func (b0 ResourceDiff_builder) Build() *ResourceDiff { + m0 := &ResourceDiff{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Created = &b.Created + x.xxx_hidden_Deleted = &b.Deleted + x.xxx_hidden_Modified = &b.Modified + return m0 +} + +type EntitlementDiff struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Created *[]*v2.Entitlement `protobuf:"bytes,1,rep,name=created,proto3"` + xxx_hidden_Deleted *[]*v2.Entitlement `protobuf:"bytes,2,rep,name=deleted,proto3"` + xxx_hidden_Modified *[]*v2.Entitlement `protobuf:"bytes,3,rep,name=modified,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementDiff) Reset() { + *x = EntitlementDiff{} + mi := &file_c1_c1z_v1_diff_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementDiff) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementDiff) ProtoMessage() {} + +func (x *EntitlementDiff) ProtoReflect() protoreflect.Message { + mi := &file_c1_c1z_v1_diff_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementDiff) GetCreated() []*v2.Entitlement { + if x != nil { + if x.xxx_hidden_Created != nil { + return *x.xxx_hidden_Created + } + } + return nil +} + +func (x *EntitlementDiff) GetDeleted() []*v2.Entitlement { + if x != nil { + if x.xxx_hidden_Deleted != nil { + return *x.xxx_hidden_Deleted + } + } + return nil +} + +func (x *EntitlementDiff) GetModified() []*v2.Entitlement { + if x != nil { + if x.xxx_hidden_Modified != nil { + return *x.xxx_hidden_Modified + } + } + return nil +} + +func (x *EntitlementDiff) SetCreated(v []*v2.Entitlement) { + x.xxx_hidden_Created = &v +} + +func (x *EntitlementDiff) SetDeleted(v []*v2.Entitlement) { + x.xxx_hidden_Deleted = &v +} + +func (x *EntitlementDiff) SetModified(v []*v2.Entitlement) { + x.xxx_hidden_Modified = &v +} + +type EntitlementDiff_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Created []*v2.Entitlement + Deleted []*v2.Entitlement + Modified []*v2.Entitlement +} + +func (b0 EntitlementDiff_builder) Build() *EntitlementDiff { + m0 := &EntitlementDiff{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Created = &b.Created + x.xxx_hidden_Deleted = &b.Deleted + x.xxx_hidden_Modified = &b.Modified + return m0 +} + +type GrantDiff struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Created *[]*v2.Grant `protobuf:"bytes,1,rep,name=created,proto3"` + xxx_hidden_Deleted *[]*v2.Grant `protobuf:"bytes,2,rep,name=deleted,proto3"` + xxx_hidden_Modified *[]*v2.Grant `protobuf:"bytes,3,rep,name=modified,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantDiff) Reset() { + *x = GrantDiff{} + mi := &file_c1_c1z_v1_diff_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantDiff) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantDiff) ProtoMessage() {} + +func (x *GrantDiff) ProtoReflect() protoreflect.Message { + mi := &file_c1_c1z_v1_diff_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantDiff) GetCreated() []*v2.Grant { + if x != nil { + if x.xxx_hidden_Created != nil { + return *x.xxx_hidden_Created + } + } + return nil +} + +func (x *GrantDiff) GetDeleted() []*v2.Grant { + if x != nil { + if x.xxx_hidden_Deleted != nil { + return *x.xxx_hidden_Deleted + } + } + return nil +} + +func (x *GrantDiff) GetModified() []*v2.Grant { + if x != nil { + if x.xxx_hidden_Modified != nil { + return *x.xxx_hidden_Modified + } + } + return nil +} + +func (x *GrantDiff) SetCreated(v []*v2.Grant) { + x.xxx_hidden_Created = &v +} + +func (x *GrantDiff) SetDeleted(v []*v2.Grant) { + x.xxx_hidden_Deleted = &v +} + +func (x *GrantDiff) SetModified(v []*v2.Grant) { + x.xxx_hidden_Modified = &v +} + +type GrantDiff_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Created []*v2.Grant + Deleted []*v2.Grant + Modified []*v2.Grant +} + +func (b0 GrantDiff_builder) Build() *GrantDiff { + m0 := &GrantDiff{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Created = &b.Created + x.xxx_hidden_Deleted = &b.Deleted + x.xxx_hidden_Modified = &b.Modified + return m0 +} + +type C1ZDiffOutput struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resources *ResourceDiff `protobuf:"bytes,1,opt,name=resources,proto3"` + xxx_hidden_Entitlements *EntitlementDiff `protobuf:"bytes,2,opt,name=entitlements,proto3"` + xxx_hidden_Grants *GrantDiff `protobuf:"bytes,3,opt,name=grants,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *C1ZDiffOutput) Reset() { + *x = C1ZDiffOutput{} + mi := &file_c1_c1z_v1_diff_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *C1ZDiffOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*C1ZDiffOutput) ProtoMessage() {} + +func (x *C1ZDiffOutput) ProtoReflect() protoreflect.Message { + mi := &file_c1_c1z_v1_diff_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *C1ZDiffOutput) GetResources() *ResourceDiff { + if x != nil { + return x.xxx_hidden_Resources + } + return nil +} + +func (x *C1ZDiffOutput) GetEntitlements() *EntitlementDiff { + if x != nil { + return x.xxx_hidden_Entitlements + } + return nil +} + +func (x *C1ZDiffOutput) GetGrants() *GrantDiff { + if x != nil { + return x.xxx_hidden_Grants + } + return nil +} + +func (x *C1ZDiffOutput) SetResources(v *ResourceDiff) { + x.xxx_hidden_Resources = v +} + +func (x *C1ZDiffOutput) SetEntitlements(v *EntitlementDiff) { + x.xxx_hidden_Entitlements = v +} + +func (x *C1ZDiffOutput) SetGrants(v *GrantDiff) { + x.xxx_hidden_Grants = v +} + +func (x *C1ZDiffOutput) HasResources() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resources != nil +} + +func (x *C1ZDiffOutput) HasEntitlements() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlements != nil +} + +func (x *C1ZDiffOutput) HasGrants() bool { + if x == nil { + return false + } + return x.xxx_hidden_Grants != nil +} + +func (x *C1ZDiffOutput) ClearResources() { + x.xxx_hidden_Resources = nil +} + +func (x *C1ZDiffOutput) ClearEntitlements() { + x.xxx_hidden_Entitlements = nil +} + +func (x *C1ZDiffOutput) ClearGrants() { + x.xxx_hidden_Grants = nil +} + +type C1ZDiffOutput_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resources *ResourceDiff + Entitlements *EntitlementDiff + Grants *GrantDiff +} + +func (b0 C1ZDiffOutput_builder) Build() *C1ZDiffOutput { + m0 := &C1ZDiffOutput{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resources = b.Resources + x.xxx_hidden_Entitlements = b.Entitlements + x.xxx_hidden_Grants = b.Grants + return m0 +} + +var File_c1_c1z_v1_diff_proto protoreflect.FileDescriptor + +const file_c1_c1z_v1_diff_proto_rawDesc = "" + + "\n" + + "\x14c1/c1z/v1/diff.proto\x12\tc1.c1z.v1\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\"\xaf\x01\n" + + "\fResourceDiff\x123\n" + + "\acreated\x18\x01 \x03(\v2\x19.c1.connector.v2.ResourceR\acreated\x123\n" + + "\adeleted\x18\x02 \x03(\v2\x19.c1.connector.v2.ResourceR\adeleted\x125\n" + + "\bmodified\x18\x03 \x03(\v2\x19.c1.connector.v2.ResourceR\bmodified\"\xbb\x01\n" + + "\x0fEntitlementDiff\x126\n" + + "\acreated\x18\x01 \x03(\v2\x1c.c1.connector.v2.EntitlementR\acreated\x126\n" + + "\adeleted\x18\x02 \x03(\v2\x1c.c1.connector.v2.EntitlementR\adeleted\x128\n" + + "\bmodified\x18\x03 \x03(\v2\x1c.c1.connector.v2.EntitlementR\bmodified\"\xa3\x01\n" + + "\tGrantDiff\x120\n" + + "\acreated\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\acreated\x120\n" + + "\adeleted\x18\x02 \x03(\v2\x16.c1.connector.v2.GrantR\adeleted\x122\n" + + "\bmodified\x18\x03 \x03(\v2\x16.c1.connector.v2.GrantR\bmodified\"\xb4\x01\n" + + "\rC1ZDiffOutput\x125\n" + + "\tresources\x18\x01 \x01(\v2\x17.c1.c1z.v1.ResourceDiffR\tresources\x12>\n" + + "\fentitlements\x18\x02 \x01(\v2\x1a.c1.c1z.v1.EntitlementDiffR\fentitlements\x12,\n" + + "\x06grants\x18\x03 \x01(\v2\x14.c1.c1z.v1.GrantDiffR\x06grantsB0Z.github.com/conductorone/baton-sdk/pb/c1/c1z/v1b\x06proto3" + +var file_c1_c1z_v1_diff_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_c1_c1z_v1_diff_proto_goTypes = []any{ + (*ResourceDiff)(nil), // 0: c1.c1z.v1.ResourceDiff + (*EntitlementDiff)(nil), // 1: c1.c1z.v1.EntitlementDiff + (*GrantDiff)(nil), // 2: c1.c1z.v1.GrantDiff + (*C1ZDiffOutput)(nil), // 3: c1.c1z.v1.C1ZDiffOutput + (*v2.Resource)(nil), // 4: c1.connector.v2.Resource + (*v2.Entitlement)(nil), // 5: c1.connector.v2.Entitlement + (*v2.Grant)(nil), // 6: c1.connector.v2.Grant +} +var file_c1_c1z_v1_diff_proto_depIdxs = []int32{ + 4, // 0: c1.c1z.v1.ResourceDiff.created:type_name -> c1.connector.v2.Resource + 4, // 1: c1.c1z.v1.ResourceDiff.deleted:type_name -> c1.connector.v2.Resource + 4, // 2: c1.c1z.v1.ResourceDiff.modified:type_name -> c1.connector.v2.Resource + 5, // 3: c1.c1z.v1.EntitlementDiff.created:type_name -> c1.connector.v2.Entitlement + 5, // 4: c1.c1z.v1.EntitlementDiff.deleted:type_name -> c1.connector.v2.Entitlement + 5, // 5: c1.c1z.v1.EntitlementDiff.modified:type_name -> c1.connector.v2.Entitlement + 6, // 6: c1.c1z.v1.GrantDiff.created:type_name -> c1.connector.v2.Grant + 6, // 7: c1.c1z.v1.GrantDiff.deleted:type_name -> c1.connector.v2.Grant + 6, // 8: c1.c1z.v1.GrantDiff.modified:type_name -> c1.connector.v2.Grant + 0, // 9: c1.c1z.v1.C1ZDiffOutput.resources:type_name -> c1.c1z.v1.ResourceDiff + 1, // 10: c1.c1z.v1.C1ZDiffOutput.entitlements:type_name -> c1.c1z.v1.EntitlementDiff + 2, // 11: c1.c1z.v1.C1ZDiffOutput.grants:type_name -> c1.c1z.v1.GrantDiff + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field 
type_name +} + +func init() { file_c1_c1z_v1_diff_proto_init() } +func file_c1_c1z_v1_diff_proto_init() { + if File_c1_c1z_v1_diff_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_c1z_v1_diff_proto_rawDesc), len(file_c1_c1z_v1_diff_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_c1z_v1_diff_proto_goTypes, + DependencyIndexes: file_c1_c1z_v1_diff_proto_depIdxs, + MessageInfos: file_c1_c1z_v1_diff_proto_msgTypes, + }.Build() + File_c1_c1z_v1_diff_proto = out.File + file_c1_c1z_v1_diff_proto_goTypes = nil + file_c1_c1z_v1_diff_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.go index ebbf484a..06945eed 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/config/v1/config.proto +//go:build !protoopaque + package v1 import ( @@ -11,7 +13,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -72,11 +73,6 @@ func (x ConstraintKind) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use ConstraintKind.Descriptor instead. -func (ConstraintKind) EnumDescriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{0} -} - type StringFieldType int32 const ( @@ -127,13 +123,8 @@ func (x StringFieldType) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use StringFieldType.Descriptor instead. -func (StringFieldType) EnumDescriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{1} -} - type Configuration struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Fields []*Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` Constraints []*Constraint `protobuf:"bytes,2,rep,name=constraints,proto3" json:"constraints,omitempty"` DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` @@ -143,6 +134,7 @@ type Configuration struct { CatalogId string `protobuf:"bytes,8,opt,name=catalog_id,json=catalogId,proto3" json:"catalog_id,omitempty"` SupportsExternalResources bool `protobuf:"varint,9,opt,name=supports_external_resources,json=supportsExternalResources,proto3" json:"supports_external_resources,omitempty"` RequiresExternalConnector bool `protobuf:"varint,10,opt,name=requires_external_connector,json=requiresExternalConnector,proto3" json:"requires_external_connector,omitempty"` + FieldGroups []*FieldGroup `protobuf:"bytes,11,rep,name=field_groups,json=fieldGroups,proto3" json:"field_groups,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -172,11 +164,6 @@ func (x *Configuration) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Configuration.ProtoReflect.Descriptor instead. 
-func (*Configuration) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{0} -} - func (x *Configuration) GetFields() []*Field { if x != nil { return x.Fields @@ -240,8 +227,87 @@ func (x *Configuration) GetRequiresExternalConnector() bool { return false } +func (x *Configuration) GetFieldGroups() []*FieldGroup { + if x != nil { + return x.FieldGroups + } + return nil +} + +func (x *Configuration) SetFields(v []*Field) { + x.Fields = v +} + +func (x *Configuration) SetConstraints(v []*Constraint) { + x.Constraints = v +} + +func (x *Configuration) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *Configuration) SetHelpUrl(v string) { + x.HelpUrl = v +} + +func (x *Configuration) SetIconUrl(v string) { + x.IconUrl = v +} + +func (x *Configuration) SetIsDirectory(v bool) { + x.IsDirectory = v +} + +func (x *Configuration) SetCatalogId(v string) { + x.CatalogId = v +} + +func (x *Configuration) SetSupportsExternalResources(v bool) { + x.SupportsExternalResources = v +} + +func (x *Configuration) SetRequiresExternalConnector(v bool) { + x.RequiresExternalConnector = v +} + +func (x *Configuration) SetFieldGroups(v []*FieldGroup) { + x.FieldGroups = v +} + +type Configuration_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Fields []*Field + Constraints []*Constraint + DisplayName string + HelpUrl string + IconUrl string + IsDirectory bool + CatalogId string + SupportsExternalResources bool + RequiresExternalConnector bool + FieldGroups []*FieldGroup +} + +func (b0 Configuration_builder) Build() *Configuration { + m0 := &Configuration{} + b, x := &b0, m0 + _, _ = b, x + x.Fields = b.Fields + x.Constraints = b.Constraints + x.DisplayName = b.DisplayName + x.HelpUrl = b.HelpUrl + x.IconUrl = b.IconUrl + x.IsDirectory = b.IsDirectory + x.CatalogId = b.CatalogId + x.SupportsExternalResources = b.SupportsExternalResources + x.RequiresExternalConnector = b.RequiresExternalConnector + x.FieldGroups = b.FieldGroups + return m0 +} + type Constraint struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Kind ConstraintKind `protobuf:"varint,1,opt,name=kind,proto3,enum=c1.config.v1.ConstraintKind" json:"kind,omitempty"` FieldNames []string `protobuf:"bytes,2,rep,name=field_names,json=fieldNames,proto3" json:"field_names,omitempty"` SecondaryFieldNames []string `protobuf:"bytes,3,rep,name=secondary_field_names,json=secondaryFieldNames,proto3" json:"secondary_field_names,omitempty"` @@ -277,11 +343,6 @@ func (x *Constraint) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Constraint.ProtoReflect.Descriptor instead. 
-func (*Constraint) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{1} -} - func (x *Constraint) GetKind() ConstraintKind { if x != nil { return x.Kind @@ -324,8 +385,169 @@ func (x *Constraint) GetIsFieldGroup() bool { return false } +func (x *Constraint) SetKind(v ConstraintKind) { + x.Kind = v +} + +func (x *Constraint) SetFieldNames(v []string) { + x.FieldNames = v +} + +func (x *Constraint) SetSecondaryFieldNames(v []string) { + x.SecondaryFieldNames = v +} + +func (x *Constraint) SetName(v string) { + x.Name = v +} + +func (x *Constraint) SetHelpText(v string) { + x.HelpText = v +} + +func (x *Constraint) SetIsFieldGroup(v bool) { + x.IsFieldGroup = v +} + +type Constraint_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Kind ConstraintKind + FieldNames []string + SecondaryFieldNames []string + Name string + HelpText string + IsFieldGroup bool +} + +func (b0 Constraint_builder) Build() *Constraint { + m0 := &Constraint{} + b, x := &b0, m0 + _, _ = b, x + x.Kind = b.Kind + x.FieldNames = b.FieldNames + x.SecondaryFieldNames = b.SecondaryFieldNames + x.Name = b.Name + x.HelpText = b.HelpText + x.IsFieldGroup = b.IsFieldGroup + return m0 +} + +type FieldGroup struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Unique ID. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + HelpText string `protobuf:"bytes,3,opt,name=help_text,json=helpText,proto3" json:"help_text,omitempty"` + Fields []string `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty"` + Default bool `protobuf:"varint,5,opt,name=default,proto3" json:"default,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldGroup) Reset() { + *x = FieldGroup{} + mi := &file_c1_config_v1_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldGroup) ProtoMessage() {} + +func (x *FieldGroup) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldGroup) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *FieldGroup) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *FieldGroup) GetHelpText() string { + if x != nil { + return x.HelpText + } + return "" +} + +func (x *FieldGroup) GetFields() []string { + if x != nil { + return x.Fields + } + return nil +} + +func (x *FieldGroup) GetDefault() bool { + if x != nil { + return x.Default + } + return false +} + +func (x *FieldGroup) SetName(v string) { + x.Name = v +} + +func (x *FieldGroup) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *FieldGroup) SetHelpText(v string) { + x.HelpText = v +} + +func (x *FieldGroup) SetFields(v []string) { + x.Fields = v +} + +func (x *FieldGroup) SetDefault(v bool) { + x.Default = v +} + +type FieldGroup_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + DisplayName string + HelpText string + Fields []string + Default bool +} + +func (b0 FieldGroup_builder) Build() *FieldGroup { + m0 := &FieldGroup{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.DisplayName = b.DisplayName + x.HelpText = b.HelpText + x.Fields = b.Fields + x.Default = b.Default + return m0 +} + type Field struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // canonical name, typically in snake DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` @@ -340,6 +562,10 @@ type Field struct { // *Field_BoolField // *Field_StringSliceField // *Field_StringMapField + // *Field_ResourceIdField + // *Field_ResourceIdSliceField + // *Field_ResourceField + // *Field_ResourceSliceField Field isField_Field `protobuf_oneof:"field"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -347,7 +573,7 @@ type Field struct { func (x *Field) Reset() { *x = Field{} - mi := &file_c1_config_v1_config_proto_msgTypes[2] + mi := &file_c1_config_v1_config_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -359,7 +585,7 @@ func (x *Field) String() string { func (*Field) ProtoMessage() {} func (x *Field) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[2] + mi := &file_c1_config_v1_config_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -370,11 +596,6 @@ func (x *Field) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Field.ProtoReflect.Descriptor instead. 
-func (*Field) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{2} -} - func (x *Field) GetName() string { if x != nil { return x.Name @@ -476,156 +697,1137 @@ func (x *Field) GetStringMapField() *StringMapField { return nil } -type isField_Field interface { - isField_Field() +func (x *Field) GetResourceIdField() *ResourceIdField { + if x != nil { + if x, ok := x.Field.(*Field_ResourceIdField); ok { + return x.ResourceIdField + } + } + return nil } -type Field_StringField struct { - StringField *StringField `protobuf:"bytes,100,opt,name=string_field,json=stringField,proto3,oneof"` +func (x *Field) GetResourceIdSliceField() *ResourceIdSliceField { + if x != nil { + if x, ok := x.Field.(*Field_ResourceIdSliceField); ok { + return x.ResourceIdSliceField + } + } + return nil } -type Field_IntField struct { - IntField *IntField `protobuf:"bytes,101,opt,name=int_field,json=intField,proto3,oneof"` +func (x *Field) GetResourceField() *ResourceField { + if x != nil { + if x, ok := x.Field.(*Field_ResourceField); ok { + return x.ResourceField + } + } + return nil } -type Field_BoolField struct { - BoolField *BoolField `protobuf:"bytes,102,opt,name=bool_field,json=boolField,proto3,oneof"` +func (x *Field) GetResourceSliceField() *ResourceSliceField { + if x != nil { + if x, ok := x.Field.(*Field_ResourceSliceField); ok { + return x.ResourceSliceField + } + } + return nil } -type Field_StringSliceField struct { - StringSliceField *StringSliceField `protobuf:"bytes,103,opt,name=string_slice_field,json=stringSliceField,proto3,oneof"` +func (x *Field) SetName(v string) { + x.Name = v } -type Field_StringMapField struct { - StringMapField *StringMapField `protobuf:"bytes,104,opt,name=string_map_field,json=stringMapField,proto3,oneof"` +func (x *Field) SetDisplayName(v string) { + x.DisplayName = v } -func (*Field_StringField) isField_Field() {} +func (x *Field) SetDescription(v string) { + x.Description = v +} -func (*Field_IntField) isField_Field() {} +func (x *Field) SetPlaceholder(v string) { + x.Placeholder = v +} -func (*Field_BoolField) isField_Field() {} +func (x *Field) SetIsRequired(v bool) { + x.IsRequired = v +} -func (*Field_StringSliceField) isField_Field() {} +func (x *Field) SetIsOps(v bool) { + x.IsOps = v +} -func (*Field_StringMapField) isField_Field() {} +func (x *Field) SetIsSecret(v bool) { + x.IsSecret = v +} -type IntField struct { - state protoimpl.MessageState `protogen:"open.v1"` - // rules - DefaultValue int64 `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` - Rules *Int64Rules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache +func (x *Field) SetStringField(v *StringField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_StringField{v} } -func (x *IntField) Reset() { - *x = IntField{} - mi := &file_c1_config_v1_config_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *Field) SetIntField(v *IntField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_IntField{v} } -func (x *IntField) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *Field) SetBoolField(v *BoolField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_BoolField{v} } -func (*IntField) ProtoMessage() {} +func (x *Field) SetStringSliceField(v *StringSliceField) { + if v == nil { + x.Field = nil + return + } + 
x.Field = &Field_StringSliceField{v} +} -func (x *IntField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *Field) SetStringMapField(v *StringMapField) { + if v == nil { + x.Field = nil + return } - return mi.MessageOf(x) + x.Field = &Field_StringMapField{v} } -// Deprecated: Use IntField.ProtoReflect.Descriptor instead. -func (*IntField) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{3} +func (x *Field) SetResourceIdField(v *ResourceIdField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_ResourceIdField{v} } -func (x *IntField) GetDefaultValue() int64 { - if x != nil { - return x.DefaultValue +func (x *Field) SetResourceIdSliceField(v *ResourceIdSliceField) { + if v == nil { + x.Field = nil + return } - return 0 + x.Field = &Field_ResourceIdSliceField{v} } -func (x *IntField) GetRules() *Int64Rules { - if x != nil { - return x.Rules +func (x *Field) SetResourceField(v *ResourceField) { + if v == nil { + x.Field = nil + return } - return nil + x.Field = &Field_ResourceField{v} } -type BoolField struct { - state protoimpl.MessageState `protogen:"open.v1"` - DefaultValue bool `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` - Rules *BoolRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache +func (x *Field) SetResourceSliceField(v *ResourceSliceField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_ResourceSliceField{v} } -func (x *BoolField) Reset() { - *x = BoolField{} - mi := &file_c1_config_v1_config_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *Field) HasField() bool { + if x == nil { + return false + } + return x.Field != nil } -func (x *BoolField) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *Field) HasStringField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_StringField) + return ok } -func (*BoolField) ProtoMessage() {} +func (x *Field) HasIntField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_IntField) + return ok +} -func (x *BoolField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *Field) HasBoolField() bool { + if x == nil { + return false } - return mi.MessageOf(x) + _, ok := x.Field.(*Field_BoolField) + return ok } -// Deprecated: Use BoolField.ProtoReflect.Descriptor instead. 
-func (*BoolField) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{4} +func (x *Field) HasStringSliceField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_StringSliceField) + return ok } -func (x *BoolField) GetDefaultValue() bool { - if x != nil { - return x.DefaultValue +func (x *Field) HasStringMapField() bool { + if x == nil { + return false } - return false + _, ok := x.Field.(*Field_StringMapField) + return ok } -func (x *BoolField) GetRules() *BoolRules { - if x != nil { - return x.Rules +func (x *Field) HasResourceIdField() bool { + if x == nil { + return false } - return nil + _, ok := x.Field.(*Field_ResourceIdField) + return ok } -type StringSliceField struct { - state protoimpl.MessageState `protogen:"open.v1"` - DefaultValue []string `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` - Rules *RepeatedStringRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache +func (x *Field) HasResourceIdSliceField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_ResourceIdSliceField) + return ok +} + +func (x *Field) HasResourceField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_ResourceField) + return ok +} + +func (x *Field) HasResourceSliceField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_ResourceSliceField) + return ok +} + +func (x *Field) ClearField() { + x.Field = nil +} + +func (x *Field) ClearStringField() { + if _, ok := x.Field.(*Field_StringField); ok { + x.Field = nil + } +} + +func (x *Field) ClearIntField() { + if _, ok := x.Field.(*Field_IntField); ok { + x.Field = nil + } +} + +func (x *Field) ClearBoolField() { + if _, ok := x.Field.(*Field_BoolField); ok { + x.Field = nil + } +} + +func (x *Field) ClearStringSliceField() { + if _, ok := x.Field.(*Field_StringSliceField); ok { + x.Field = nil + } +} + +func (x *Field) ClearStringMapField() { + if _, ok := x.Field.(*Field_StringMapField); ok { + x.Field = nil + } +} + +func (x *Field) ClearResourceIdField() { + if _, ok := x.Field.(*Field_ResourceIdField); ok { + x.Field = nil + } +} + +func (x *Field) ClearResourceIdSliceField() { + if _, ok := x.Field.(*Field_ResourceIdSliceField); ok { + x.Field = nil + } +} + +func (x *Field) ClearResourceField() { + if _, ok := x.Field.(*Field_ResourceField); ok { + x.Field = nil + } +} + +func (x *Field) ClearResourceSliceField() { + if _, ok := x.Field.(*Field_ResourceSliceField); ok { + x.Field = nil + } +} + +const Field_Field_not_set_case case_Field_Field = 0 +const Field_StringField_case case_Field_Field = 100 +const Field_IntField_case case_Field_Field = 101 +const Field_BoolField_case case_Field_Field = 102 +const Field_StringSliceField_case case_Field_Field = 103 +const Field_StringMapField_case case_Field_Field = 104 +const Field_ResourceIdField_case case_Field_Field = 105 +const Field_ResourceIdSliceField_case case_Field_Field = 106 +const Field_ResourceField_case case_Field_Field = 107 +const Field_ResourceSliceField_case case_Field_Field = 108 + +func (x *Field) WhichField() case_Field_Field { + if x == nil { + return Field_Field_not_set_case + } + switch x.Field.(type) { + case *Field_StringField: + return Field_StringField_case + case *Field_IntField: + return Field_IntField_case + case *Field_BoolField: + return Field_BoolField_case + case *Field_StringSliceField: + return 
Field_StringSliceField_case + case *Field_StringMapField: + return Field_StringMapField_case + case *Field_ResourceIdField: + return Field_ResourceIdField_case + case *Field_ResourceIdSliceField: + return Field_ResourceIdSliceField_case + case *Field_ResourceField: + return Field_ResourceField_case + case *Field_ResourceSliceField: + return Field_ResourceSliceField_case + default: + return Field_Field_not_set_case + } +} + +type Field_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + DisplayName string + Description string + Placeholder string + IsRequired bool + IsOps bool + IsSecret bool + // Fields of oneof Field: + StringField *StringField + IntField *IntField + BoolField *BoolField + StringSliceField *StringSliceField + StringMapField *StringMapField + ResourceIdField *ResourceIdField + ResourceIdSliceField *ResourceIdSliceField + // These are meant to serve as return types for actions. + ResourceField *ResourceField + ResourceSliceField *ResourceSliceField + // -- end of Field +} + +func (b0 Field_builder) Build() *Field { + m0 := &Field{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.DisplayName = b.DisplayName + x.Description = b.Description + x.Placeholder = b.Placeholder + x.IsRequired = b.IsRequired + x.IsOps = b.IsOps + x.IsSecret = b.IsSecret + if b.StringField != nil { + x.Field = &Field_StringField{b.StringField} + } + if b.IntField != nil { + x.Field = &Field_IntField{b.IntField} + } + if b.BoolField != nil { + x.Field = &Field_BoolField{b.BoolField} + } + if b.StringSliceField != nil { + x.Field = &Field_StringSliceField{b.StringSliceField} + } + if b.StringMapField != nil { + x.Field = &Field_StringMapField{b.StringMapField} + } + if b.ResourceIdField != nil { + x.Field = &Field_ResourceIdField{b.ResourceIdField} + } + if b.ResourceIdSliceField != nil { + x.Field = &Field_ResourceIdSliceField{b.ResourceIdSliceField} + } + if b.ResourceField != nil { + x.Field = &Field_ResourceField{b.ResourceField} + } + if b.ResourceSliceField != nil { + x.Field = &Field_ResourceSliceField{b.ResourceSliceField} + } + return m0 +} + +type case_Field_Field protoreflect.FieldNumber + +func (x case_Field_Field) String() string { + md := file_c1_config_v1_config_proto_msgTypes[3].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isField_Field interface { + isField_Field() +} + +type Field_StringField struct { + StringField *StringField `protobuf:"bytes,100,opt,name=string_field,json=stringField,proto3,oneof"` +} + +type Field_IntField struct { + IntField *IntField `protobuf:"bytes,101,opt,name=int_field,json=intField,proto3,oneof"` +} + +type Field_BoolField struct { + BoolField *BoolField `protobuf:"bytes,102,opt,name=bool_field,json=boolField,proto3,oneof"` +} + +type Field_StringSliceField struct { + StringSliceField *StringSliceField `protobuf:"bytes,103,opt,name=string_slice_field,json=stringSliceField,proto3,oneof"` +} + +type Field_StringMapField struct { + StringMapField *StringMapField `protobuf:"bytes,104,opt,name=string_map_field,json=stringMapField,proto3,oneof"` +} + +type Field_ResourceIdField struct { + ResourceIdField *ResourceIdField `protobuf:"bytes,105,opt,name=resource_id_field,json=resourceIdField,proto3,oneof"` +} + +type Field_ResourceIdSliceField struct { + ResourceIdSliceField *ResourceIdSliceField `protobuf:"bytes,106,opt,name=resource_id_slice_field,json=resourceIdSliceField,proto3,oneof"` +} + +type 
Field_ResourceField struct { + // These are meant to serve as return types for actions. + ResourceField *ResourceField `protobuf:"bytes,107,opt,name=resource_field,json=resourceField,proto3,oneof"` +} + +type Field_ResourceSliceField struct { + ResourceSliceField *ResourceSliceField `protobuf:"bytes,108,opt,name=resource_slice_field,json=resourceSliceField,proto3,oneof"` +} + +func (*Field_StringField) isField_Field() {} + +func (*Field_IntField) isField_Field() {} + +func (*Field_BoolField) isField_Field() {} + +func (*Field_StringSliceField) isField_Field() {} + +func (*Field_StringMapField) isField_Field() {} + +func (*Field_ResourceIdField) isField_Field() {} + +func (*Field_ResourceIdSliceField) isField_Field() {} + +func (*Field_ResourceField) isField_Field() {} + +func (*Field_ResourceSliceField) isField_Field() {} + +// These are partially duplicate with the Resource proto in the connector package. +// This is to avoid import cycles +type Resource struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + Annotations []*anypb.Any `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Resource) Reset() { + *x = Resource{} + mi := &file_c1_config_v1_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Resource) GetResourceId() *ResourceId { + if x != nil { + return x.ResourceId + } + return nil +} + +func (x *Resource) GetParentResourceId() *ResourceId { + if x != nil { + return x.ParentResourceId + } + return nil +} + +func (x *Resource) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Resource) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Resource) GetAnnotations() []*anypb.Any { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *Resource) SetResourceId(v *ResourceId) { + x.ResourceId = v +} + +func (x *Resource) SetParentResourceId(v *ResourceId) { + x.ParentResourceId = v +} + +func (x *Resource) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *Resource) SetDescription(v string) { + x.Description = v +} + +func (x *Resource) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Resource) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *Resource) HasParentResourceId() bool { + if x == nil { + return false + } + return x.ParentResourceId != nil +} + +func (x *Resource) ClearResourceId() { + x.ResourceId = nil +} + +func (x 
*Resource) ClearParentResourceId() { + x.ParentResourceId = nil +} + +type Resource_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + ParentResourceId *ResourceId + DisplayName string + Description string + Annotations []*anypb.Any +} + +func (b0 Resource_builder) Build() *Resource { + m0 := &Resource{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.ParentResourceId = b.ParentResourceId + x.DisplayName = b.DisplayName + x.Description = b.Description + x.Annotations = b.Annotations + return m0 +} + +type ResourceId struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` + ResourceId string `protobuf:"bytes,2,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceId) Reset() { + *x = ResourceId{} + mi := &file_c1_config_v1_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceId) ProtoMessage() {} + +func (x *ResourceId) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceId) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + +func (x *ResourceId) GetResourceId() string { + if x != nil { + return x.ResourceId + } + return "" +} + +func (x *ResourceId) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +func (x *ResourceId) SetResourceId(v string) { + x.ResourceId = v +} + +type ResourceId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceTypeId string + ResourceId string +} + +func (b0 ResourceId_builder) Build() *ResourceId { + m0 := &ResourceId{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceTypeId = b.ResourceTypeId + x.ResourceId = b.ResourceId + return m0 +} + +type ResourceField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + DefaultValue *Resource `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceField) Reset() { + *x = ResourceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceField) ProtoMessage() {} + +func (x *ResourceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceField) GetDefaultValue() *Resource { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *ResourceField) SetDefaultValue(v *Resource) { + x.DefaultValue = v +} + +func (x *ResourceField) HasDefaultValue() bool { + if x == nil { + return false + } + return x.DefaultValue != nil +} + +func (x *ResourceField) ClearDefaultValue() { + x.DefaultValue = nil +} + +type ResourceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue *Resource +} + +func (b0 ResourceField_builder) Build() *ResourceField { + m0 := &ResourceField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 +} + +type ResourceSliceField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + DefaultValue []*Resource `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceSliceField) Reset() { + *x = ResourceSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSliceField) ProtoMessage() {} + +func (x *ResourceSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceSliceField) GetDefaultValue() []*Resource { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *ResourceSliceField) SetDefaultValue(v []*Resource) { + x.DefaultValue = v +} + +type ResourceSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue []*Resource +} + +func (b0 ResourceSliceField_builder) Build() *ResourceSliceField { + m0 := &ResourceSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 +} + +type ResourceIdField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + DefaultValue *ResourceId `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + Rules *ResourceIDRules `protobuf:"bytes,3,opt,name=rules,proto3,oneof" json:"rules,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceIdField) Reset() { + *x = ResourceIdField{} + mi := &file_c1_config_v1_config_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceIdField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceIdField) ProtoMessage() {} + +func (x *ResourceIdField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceIdField) GetDefaultValue() *ResourceId { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *ResourceIdField) GetRules() *ResourceIDRules { + if x != nil { + return x.Rules + } + return nil +} + +func (x *ResourceIdField) SetDefaultValue(v *ResourceId) { + x.DefaultValue = v +} + +func (x *ResourceIdField) SetRules(v *ResourceIDRules) { + x.Rules = v +} + +func (x *ResourceIdField) HasDefaultValue() bool { + if x == nil { + return false + } + return x.DefaultValue != nil +} + +func (x *ResourceIdField) HasRules() bool { + if x == nil { + return false + } + return x.Rules != nil +} + +func (x *ResourceIdField) ClearDefaultValue() { + x.DefaultValue = nil +} + +func (x *ResourceIdField) ClearRules() { + x.Rules = nil +} + +type ResourceIdField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue *ResourceId + Rules *ResourceIDRules +} + +func (b0 ResourceIdField_builder) Build() *ResourceIdField { + m0 := &ResourceIdField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + x.Rules = b.Rules + return m0 +} + +type ResourceIdSliceField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + DefaultValue []*ResourceIdField `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + Rules *RepeatedResourceIdRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceIdSliceField) Reset() { + *x = ResourceIdSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceIdSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceIdSliceField) ProtoMessage() {} + +func (x *ResourceIdSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceIdSliceField) GetDefaultValue() []*ResourceIdField { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *ResourceIdSliceField) GetRules() *RepeatedResourceIdRules { + if x != nil { + return x.Rules + } + return nil +} + +func (x *ResourceIdSliceField) SetDefaultValue(v []*ResourceIdField) { + x.DefaultValue = v +} + +func (x *ResourceIdSliceField) SetRules(v *RepeatedResourceIdRules) { + x.Rules = v +} + +func (x *ResourceIdSliceField) HasRules() bool { + if x == nil { + return false + } + return x.Rules != nil +} + +func (x *ResourceIdSliceField) ClearRules() { + x.Rules = nil +} + +type ResourceIdSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue []*ResourceIdField + Rules *RepeatedResourceIdRules +} + +func (b0 ResourceIdSliceField_builder) Build() *ResourceIdSliceField { + m0 := &ResourceIdSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + x.Rules = b.Rules + return m0 +} + +type IntField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // rules + DefaultValue int64 `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + Rules *Int64Rules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IntField) Reset() { + *x = IntField{} + mi := &file_c1_config_v1_config_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IntField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IntField) ProtoMessage() {} + +func (x *IntField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *IntField) GetDefaultValue() int64 { + if x != nil { + return x.DefaultValue + } + return 0 +} + +func (x *IntField) GetRules() *Int64Rules { + if x != nil { + return x.Rules + } + return nil +} + +func (x *IntField) SetDefaultValue(v int64) { + x.DefaultValue = v +} + +func (x *IntField) SetRules(v *Int64Rules) { + x.Rules = v +} + +func (x *IntField) HasRules() bool { + if x == nil { + return false + } + return x.Rules != nil +} + +func (x *IntField) ClearRules() { + x.Rules = nil +} + +type IntField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // rules + DefaultValue int64 + Rules *Int64Rules +} + +func (b0 IntField_builder) Build() *IntField { + m0 := &IntField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + x.Rules = b.Rules + return m0 +} + +type BoolField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + DefaultValue bool `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + Rules *BoolRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BoolField) Reset() { + *x = BoolField{} + mi := &file_c1_config_v1_config_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BoolField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolField) ProtoMessage() {} + +func (x *BoolField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BoolField) GetDefaultValue() bool { + if x != nil { + return x.DefaultValue + } + return false +} + +func (x *BoolField) GetRules() *BoolRules { + if x != nil { + return x.Rules + } + return nil +} + +func (x *BoolField) SetDefaultValue(v bool) { + x.DefaultValue = v +} + +func (x *BoolField) SetRules(v *BoolRules) { + x.Rules = v +} + +func (x *BoolField) HasRules() bool { + if x == nil { + return false + } + return x.Rules != nil +} + +func (x *BoolField) ClearRules() { + x.Rules = nil +} + +type BoolField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue bool + Rules *BoolRules +} + +func (b0 BoolField_builder) Build() *BoolField { + m0 := &BoolField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + x.Rules = b.Rules + return m0 +} + +type StringSliceField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + DefaultValue []string `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + Rules *RepeatedStringRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StringSliceField) Reset() { *x = StringSliceField{} - mi := &file_c1_config_v1_config_proto_msgTypes[5] + mi := &file_c1_config_v1_config_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -637,7 +1839,7 @@ func (x *StringSliceField) String() string { func (*StringSliceField) ProtoMessage() {} func (x *StringSliceField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[5] + mi := &file_c1_config_v1_config_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -648,11 +1850,6 @@ func (x *StringSliceField) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringSliceField.ProtoReflect.Descriptor instead. 
-func (*StringSliceField) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{5} -} - func (x *StringSliceField) GetDefaultValue() []string { if x != nil { return x.DefaultValue @@ -667,8 +1864,43 @@ func (x *StringSliceField) GetRules() *RepeatedStringRules { return nil } +func (x *StringSliceField) SetDefaultValue(v []string) { + x.DefaultValue = v +} + +func (x *StringSliceField) SetRules(v *RepeatedStringRules) { + x.Rules = v +} + +func (x *StringSliceField) HasRules() bool { + if x == nil { + return false + } + return x.Rules != nil +} + +func (x *StringSliceField) ClearRules() { + x.Rules = nil +} + +type StringSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue []string + Rules *RepeatedStringRules +} + +func (b0 StringSliceField_builder) Build() *StringSliceField { + m0 := &StringSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + x.Rules = b.Rules + return m0 +} + type StringMapField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DefaultValue map[string]*anypb.Any `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Rules *StringMapRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` unknownFields protoimpl.UnknownFields @@ -677,7 +1909,7 @@ type StringMapField struct { func (x *StringMapField) Reset() { *x = StringMapField{} - mi := &file_c1_config_v1_config_proto_msgTypes[6] + mi := &file_c1_config_v1_config_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -689,7 +1921,7 @@ func (x *StringMapField) String() string { func (*StringMapField) ProtoMessage() {} func (x *StringMapField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[6] + mi := &file_c1_config_v1_config_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -700,11 +1932,6 @@ func (x *StringMapField) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringMapField.ProtoReflect.Descriptor instead. -func (*StringMapField) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{6} -} - func (x *StringMapField) GetDefaultValue() map[string]*anypb.Any { if x != nil { return x.DefaultValue @@ -719,8 +1946,43 @@ func (x *StringMapField) GetRules() *StringMapRules { return nil } +func (x *StringMapField) SetDefaultValue(v map[string]*anypb.Any) { + x.DefaultValue = v +} + +func (x *StringMapField) SetRules(v *StringMapRules) { + x.Rules = v +} + +func (x *StringMapField) HasRules() bool { + if x == nil { + return false + } + return x.Rules != nil +} + +func (x *StringMapField) ClearRules() { + x.Rules = nil +} + +type StringMapField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue map[string]*anypb.Any + Rules *StringMapRules +} + +func (b0 StringMapField_builder) Build() *StringMapField { + m0 := &StringMapField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + x.Rules = b.Rules + return m0 +} + type StringFieldOption struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` @@ -730,7 +1992,7 @@ type StringFieldOption struct { func (x *StringFieldOption) Reset() { *x = StringFieldOption{} - mi := &file_c1_config_v1_config_proto_msgTypes[7] + mi := &file_c1_config_v1_config_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -742,7 +2004,7 @@ func (x *StringFieldOption) String() string { func (*StringFieldOption) ProtoMessage() {} func (x *StringFieldOption) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[7] + mi := &file_c1_config_v1_config_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -753,11 +2015,6 @@ func (x *StringFieldOption) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringFieldOption.ProtoReflect.Descriptor instead. -func (*StringFieldOption) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{7} -} - func (x *StringFieldOption) GetName() string { if x != nil { return x.Name @@ -779,8 +2036,38 @@ func (x *StringFieldOption) GetDisplayName() string { return "" } +func (x *StringFieldOption) SetName(v string) { + x.Name = v +} + +func (x *StringFieldOption) SetValue(v string) { + x.Value = v +} + +func (x *StringFieldOption) SetDisplayName(v string) { + x.DisplayName = v +} + +type StringFieldOption_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Value string + DisplayName string +} + +func (b0 StringFieldOption_builder) Build() *StringFieldOption { + m0 := &StringFieldOption{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Value = b.Value + x.DisplayName = b.DisplayName + return m0 +} + type StringField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DefaultValue string `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` Rules *StringRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof" json:"rules,omitempty"` Type StringFieldType `protobuf:"varint,3,opt,name=type,proto3,enum=c1.config.v1.StringFieldType" json:"type,omitempty"` @@ -793,7 +2080,7 @@ type StringField struct { func (x *StringField) Reset() { *x = StringField{} - mi := &file_c1_config_v1_config_proto_msgTypes[8] + mi := &file_c1_config_v1_config_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -805,7 +2092,7 @@ func (x *StringField) String() string { func (*StringField) ProtoMessage() {} func (x *StringField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[8] + mi := &file_c1_config_v1_config_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -816,11 +2103,6 @@ func (x *StringField) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringField.ProtoReflect.Descriptor instead. -func (*StringField) Descriptor() ([]byte, []int) { - return file_c1_config_v1_config_proto_rawDescGZIP(), []int{8} -} - func (x *StringField) GetDefaultValue() string { if x != nil { return x.DefaultValue @@ -856,240 +2138,249 @@ func (x *StringField) GetOptions() []*StringFieldOption { return nil } -var File_c1_config_v1_config_proto protoreflect.FileDescriptor +func (x *StringField) SetDefaultValue(v string) { + x.DefaultValue = v +} -var file_c1_config_v1_config_proto_rawDesc = string([]byte{ - 0x0a, 0x19, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x18, 0x63, 0x31, 0x2f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x93, - 0x03, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x2b, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x3a, 0x0a, - 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x69, 0x73, 
0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, - 0x68, 0x65, 0x6c, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x68, 0x65, 0x6c, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x63, 0x6f, 0x6e, 0x5f, - 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x69, 0x63, 0x6f, 0x6e, 0x55, - 0x72, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, - 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x61, 0x74, 0x61, 0x6c, - 0x6f, 0x67, 0x49, 0x64, 0x12, 0x3e, 0x0a, 0x1b, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x73, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x72, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x73, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x22, 0xea, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, - 0x69, 0x6e, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x52, - 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x61, 0x72, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x68, 0x65, 0x6c, 0x70, 0x5f, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x68, 0x65, 0x6c, 0x70, 0x54, 0x65, 0x78, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x69, - 0x73, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x22, 0xab, 0x04, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x63, 0x65, - 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x73, 0x5f, 0x6f, 0x70, - 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x69, 0x73, 0x4f, 0x70, 0x73, 0x12, 0x1b, - 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x69, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x64, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x35, 0x0a, 0x09, 0x69, - 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, - 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x38, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, - 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x12, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x6c, - 0x69, 0x63, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x10, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x48, 0x0a, 0x10, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x70, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, - 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x22, - 0x6e, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x33, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x49, - 0x6e, 0x74, 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x72, 0x75, 0x6c, - 0x65, 0x73, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x72, 0x75, 
0x6c, 0x65, 0x73, 0x22, - 0x6e, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x42, 0x6f, 0x6f, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x72, 0x75, 0x6c, - 0x65, 0x73, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, - 0x7f, 0x0a, 0x10, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x72, 0x75, - 0x6c, 0x65, 0x73, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x22, 0xff, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x70, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x4d, 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x70, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x88, 0x01, - 0x01, 0x1a, 0x55, 0x0a, 0x11, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x72, 0x75, 0x6c, - 0x65, 0x73, 0x22, 0x60, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x8f, 0x02, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x72, 0x75, 0x6c, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, - 0x31, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x2a, 0xc4, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x72, 0x61, 0x69, 0x6e, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, - 0x53, 0x54, 0x52, 0x41, 0x49, 0x4e, 0x54, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x25, 0x0a, 0x21, 0x43, 0x4f, - 0x4e, 0x53, 0x54, 0x52, 0x41, 0x49, 0x4e, 0x54, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x52, 0x45, - 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x5f, 0x54, 0x4f, 0x47, 0x45, 0x54, 0x48, 0x45, 0x52, 0x10, - 0x01, 0x12, 0x20, 0x0a, 0x1c, 0x43, 0x4f, 0x4e, 0x53, 0x54, 0x52, 0x41, 0x49, 0x4e, 0x54, 0x5f, - 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x41, 0x54, 0x5f, 0x4c, 0x45, 0x41, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x45, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, 0x43, 0x4f, 0x4e, 0x53, 0x54, 0x52, 0x41, 0x49, 0x4e, - 0x54, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4d, 0x55, 0x54, 0x55, 0x41, 0x4c, 0x4c, 0x59, 0x5f, - 0x45, 0x58, 0x43, 0x4c, 0x55, 0x53, 0x49, 0x56, 0x45, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x43, - 0x4f, 0x4e, 0x53, 0x54, 0x52, 0x41, 0x49, 0x4e, 0x54, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x44, - 0x45, 0x50, 0x45, 0x4e, 0x44, 0x45, 0x4e, 0x54, 0x5f, 0x4f, 0x4e, 0x10, 0x04, 0x2a, 0xc9, 0x01, - 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x45, 0x58, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x52, - 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, - 0x41, 0x4e, 0x44, 0x4f, 
0x4d, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, - 0x54, 0x48, 0x32, 0x10, 0x02, 0x12, 0x2f, 0x0a, 0x2b, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, - 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x4f, 0x52, 0x5f, 0x44, 0x45, 0x52, 0x49, 0x56, 0x45, 0x44, 0x5f, 0x4f, 0x50, 0x54, - 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x5f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x04, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, - 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, - 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +func (x *StringField) SetRules(v *StringRules) { + x.Rules = v +} -var ( - file_c1_config_v1_config_proto_rawDescOnce sync.Once - file_c1_config_v1_config_proto_rawDescData []byte -) +func (x *StringField) SetType(v StringFieldType) { + x.Type = v +} -func file_c1_config_v1_config_proto_rawDescGZIP() []byte { - file_c1_config_v1_config_proto_rawDescOnce.Do(func() { - file_c1_config_v1_config_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_config_v1_config_proto_rawDesc), len(file_c1_config_v1_config_proto_rawDesc))) - }) - return file_c1_config_v1_config_proto_rawDescData +func (x *StringField) SetAllowedExtensions(v []string) { + x.AllowedExtensions = v } +func (x *StringField) SetOptions(v []*StringFieldOption) { + x.Options = v +} + +func (x *StringField) HasRules() bool { + if x == nil { + return false + } + return x.Rules != nil +} + +func (x *StringField) ClearRules() { + x.Rules = nil +} + +type StringField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue string + Rules *StringRules + Type StringFieldType + // only used for FileUpload atm, oneofs are evil + AllowedExtensions []string + Options []*StringFieldOption +} + +func (b0 StringField_builder) Build() *StringField { + m0 := &StringField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + x.Rules = b.Rules + x.Type = b.Type + x.AllowedExtensions = b.AllowedExtensions + x.Options = b.Options + return m0 +} + +var File_c1_config_v1_config_proto protoreflect.FileDescriptor + +const file_c1_config_v1_config_proto_rawDesc = "" + + "\n" + + "\x19c1/config/v1/config.proto\x12\fc1.config.v1\x1a\x18c1/config/v1/rules.proto\x1a\x19google/protobuf/any.proto\"\xd0\x03\n" + + "\rConfiguration\x12+\n" + + "\x06fields\x18\x01 \x03(\v2\x13.c1.config.v1.FieldR\x06fields\x12:\n" + + "\vconstraints\x18\x02 \x03(\v2\x18.c1.config.v1.ConstraintR\vconstraints\x12!\n" + + "\fdisplay_name\x18\x03 \x01(\tR\vdisplayName\x12\x19\n" + + "\bhelp_url\x18\x04 \x01(\tR\ahelpUrl\x12\x19\n" + + "\bicon_url\x18\x05 \x01(\tR\aiconUrl\x12!\n" + + "\fis_directory\x18\a \x01(\bR\visDirectory\x12\x1d\n" + + "\n" + + "catalog_id\x18\b \x01(\tR\tcatalogId\x12>\n" + + "\x1bsupports_external_resources\x18\t \x01(\bR\x19supportsExternalResources\x12>\n" + + "\x1brequires_external_connector\x18\n" + + " \x01(\bR\x19requiresExternalConnector\x12;\n" + + "\ffield_groups\x18\v \x03(\v2\x18.c1.config.v1.FieldGroupR\vfieldGroups\"\xea\x01\n" + + "\n" + + "Constraint\x120\n" + + "\x04kind\x18\x01 \x01(\x0e2\x1c.c1.config.v1.ConstraintKindR\x04kind\x12\x1f\n" + + "\vfield_names\x18\x02 \x03(\tR\n" + + "fieldNames\x122\n" + + "\x15secondary_field_names\x18\x03 \x03(\tR\x13secondaryFieldNames\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\x12\x1b\n" + + "\thelp_text\x18\x05 \x01(\tR\bhelpText\x12$\n" + + "\x0eis_field_group\x18\x06 \x01(\bR\fisFieldGroup\"\x92\x01\n" + + "\n" + + "FieldGroup\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x1b\n" + + "\thelp_text\x18\x03 \x01(\tR\bhelpText\x12\x16\n" + + "\x06fields\x18\x04 \x03(\tR\x06fields\x12\x18\n" + + "\adefault\x18\x05 \x01(\bR\adefault\"\xf1\x06\n" + + "\x05Field\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x12 \n" + + "\vplaceholder\x18\x04 \x01(\tR\vplaceholder\x12\x1f\n" + + "\vis_required\x18\x05 \x01(\bR\n" + + "isRequired\x12\x15\n" + + "\x06is_ops\x18\x06 \x01(\bR\x05isOps\x12\x1b\n" + + "\tis_secret\x18\a \x01(\bR\bisSecret\x12>\n" + + "\fstring_field\x18d \x01(\v2\x19.c1.config.v1.StringFieldH\x00R\vstringField\x125\n" + + "\tint_field\x18e \x01(\v2\x16.c1.config.v1.IntFieldH\x00R\bintField\x128\n" + + "\n" + + "bool_field\x18f \x01(\v2\x17.c1.config.v1.BoolFieldH\x00R\tboolField\x12N\n" + + "\x12string_slice_field\x18g \x01(\v2\x1e.c1.config.v1.StringSliceFieldH\x00R\x10stringSliceField\x12H\n" + + "\x10string_map_field\x18h \x01(\v2\x1c.c1.config.v1.StringMapFieldH\x00R\x0estringMapField\x12K\n" + + "\x11resource_id_field\x18i \x01(\v2\x1d.c1.config.v1.ResourceIdFieldH\x00R\x0fresourceIdField\x12[\n" + + "\x17resource_id_slice_field\x18j \x01(\v2\".c1.config.v1.ResourceIdSliceFieldH\x00R\x14resourceIdSliceField\x12D\n" + + "\x0eresource_field\x18k \x01(\v2\x1b.c1.config.v1.ResourceFieldH\x00R\rresourceField\x12T\n" + + "\x14resource_slice_field\x18l \x01(\v2 .c1.config.v1.ResourceSliceFieldH\x00R\x12resourceSliceFieldB\a\n" + + "\x05field\"\x8a\x02\n" + 
+ "\bResource\x129\n" + + "\vresource_id\x18\x01 \x01(\v2\x18.c1.config.v1.ResourceIdR\n" + + "resourceId\x12F\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x18.c1.config.v1.ResourceIdR\x10parentResourceId\x12!\n" + + "\fdisplay_name\x18\x03 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x04 \x01(\tR\vdescription\x126\n" + + "\vannotations\x18\x05 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"W\n" + + "\n" + + "ResourceId\x12(\n" + + "\x10resource_type_id\x18\x01 \x01(\tR\x0eresourceTypeId\x12\x1f\n" + + "\vresource_id\x18\x02 \x01(\tR\n" + + "resourceId\"L\n" + + "\rResourceField\x12;\n" + + "\rdefault_value\x18\x01 \x01(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"Q\n" + + "\x12ResourceSliceField\x12;\n" + + "\rdefault_value\x18\x01 \x03(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"\x94\x01\n" + + "\x0fResourceIdField\x12=\n" + + "\rdefault_value\x18\x01 \x01(\v2\x18.c1.config.v1.ResourceIdR\fdefaultValue\x128\n" + + "\x05rules\x18\x03 \x01(\v2\x1d.c1.config.v1.ResourceIDRulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"\xa6\x01\n" + + "\x14ResourceIdSliceField\x12B\n" + + "\rdefault_value\x18\x01 \x03(\v2\x1d.c1.config.v1.ResourceIdFieldR\fdefaultValue\x12@\n" + + "\x05rules\x18\x02 \x01(\v2%.c1.config.v1.RepeatedResourceIdRulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"n\n" + + "\bIntField\x12#\n" + + "\rdefault_value\x18\x01 \x01(\x03R\fdefaultValue\x123\n" + + "\x05rules\x18\x02 \x01(\v2\x18.c1.config.v1.Int64RulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"n\n" + + "\tBoolField\x12#\n" + + "\rdefault_value\x18\x01 \x01(\bR\fdefaultValue\x122\n" + + "\x05rules\x18\x02 \x01(\v2\x17.c1.config.v1.BoolRulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"\x7f\n" + + "\x10StringSliceField\x12#\n" + + "\rdefault_value\x18\x01 \x03(\tR\fdefaultValue\x12<\n" + + "\x05rules\x18\x02 \x01(\v2!.c1.config.v1.RepeatedStringRulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"\xff\x01\n" + + "\x0eStringMapField\x12S\n" + + "\rdefault_value\x18\x01 \x03(\v2..c1.config.v1.StringMapField.DefaultValueEntryR\fdefaultValue\x127\n" + + "\x05rules\x18\x02 \x01(\v2\x1c.c1.config.v1.StringMapRulesH\x00R\x05rules\x88\x01\x01\x1aU\n" + + "\x11DefaultValueEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12*\n" + + "\x05value\x18\x02 \x01(\v2\x14.google.protobuf.AnyR\x05value:\x028\x01B\b\n" + + "\x06_rules\"`\n" + + "\x11StringFieldOption\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x12!\n" + + "\fdisplay_name\x18\x03 \x01(\tR\vdisplayName\"\x8f\x02\n" + + "\vStringField\x12#\n" + + "\rdefault_value\x18\x01 \x01(\tR\fdefaultValue\x124\n" + + "\x05rules\x18\x02 \x01(\v2\x19.c1.config.v1.StringRulesH\x00R\x05rules\x88\x01\x01\x121\n" + + "\x04type\x18\x03 \x01(\x0e2\x1d.c1.config.v1.StringFieldTypeR\x04type\x12-\n" + + "\x12allowed_extensions\x18\x04 \x03(\tR\x11allowedExtensions\x129\n" + + "\aoptions\x18\x05 \x03(\v2\x1f.c1.config.v1.StringFieldOptionR\aoptionsB\b\n" + + "\x06_rules*\xc4\x01\n" + + "\x0eConstraintKind\x12\x1f\n" + + "\x1bCONSTRAINT_KIND_UNSPECIFIED\x10\x00\x12%\n" + + "!CONSTRAINT_KIND_REQUIRED_TOGETHER\x10\x01\x12 \n" + + "\x1cCONSTRAINT_KIND_AT_LEAST_ONE\x10\x02\x12&\n" + + "\"CONSTRAINT_KIND_MUTUALLY_EXCLUSIVE\x10\x03\x12 \n" + + "\x1cCONSTRAINT_KIND_DEPENDENT_ON\x10\x04*\xc9\x01\n" + + "\x0fStringFieldType\x12&\n" + + "\"STRING_FIELD_TYPE_TEXT_UNSPECIFIED\x10\x00\x12\x1c\n" + + "\x18STRING_FIELD_TYPE_RANDOM\x10\x01\x12\x1c\n" + + "\x18STRING_FIELD_TYPE_OAUTH2\x10\x02\x12/\n" + + 
"+STRING_FIELD_TYPE_CONNECTOR_DERIVED_OPTIONS\x10\x03\x12!\n" + + "\x1dSTRING_FIELD_TYPE_FILE_UPLOAD\x10\x04B3Z1github.com/conductorone/baton-sdk/pb/c1/config/v1b\x06proto3" + var file_c1_config_v1_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_c1_config_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_c1_config_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_c1_config_v1_config_proto_goTypes = []any{ - (ConstraintKind)(0), // 0: c1.config.v1.ConstraintKind - (StringFieldType)(0), // 1: c1.config.v1.StringFieldType - (*Configuration)(nil), // 2: c1.config.v1.Configuration - (*Constraint)(nil), // 3: c1.config.v1.Constraint - (*Field)(nil), // 4: c1.config.v1.Field - (*IntField)(nil), // 5: c1.config.v1.IntField - (*BoolField)(nil), // 6: c1.config.v1.BoolField - (*StringSliceField)(nil), // 7: c1.config.v1.StringSliceField - (*StringMapField)(nil), // 8: c1.config.v1.StringMapField - (*StringFieldOption)(nil), // 9: c1.config.v1.StringFieldOption - (*StringField)(nil), // 10: c1.config.v1.StringField - nil, // 11: c1.config.v1.StringMapField.DefaultValueEntry - (*Int64Rules)(nil), // 12: c1.config.v1.Int64Rules - (*BoolRules)(nil), // 13: c1.config.v1.BoolRules - (*RepeatedStringRules)(nil), // 14: c1.config.v1.RepeatedStringRules - (*StringMapRules)(nil), // 15: c1.config.v1.StringMapRules - (*StringRules)(nil), // 16: c1.config.v1.StringRules - (*anypb.Any)(nil), // 17: google.protobuf.Any + (ConstraintKind)(0), // 0: c1.config.v1.ConstraintKind + (StringFieldType)(0), // 1: c1.config.v1.StringFieldType + (*Configuration)(nil), // 2: c1.config.v1.Configuration + (*Constraint)(nil), // 3: c1.config.v1.Constraint + (*FieldGroup)(nil), // 4: c1.config.v1.FieldGroup + (*Field)(nil), // 5: c1.config.v1.Field + (*Resource)(nil), // 6: c1.config.v1.Resource + (*ResourceId)(nil), // 7: c1.config.v1.ResourceId + (*ResourceField)(nil), // 8: c1.config.v1.ResourceField + (*ResourceSliceField)(nil), // 9: c1.config.v1.ResourceSliceField + (*ResourceIdField)(nil), // 10: c1.config.v1.ResourceIdField + (*ResourceIdSliceField)(nil), // 11: c1.config.v1.ResourceIdSliceField + (*IntField)(nil), // 12: c1.config.v1.IntField + (*BoolField)(nil), // 13: c1.config.v1.BoolField + (*StringSliceField)(nil), // 14: c1.config.v1.StringSliceField + (*StringMapField)(nil), // 15: c1.config.v1.StringMapField + (*StringFieldOption)(nil), // 16: c1.config.v1.StringFieldOption + (*StringField)(nil), // 17: c1.config.v1.StringField + nil, // 18: c1.config.v1.StringMapField.DefaultValueEntry + (*anypb.Any)(nil), // 19: google.protobuf.Any + (*ResourceIDRules)(nil), // 20: c1.config.v1.ResourceIDRules + (*RepeatedResourceIdRules)(nil), // 21: c1.config.v1.RepeatedResourceIdRules + (*Int64Rules)(nil), // 22: c1.config.v1.Int64Rules + (*BoolRules)(nil), // 23: c1.config.v1.BoolRules + (*RepeatedStringRules)(nil), // 24: c1.config.v1.RepeatedStringRules + (*StringMapRules)(nil), // 25: c1.config.v1.StringMapRules + (*StringRules)(nil), // 26: c1.config.v1.StringRules } var file_c1_config_v1_config_proto_depIdxs = []int32{ - 4, // 0: c1.config.v1.Configuration.fields:type_name -> c1.config.v1.Field + 5, // 0: c1.config.v1.Configuration.fields:type_name -> c1.config.v1.Field 3, // 1: c1.config.v1.Configuration.constraints:type_name -> c1.config.v1.Constraint - 0, // 2: c1.config.v1.Constraint.kind:type_name -> c1.config.v1.ConstraintKind - 10, // 3: c1.config.v1.Field.string_field:type_name -> c1.config.v1.StringField - 5, // 4: 
c1.config.v1.Field.int_field:type_name -> c1.config.v1.IntField - 6, // 5: c1.config.v1.Field.bool_field:type_name -> c1.config.v1.BoolField - 7, // 6: c1.config.v1.Field.string_slice_field:type_name -> c1.config.v1.StringSliceField - 8, // 7: c1.config.v1.Field.string_map_field:type_name -> c1.config.v1.StringMapField - 12, // 8: c1.config.v1.IntField.rules:type_name -> c1.config.v1.Int64Rules - 13, // 9: c1.config.v1.BoolField.rules:type_name -> c1.config.v1.BoolRules - 14, // 10: c1.config.v1.StringSliceField.rules:type_name -> c1.config.v1.RepeatedStringRules - 11, // 11: c1.config.v1.StringMapField.default_value:type_name -> c1.config.v1.StringMapField.DefaultValueEntry - 15, // 12: c1.config.v1.StringMapField.rules:type_name -> c1.config.v1.StringMapRules - 16, // 13: c1.config.v1.StringField.rules:type_name -> c1.config.v1.StringRules - 1, // 14: c1.config.v1.StringField.type:type_name -> c1.config.v1.StringFieldType - 9, // 15: c1.config.v1.StringField.options:type_name -> c1.config.v1.StringFieldOption - 17, // 16: c1.config.v1.StringMapField.DefaultValueEntry.value:type_name -> google.protobuf.Any - 17, // [17:17] is the sub-list for method output_type - 17, // [17:17] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name + 4, // 2: c1.config.v1.Configuration.field_groups:type_name -> c1.config.v1.FieldGroup + 0, // 3: c1.config.v1.Constraint.kind:type_name -> c1.config.v1.ConstraintKind + 17, // 4: c1.config.v1.Field.string_field:type_name -> c1.config.v1.StringField + 12, // 5: c1.config.v1.Field.int_field:type_name -> c1.config.v1.IntField + 13, // 6: c1.config.v1.Field.bool_field:type_name -> c1.config.v1.BoolField + 14, // 7: c1.config.v1.Field.string_slice_field:type_name -> c1.config.v1.StringSliceField + 15, // 8: c1.config.v1.Field.string_map_field:type_name -> c1.config.v1.StringMapField + 10, // 9: c1.config.v1.Field.resource_id_field:type_name -> c1.config.v1.ResourceIdField + 11, // 10: c1.config.v1.Field.resource_id_slice_field:type_name -> c1.config.v1.ResourceIdSliceField + 8, // 11: c1.config.v1.Field.resource_field:type_name -> c1.config.v1.ResourceField + 9, // 12: c1.config.v1.Field.resource_slice_field:type_name -> c1.config.v1.ResourceSliceField + 7, // 13: c1.config.v1.Resource.resource_id:type_name -> c1.config.v1.ResourceId + 7, // 14: c1.config.v1.Resource.parent_resource_id:type_name -> c1.config.v1.ResourceId + 19, // 15: c1.config.v1.Resource.annotations:type_name -> google.protobuf.Any + 6, // 16: c1.config.v1.ResourceField.default_value:type_name -> c1.config.v1.Resource + 6, // 17: c1.config.v1.ResourceSliceField.default_value:type_name -> c1.config.v1.Resource + 7, // 18: c1.config.v1.ResourceIdField.default_value:type_name -> c1.config.v1.ResourceId + 20, // 19: c1.config.v1.ResourceIdField.rules:type_name -> c1.config.v1.ResourceIDRules + 10, // 20: c1.config.v1.ResourceIdSliceField.default_value:type_name -> c1.config.v1.ResourceIdField + 21, // 21: c1.config.v1.ResourceIdSliceField.rules:type_name -> c1.config.v1.RepeatedResourceIdRules + 22, // 22: c1.config.v1.IntField.rules:type_name -> c1.config.v1.Int64Rules + 23, // 23: c1.config.v1.BoolField.rules:type_name -> c1.config.v1.BoolRules + 24, // 24: c1.config.v1.StringSliceField.rules:type_name -> c1.config.v1.RepeatedStringRules + 18, // 25: c1.config.v1.StringMapField.default_value:type_name -> c1.config.v1.StringMapField.DefaultValueEntry + 
25, // 26: c1.config.v1.StringMapField.rules:type_name -> c1.config.v1.StringMapRules + 26, // 27: c1.config.v1.StringField.rules:type_name -> c1.config.v1.StringRules + 1, // 28: c1.config.v1.StringField.type:type_name -> c1.config.v1.StringFieldType + 16, // 29: c1.config.v1.StringField.options:type_name -> c1.config.v1.StringFieldOption + 19, // 30: c1.config.v1.StringMapField.DefaultValueEntry.value:type_name -> google.protobuf.Any + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name } func init() { file_c1_config_v1_config_proto_init() } @@ -1098,25 +2389,31 @@ func file_c1_config_v1_config_proto_init() { return } file_c1_config_v1_rules_proto_init() - file_c1_config_v1_config_proto_msgTypes[2].OneofWrappers = []any{ + file_c1_config_v1_config_proto_msgTypes[3].OneofWrappers = []any{ (*Field_StringField)(nil), (*Field_IntField)(nil), (*Field_BoolField)(nil), (*Field_StringSliceField)(nil), (*Field_StringMapField)(nil), + (*Field_ResourceIdField)(nil), + (*Field_ResourceIdSliceField)(nil), + (*Field_ResourceField)(nil), + (*Field_ResourceSliceField)(nil), } - file_c1_config_v1_config_proto_msgTypes[3].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[4].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[5].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[6].OneofWrappers = []any{} file_c1_config_v1_config_proto_msgTypes[8].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[9].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[10].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[11].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[12].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[13].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[15].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_config_v1_config_proto_rawDesc), len(file_c1_config_v1_config_proto_rawDesc)), NumEnums: 2, - NumMessages: 10, + NumMessages: 17, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.validate.go index e2114f8d..c58e2bd2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.validate.go @@ -139,6 +139,40 @@ func (m *Configuration) validate(all bool) error { // no validation rules for RequiresExternalConnector + for idx, item := range m.GetFieldGroups() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ConfigurationValidationError{ + field: fmt.Sprintf("FieldGroups[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ConfigurationValidationError{ + field: fmt.Sprintf("FieldGroups[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := 
interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ConfigurationValidationError{ + field: fmt.Sprintf("FieldGroups[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + if len(errors) > 0 { return ConfigurationMultiError(errors) } @@ -324,6 +358,113 @@ var _ interface { ErrorName() string } = ConstraintValidationError{} +// Validate checks the field values on FieldGroup with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *FieldGroup) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FieldGroup with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in FieldGroupMultiError, or +// nil if none found. +func (m *FieldGroup) ValidateAll() error { + return m.validate(true) +} + +func (m *FieldGroup) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for DisplayName + + // no validation rules for HelpText + + // no validation rules for Default + + if len(errors) > 0 { + return FieldGroupMultiError(errors) + } + + return nil +} + +// FieldGroupMultiError is an error wrapping multiple validation errors +// returned by FieldGroup.ValidateAll() if the designated constraints aren't met. +type FieldGroupMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m FieldGroupMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FieldGroupMultiError) AllErrors() []error { return m } + +// FieldGroupValidationError is the validation error returned by +// FieldGroup.Validate if the designated constraints aren't met. +type FieldGroupValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FieldGroupValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FieldGroupValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FieldGroupValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FieldGroupValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FieldGroupValidationError) ErrorName() string { return "FieldGroupValidationError" } + +// Error satisfies the builtin error interface +func (e FieldGroupValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFieldGroup.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FieldGroupValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FieldGroupValidationError{} + // Validate checks the field values on Field with the rules defined in the // proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. 
@@ -565,6 +706,170 @@ func (m *Field) validate(all bool) error { } } + case *Field_ResourceIdField: + if v == nil { + err := FieldValidationError{ + field: "Field", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetResourceIdField()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FieldValidationError{ + field: "ResourceIdField", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FieldValidationError{ + field: "ResourceIdField", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResourceIdField()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FieldValidationError{ + field: "ResourceIdField", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Field_ResourceIdSliceField: + if v == nil { + err := FieldValidationError{ + field: "Field", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetResourceIdSliceField()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FieldValidationError{ + field: "ResourceIdSliceField", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FieldValidationError{ + field: "ResourceIdSliceField", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResourceIdSliceField()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FieldValidationError{ + field: "ResourceIdSliceField", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Field_ResourceField: + if v == nil { + err := FieldValidationError{ + field: "Field", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetResourceField()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FieldValidationError{ + field: "ResourceField", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FieldValidationError{ + field: "ResourceField", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResourceField()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FieldValidationError{ + field: "ResourceField", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Field_ResourceSliceField: + if v == nil { + err := FieldValidationError{ + field: "Field", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetResourceSliceField()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FieldValidationError{ + field: 
"ResourceSliceField", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FieldValidationError{ + field: "ResourceSliceField", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResourceSliceField()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FieldValidationError{ + field: "ResourceSliceField", + reason: "embedded message failed validation", + cause: err, + } + } + } + default: _ = v // ensures v is used } @@ -646,6 +951,900 @@ var _ interface { ErrorName() string } = FieldValidationError{} +// Validate checks the field values on Resource with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Resource) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Resource with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceMultiError, or nil +// if none found. +func (m *Resource) ValidateAll() error { + return m.validate(true) +} + +func (m *Resource) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResourceId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "ResourceId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "ResourceId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResourceId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "ResourceId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetParentResourceId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "ParentResourceId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: "ParentResourceId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParentResourceId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: "ParentResourceId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for DisplayName + + // no validation rules for Description + + for idx, item := range m.GetAnnotations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := 
v.Validate(); err != nil { + errors = append(errors, ResourceValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ResourceMultiError(errors) + } + + return nil +} + +// ResourceMultiError is an error wrapping multiple validation errors returned +// by Resource.ValidateAll() if the designated constraints aren't met. +type ResourceMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceMultiError) AllErrors() []error { return m } + +// ResourceValidationError is the validation error returned by +// Resource.Validate if the designated constraints aren't met. +type ResourceValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResource.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceValidationError{} + +// Validate checks the field values on ResourceId with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResourceId) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceId with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceIdMultiError, or +// nil if none found. +func (m *ResourceId) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceId) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ResourceTypeId + + // no validation rules for ResourceId + + if len(errors) > 0 { + return ResourceIdMultiError(errors) + } + + return nil +} + +// ResourceIdMultiError is an error wrapping multiple validation errors +// returned by ResourceId.ValidateAll() if the designated constraints aren't met. 
+type ResourceIdMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceIdMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceIdMultiError) AllErrors() []error { return m } + +// ResourceIdValidationError is the validation error returned by +// ResourceId.Validate if the designated constraints aren't met. +type ResourceIdValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceIdValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceIdValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceIdValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceIdValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceIdValidationError) ErrorName() string { return "ResourceIdValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceIdValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceId.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceIdValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceIdValidationError{} + +// Validate checks the field values on ResourceField with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ResourceField) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceField with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ResourceFieldMultiError, or +// nil if none found. 
+func (m *ResourceField) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceField) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceFieldValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceFieldValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceFieldValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ResourceFieldMultiError(errors) + } + + return nil +} + +// ResourceFieldMultiError is an error wrapping multiple validation errors +// returned by ResourceField.ValidateAll() if the designated constraints +// aren't met. +type ResourceFieldMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceFieldMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceFieldMultiError) AllErrors() []error { return m } + +// ResourceFieldValidationError is the validation error returned by +// ResourceField.Validate if the designated constraints aren't met. +type ResourceFieldValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceFieldValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceFieldValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceFieldValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceFieldValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceFieldValidationError) ErrorName() string { return "ResourceFieldValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceFieldValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceField.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceFieldValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceFieldValidationError{} + +// Validate checks the field values on ResourceSliceField with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResourceSliceField) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceSliceField with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceSliceFieldMultiError, or nil if none found. +func (m *ResourceSliceField) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceSliceField) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetDefaultValue() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ResourceSliceFieldMultiError(errors) + } + + return nil +} + +// ResourceSliceFieldMultiError is an error wrapping multiple validation errors +// returned by ResourceSliceField.ValidateAll() if the designated constraints +// aren't met. +type ResourceSliceFieldMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceSliceFieldMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceSliceFieldMultiError) AllErrors() []error { return m } + +// ResourceSliceFieldValidationError is the validation error returned by +// ResourceSliceField.Validate if the designated constraints aren't met. +type ResourceSliceFieldValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceSliceFieldValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceSliceFieldValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceSliceFieldValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceSliceFieldValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceSliceFieldValidationError) ErrorName() string { + return "ResourceSliceFieldValidationError" +} + +// Error satisfies the builtin error interface +func (e ResourceSliceFieldValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceSliceField.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceSliceFieldValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceSliceFieldValidationError{} + +// Validate checks the field values on ResourceIdField with the rules defined +// in the proto definition for this message. 
If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceIdField) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceIdField with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceIdFieldMultiError, or nil if none found. +func (m *ResourceIdField) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceIdField) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDefaultValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceIdFieldValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceIdFieldValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceIdFieldValidationError{ + field: "DefaultValue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.Rules != nil { + + if all { + switch v := interface{}(m.GetRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceIdFieldValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceIdFieldValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceIdFieldValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ResourceIdFieldMultiError(errors) + } + + return nil +} + +// ResourceIdFieldMultiError is an error wrapping multiple validation errors +// returned by ResourceIdField.ValidateAll() if the designated constraints +// aren't met. +type ResourceIdFieldMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceIdFieldMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceIdFieldMultiError) AllErrors() []error { return m } + +// ResourceIdFieldValidationError is the validation error returned by +// ResourceIdField.Validate if the designated constraints aren't met. +type ResourceIdFieldValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceIdFieldValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceIdFieldValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ResourceIdFieldValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceIdFieldValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceIdFieldValidationError) ErrorName() string { return "ResourceIdFieldValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceIdFieldValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceIdField.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceIdFieldValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceIdFieldValidationError{} + +// Validate checks the field values on ResourceIdSliceField with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResourceIdSliceField) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceIdSliceField with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceIdSliceFieldMultiError, or nil if none found. +func (m *ResourceIdSliceField) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceIdSliceField) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetDefaultValue() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceIdSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceIdSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceIdSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if m.Rules != nil { + + if all { + switch v := interface{}(m.GetRules()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceIdSliceFieldValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceIdSliceFieldValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRules()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceIdSliceFieldValidationError{ + field: "Rules", + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ResourceIdSliceFieldMultiError(errors) + } + + return nil +} + +// 
ResourceIdSliceFieldMultiError is an error wrapping multiple validation +// errors returned by ResourceIdSliceField.ValidateAll() if the designated +// constraints aren't met. +type ResourceIdSliceFieldMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceIdSliceFieldMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceIdSliceFieldMultiError) AllErrors() []error { return m } + +// ResourceIdSliceFieldValidationError is the validation error returned by +// ResourceIdSliceField.Validate if the designated constraints aren't met. +type ResourceIdSliceFieldValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceIdSliceFieldValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceIdSliceFieldValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceIdSliceFieldValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceIdSliceFieldValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceIdSliceFieldValidationError) ErrorName() string { + return "ResourceIdSliceFieldValidationError" +} + +// Error satisfies the builtin error interface +func (e ResourceIdSliceFieldValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceIdSliceField.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceIdSliceFieldValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceIdSliceFieldValidationError{} + // Validate checks the field values on IntField with the rules defined in the // proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config_protoopaque.pb.go new file mode 100644 index 00000000..5cc36692 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config_protoopaque.pb.go @@ -0,0 +1,2422 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/config/v1/config.proto + +//go:build protoopaque + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ConstraintKind int32 + +const ( + ConstraintKind_CONSTRAINT_KIND_UNSPECIFIED ConstraintKind = 0 + ConstraintKind_CONSTRAINT_KIND_REQUIRED_TOGETHER ConstraintKind = 1 + ConstraintKind_CONSTRAINT_KIND_AT_LEAST_ONE ConstraintKind = 2 + ConstraintKind_CONSTRAINT_KIND_MUTUALLY_EXCLUSIVE ConstraintKind = 3 + ConstraintKind_CONSTRAINT_KIND_DEPENDENT_ON ConstraintKind = 4 +) + +// Enum value maps for ConstraintKind. +var ( + ConstraintKind_name = map[int32]string{ + 0: "CONSTRAINT_KIND_UNSPECIFIED", + 1: "CONSTRAINT_KIND_REQUIRED_TOGETHER", + 2: "CONSTRAINT_KIND_AT_LEAST_ONE", + 3: "CONSTRAINT_KIND_MUTUALLY_EXCLUSIVE", + 4: "CONSTRAINT_KIND_DEPENDENT_ON", + } + ConstraintKind_value = map[string]int32{ + "CONSTRAINT_KIND_UNSPECIFIED": 0, + "CONSTRAINT_KIND_REQUIRED_TOGETHER": 1, + "CONSTRAINT_KIND_AT_LEAST_ONE": 2, + "CONSTRAINT_KIND_MUTUALLY_EXCLUSIVE": 3, + "CONSTRAINT_KIND_DEPENDENT_ON": 4, + } +) + +func (x ConstraintKind) Enum() *ConstraintKind { + p := new(ConstraintKind) + *p = x + return p +} + +func (x ConstraintKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConstraintKind) Descriptor() protoreflect.EnumDescriptor { + return file_c1_config_v1_config_proto_enumTypes[0].Descriptor() +} + +func (ConstraintKind) Type() protoreflect.EnumType { + return &file_c1_config_v1_config_proto_enumTypes[0] +} + +func (x ConstraintKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type StringFieldType int32 + +const ( + StringFieldType_STRING_FIELD_TYPE_TEXT_UNSPECIFIED StringFieldType = 0 // default/catch all. Its a text field. + StringFieldType_STRING_FIELD_TYPE_RANDOM StringFieldType = 1 + StringFieldType_STRING_FIELD_TYPE_OAUTH2 StringFieldType = 2 + StringFieldType_STRING_FIELD_TYPE_CONNECTOR_DERIVED_OPTIONS StringFieldType = 3 + StringFieldType_STRING_FIELD_TYPE_FILE_UPLOAD StringFieldType = 4 +) + +// Enum value maps for StringFieldType. 
+var ( + StringFieldType_name = map[int32]string{ + 0: "STRING_FIELD_TYPE_TEXT_UNSPECIFIED", + 1: "STRING_FIELD_TYPE_RANDOM", + 2: "STRING_FIELD_TYPE_OAUTH2", + 3: "STRING_FIELD_TYPE_CONNECTOR_DERIVED_OPTIONS", + 4: "STRING_FIELD_TYPE_FILE_UPLOAD", + } + StringFieldType_value = map[string]int32{ + "STRING_FIELD_TYPE_TEXT_UNSPECIFIED": 0, + "STRING_FIELD_TYPE_RANDOM": 1, + "STRING_FIELD_TYPE_OAUTH2": 2, + "STRING_FIELD_TYPE_CONNECTOR_DERIVED_OPTIONS": 3, + "STRING_FIELD_TYPE_FILE_UPLOAD": 4, + } +) + +func (x StringFieldType) Enum() *StringFieldType { + p := new(StringFieldType) + *p = x + return p +} + +func (x StringFieldType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StringFieldType) Descriptor() protoreflect.EnumDescriptor { + return file_c1_config_v1_config_proto_enumTypes[1].Descriptor() +} + +func (StringFieldType) Type() protoreflect.EnumType { + return &file_c1_config_v1_config_proto_enumTypes[1] +} + +func (x StringFieldType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type Configuration struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Fields *[]*Field `protobuf:"bytes,1,rep,name=fields,proto3"` + xxx_hidden_Constraints *[]*Constraint `protobuf:"bytes,2,rep,name=constraints,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_HelpUrl string `protobuf:"bytes,4,opt,name=help_url,json=helpUrl,proto3"` + xxx_hidden_IconUrl string `protobuf:"bytes,5,opt,name=icon_url,json=iconUrl,proto3"` + xxx_hidden_IsDirectory bool `protobuf:"varint,7,opt,name=is_directory,json=isDirectory,proto3"` + xxx_hidden_CatalogId string `protobuf:"bytes,8,opt,name=catalog_id,json=catalogId,proto3"` + xxx_hidden_SupportsExternalResources bool `protobuf:"varint,9,opt,name=supports_external_resources,json=supportsExternalResources,proto3"` + xxx_hidden_RequiresExternalConnector bool `protobuf:"varint,10,opt,name=requires_external_connector,json=requiresExternalConnector,proto3"` + xxx_hidden_FieldGroups *[]*FieldGroup `protobuf:"bytes,11,rep,name=field_groups,json=fieldGroups,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Configuration) Reset() { + *x = Configuration{} + mi := &file_c1_config_v1_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Configuration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configuration) ProtoMessage() {} + +func (x *Configuration) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Configuration) GetFields() []*Field { + if x != nil { + if x.xxx_hidden_Fields != nil { + return *x.xxx_hidden_Fields + } + } + return nil +} + +func (x *Configuration) GetConstraints() []*Constraint { + if x != nil { + if x.xxx_hidden_Constraints != nil { + return *x.xxx_hidden_Constraints + } + } + return nil +} + +func (x *Configuration) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *Configuration) GetHelpUrl() string { + if x != nil { + return x.xxx_hidden_HelpUrl + } + return "" +} + +func (x *Configuration) GetIconUrl() string { + if x != nil { + return x.xxx_hidden_IconUrl 
+ } + return "" +} + +func (x *Configuration) GetIsDirectory() bool { + if x != nil { + return x.xxx_hidden_IsDirectory + } + return false +} + +func (x *Configuration) GetCatalogId() string { + if x != nil { + return x.xxx_hidden_CatalogId + } + return "" +} + +func (x *Configuration) GetSupportsExternalResources() bool { + if x != nil { + return x.xxx_hidden_SupportsExternalResources + } + return false +} + +func (x *Configuration) GetRequiresExternalConnector() bool { + if x != nil { + return x.xxx_hidden_RequiresExternalConnector + } + return false +} + +func (x *Configuration) GetFieldGroups() []*FieldGroup { + if x != nil { + if x.xxx_hidden_FieldGroups != nil { + return *x.xxx_hidden_FieldGroups + } + } + return nil +} + +func (x *Configuration) SetFields(v []*Field) { + x.xxx_hidden_Fields = &v +} + +func (x *Configuration) SetConstraints(v []*Constraint) { + x.xxx_hidden_Constraints = &v +} + +func (x *Configuration) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *Configuration) SetHelpUrl(v string) { + x.xxx_hidden_HelpUrl = v +} + +func (x *Configuration) SetIconUrl(v string) { + x.xxx_hidden_IconUrl = v +} + +func (x *Configuration) SetIsDirectory(v bool) { + x.xxx_hidden_IsDirectory = v +} + +func (x *Configuration) SetCatalogId(v string) { + x.xxx_hidden_CatalogId = v +} + +func (x *Configuration) SetSupportsExternalResources(v bool) { + x.xxx_hidden_SupportsExternalResources = v +} + +func (x *Configuration) SetRequiresExternalConnector(v bool) { + x.xxx_hidden_RequiresExternalConnector = v +} + +func (x *Configuration) SetFieldGroups(v []*FieldGroup) { + x.xxx_hidden_FieldGroups = &v +} + +type Configuration_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Fields []*Field + Constraints []*Constraint + DisplayName string + HelpUrl string + IconUrl string + IsDirectory bool + CatalogId string + SupportsExternalResources bool + RequiresExternalConnector bool + FieldGroups []*FieldGroup +} + +func (b0 Configuration_builder) Build() *Configuration { + m0 := &Configuration{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Fields = &b.Fields + x.xxx_hidden_Constraints = &b.Constraints + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_HelpUrl = b.HelpUrl + x.xxx_hidden_IconUrl = b.IconUrl + x.xxx_hidden_IsDirectory = b.IsDirectory + x.xxx_hidden_CatalogId = b.CatalogId + x.xxx_hidden_SupportsExternalResources = b.SupportsExternalResources + x.xxx_hidden_RequiresExternalConnector = b.RequiresExternalConnector + x.xxx_hidden_FieldGroups = &b.FieldGroups + return m0 +} + +type Constraint struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Kind ConstraintKind `protobuf:"varint,1,opt,name=kind,proto3,enum=c1.config.v1.ConstraintKind"` + xxx_hidden_FieldNames []string `protobuf:"bytes,2,rep,name=field_names,json=fieldNames,proto3"` + xxx_hidden_SecondaryFieldNames []string `protobuf:"bytes,3,rep,name=secondary_field_names,json=secondaryFieldNames,proto3"` + xxx_hidden_Name string `protobuf:"bytes,4,opt,name=name,proto3"` + xxx_hidden_HelpText string `protobuf:"bytes,5,opt,name=help_text,json=helpText,proto3"` + xxx_hidden_IsFieldGroup bool `protobuf:"varint,6,opt,name=is_field_group,json=isFieldGroup,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Constraint) Reset() { + *x = Constraint{} + mi := &file_c1_config_v1_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Constraint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Constraint) ProtoMessage() {} + +func (x *Constraint) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Constraint) GetKind() ConstraintKind { + if x != nil { + return x.xxx_hidden_Kind + } + return ConstraintKind_CONSTRAINT_KIND_UNSPECIFIED +} + +func (x *Constraint) GetFieldNames() []string { + if x != nil { + return x.xxx_hidden_FieldNames + } + return nil +} + +func (x *Constraint) GetSecondaryFieldNames() []string { + if x != nil { + return x.xxx_hidden_SecondaryFieldNames + } + return nil +} + +func (x *Constraint) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Constraint) GetHelpText() string { + if x != nil { + return x.xxx_hidden_HelpText + } + return "" +} + +func (x *Constraint) GetIsFieldGroup() bool { + if x != nil { + return x.xxx_hidden_IsFieldGroup + } + return false +} + +func (x *Constraint) SetKind(v ConstraintKind) { + x.xxx_hidden_Kind = v +} + +func (x *Constraint) SetFieldNames(v []string) { + x.xxx_hidden_FieldNames = v +} + +func (x *Constraint) SetSecondaryFieldNames(v []string) { + x.xxx_hidden_SecondaryFieldNames = v +} + +func (x *Constraint) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Constraint) SetHelpText(v string) { + x.xxx_hidden_HelpText = v +} + +func (x *Constraint) SetIsFieldGroup(v bool) { + x.xxx_hidden_IsFieldGroup = v +} + +type Constraint_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for 
the builder. + + Kind ConstraintKind + FieldNames []string + SecondaryFieldNames []string + Name string + HelpText string + IsFieldGroup bool +} + +func (b0 Constraint_builder) Build() *Constraint { + m0 := &Constraint{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Kind = b.Kind + x.xxx_hidden_FieldNames = b.FieldNames + x.xxx_hidden_SecondaryFieldNames = b.SecondaryFieldNames + x.xxx_hidden_Name = b.Name + x.xxx_hidden_HelpText = b.HelpText + x.xxx_hidden_IsFieldGroup = b.IsFieldGroup + return m0 +} + +type FieldGroup struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_HelpText string `protobuf:"bytes,3,opt,name=help_text,json=helpText,proto3"` + xxx_hidden_Fields []string `protobuf:"bytes,4,rep,name=fields,proto3"` + xxx_hidden_Default bool `protobuf:"varint,5,opt,name=default,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FieldGroup) Reset() { + *x = FieldGroup{} + mi := &file_c1_config_v1_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FieldGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldGroup) ProtoMessage() {} + +func (x *FieldGroup) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FieldGroup) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *FieldGroup) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *FieldGroup) GetHelpText() string { + if x != nil { + return x.xxx_hidden_HelpText + } + return "" +} + +func (x *FieldGroup) GetFields() []string { + if x != nil { + return x.xxx_hidden_Fields + } + return nil +} + +func (x *FieldGroup) GetDefault() bool { + if x != nil { + return x.xxx_hidden_Default + } + return false +} + +func (x *FieldGroup) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *FieldGroup) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *FieldGroup) SetHelpText(v string) { + x.xxx_hidden_HelpText = v +} + +func (x *FieldGroup) SetFields(v []string) { + x.xxx_hidden_Fields = v +} + +func (x *FieldGroup) SetDefault(v bool) { + x.xxx_hidden_Default = v +} + +type FieldGroup_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + DisplayName string + HelpText string + Fields []string + Default bool +} + +func (b0 FieldGroup_builder) Build() *FieldGroup { + m0 := &FieldGroup{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_HelpText = b.HelpText + x.xxx_hidden_Fields = b.Fields + x.xxx_hidden_Default = b.Default + return m0 +} + +type Field struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3"` + xxx_hidden_Placeholder string `protobuf:"bytes,4,opt,name=placeholder,proto3"` + xxx_hidden_IsRequired bool `protobuf:"varint,5,opt,name=is_required,json=isRequired,proto3"` + xxx_hidden_IsOps bool `protobuf:"varint,6,opt,name=is_ops,json=isOps,proto3"` + xxx_hidden_IsSecret bool `protobuf:"varint,7,opt,name=is_secret,json=isSecret,proto3"` + xxx_hidden_Field isField_Field `protobuf_oneof:"field"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Field) Reset() { + *x = Field{} + mi := &file_c1_config_v1_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Field) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Field) ProtoMessage() {} + +func (x *Field) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Field) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Field) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *Field) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Field) GetPlaceholder() string { + if x != nil { + return x.xxx_hidden_Placeholder + } + return "" +} + +func (x *Field) GetIsRequired() bool { + if x != nil { + return x.xxx_hidden_IsRequired + } + return false +} + +func (x *Field) GetIsOps() bool { + if x != nil { + return x.xxx_hidden_IsOps + } + return false +} + +func (x *Field) GetIsSecret() bool { + if x != nil { + return x.xxx_hidden_IsSecret + } + return false +} + +func (x *Field) GetStringField() *StringField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_StringField); ok { + return x.StringField + } + } + return nil +} + +func (x *Field) GetIntField() *IntField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_IntField); ok { + return x.IntField + } + } + return nil +} + +func (x *Field) GetBoolField() *BoolField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_BoolField); ok { + return x.BoolField + } + } + return nil +} + +func (x *Field) GetStringSliceField() *StringSliceField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_StringSliceField); ok { + return x.StringSliceField + } + } + return nil +} + +func (x *Field) GetStringMapField() *StringMapField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_StringMapField); ok { + return x.StringMapField + } + } + return nil +} + +func (x *Field) GetResourceIdField() *ResourceIdField { + if x != nil { + if x, ok := 
x.xxx_hidden_Field.(*field_ResourceIdField); ok { + return x.ResourceIdField + } + } + return nil +} + +func (x *Field) GetResourceIdSliceField() *ResourceIdSliceField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_ResourceIdSliceField); ok { + return x.ResourceIdSliceField + } + } + return nil +} + +func (x *Field) GetResourceField() *ResourceField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_ResourceField); ok { + return x.ResourceField + } + } + return nil +} + +func (x *Field) GetResourceSliceField() *ResourceSliceField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_ResourceSliceField); ok { + return x.ResourceSliceField + } + } + return nil +} + +func (x *Field) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Field) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *Field) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Field) SetPlaceholder(v string) { + x.xxx_hidden_Placeholder = v +} + +func (x *Field) SetIsRequired(v bool) { + x.xxx_hidden_IsRequired = v +} + +func (x *Field) SetIsOps(v bool) { + x.xxx_hidden_IsOps = v +} + +func (x *Field) SetIsSecret(v bool) { + x.xxx_hidden_IsSecret = v +} + +func (x *Field) SetStringField(v *StringField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_StringField{v} +} + +func (x *Field) SetIntField(v *IntField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_IntField{v} +} + +func (x *Field) SetBoolField(v *BoolField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_BoolField{v} +} + +func (x *Field) SetStringSliceField(v *StringSliceField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_StringSliceField{v} +} + +func (x *Field) SetStringMapField(v *StringMapField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_StringMapField{v} +} + +func (x *Field) SetResourceIdField(v *ResourceIdField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_ResourceIdField{v} +} + +func (x *Field) SetResourceIdSliceField(v *ResourceIdSliceField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_ResourceIdSliceField{v} +} + +func (x *Field) SetResourceField(v *ResourceField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_ResourceField{v} +} + +func (x *Field) SetResourceSliceField(v *ResourceSliceField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_ResourceSliceField{v} +} + +func (x *Field) HasField() bool { + if x == nil { + return false + } + return x.xxx_hidden_Field != nil +} + +func (x *Field) HasStringField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_StringField) + return ok +} + +func (x *Field) HasIntField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_IntField) + return ok +} + +func (x *Field) HasBoolField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_BoolField) + return ok +} + +func (x *Field) HasStringSliceField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_StringSliceField) + return ok +} + +func (x *Field) HasStringMapField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_StringMapField) + return ok +} + +func (x *Field) 
HasResourceIdField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_ResourceIdField) + return ok +} + +func (x *Field) HasResourceIdSliceField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_ResourceIdSliceField) + return ok +} + +func (x *Field) HasResourceField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_ResourceField) + return ok +} + +func (x *Field) HasResourceSliceField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_ResourceSliceField) + return ok +} + +func (x *Field) ClearField() { + x.xxx_hidden_Field = nil +} + +func (x *Field) ClearStringField() { + if _, ok := x.xxx_hidden_Field.(*field_StringField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearIntField() { + if _, ok := x.xxx_hidden_Field.(*field_IntField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearBoolField() { + if _, ok := x.xxx_hidden_Field.(*field_BoolField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearStringSliceField() { + if _, ok := x.xxx_hidden_Field.(*field_StringSliceField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearStringMapField() { + if _, ok := x.xxx_hidden_Field.(*field_StringMapField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearResourceIdField() { + if _, ok := x.xxx_hidden_Field.(*field_ResourceIdField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearResourceIdSliceField() { + if _, ok := x.xxx_hidden_Field.(*field_ResourceIdSliceField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearResourceField() { + if _, ok := x.xxx_hidden_Field.(*field_ResourceField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearResourceSliceField() { + if _, ok := x.xxx_hidden_Field.(*field_ResourceSliceField); ok { + x.xxx_hidden_Field = nil + } +} + +const Field_Field_not_set_case case_Field_Field = 0 +const Field_StringField_case case_Field_Field = 100 +const Field_IntField_case case_Field_Field = 101 +const Field_BoolField_case case_Field_Field = 102 +const Field_StringSliceField_case case_Field_Field = 103 +const Field_StringMapField_case case_Field_Field = 104 +const Field_ResourceIdField_case case_Field_Field = 105 +const Field_ResourceIdSliceField_case case_Field_Field = 106 +const Field_ResourceField_case case_Field_Field = 107 +const Field_ResourceSliceField_case case_Field_Field = 108 + +func (x *Field) WhichField() case_Field_Field { + if x == nil { + return Field_Field_not_set_case + } + switch x.xxx_hidden_Field.(type) { + case *field_StringField: + return Field_StringField_case + case *field_IntField: + return Field_IntField_case + case *field_BoolField: + return Field_BoolField_case + case *field_StringSliceField: + return Field_StringSliceField_case + case *field_StringMapField: + return Field_StringMapField_case + case *field_ResourceIdField: + return Field_ResourceIdField_case + case *field_ResourceIdSliceField: + return Field_ResourceIdSliceField_case + case *field_ResourceField: + return Field_ResourceField_case + case *field_ResourceSliceField: + return Field_ResourceSliceField_case + default: + return Field_Field_not_set_case + } +} + +type Field_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + DisplayName string + Description string + Placeholder string + IsRequired bool + IsOps bool + IsSecret bool + // Fields of oneof xxx_hidden_Field: + StringField *StringField + IntField *IntField + BoolField *BoolField + StringSliceField *StringSliceField + StringMapField *StringMapField + ResourceIdField *ResourceIdField + ResourceIdSliceField *ResourceIdSliceField + // These are meant to serve as return types for actions. + ResourceField *ResourceField + ResourceSliceField *ResourceSliceField + // -- end of xxx_hidden_Field +} + +func (b0 Field_builder) Build() *Field { + m0 := &Field{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Placeholder = b.Placeholder + x.xxx_hidden_IsRequired = b.IsRequired + x.xxx_hidden_IsOps = b.IsOps + x.xxx_hidden_IsSecret = b.IsSecret + if b.StringField != nil { + x.xxx_hidden_Field = &field_StringField{b.StringField} + } + if b.IntField != nil { + x.xxx_hidden_Field = &field_IntField{b.IntField} + } + if b.BoolField != nil { + x.xxx_hidden_Field = &field_BoolField{b.BoolField} + } + if b.StringSliceField != nil { + x.xxx_hidden_Field = &field_StringSliceField{b.StringSliceField} + } + if b.StringMapField != nil { + x.xxx_hidden_Field = &field_StringMapField{b.StringMapField} + } + if b.ResourceIdField != nil { + x.xxx_hidden_Field = &field_ResourceIdField{b.ResourceIdField} + } + if b.ResourceIdSliceField != nil { + x.xxx_hidden_Field = &field_ResourceIdSliceField{b.ResourceIdSliceField} + } + if b.ResourceField != nil { + x.xxx_hidden_Field = &field_ResourceField{b.ResourceField} + } + if b.ResourceSliceField != nil { + x.xxx_hidden_Field = &field_ResourceSliceField{b.ResourceSliceField} + } + return m0 +} + +type case_Field_Field protoreflect.FieldNumber + +func (x case_Field_Field) String() string { + md := file_c1_config_v1_config_proto_msgTypes[3].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isField_Field interface { + isField_Field() +} + +type field_StringField struct { + StringField *StringField `protobuf:"bytes,100,opt,name=string_field,json=stringField,proto3,oneof"` +} + +type field_IntField struct { + IntField *IntField `protobuf:"bytes,101,opt,name=int_field,json=intField,proto3,oneof"` +} + +type field_BoolField struct { + BoolField *BoolField `protobuf:"bytes,102,opt,name=bool_field,json=boolField,proto3,oneof"` +} + +type field_StringSliceField struct { + StringSliceField *StringSliceField `protobuf:"bytes,103,opt,name=string_slice_field,json=stringSliceField,proto3,oneof"` +} + +type field_StringMapField struct { + StringMapField *StringMapField `protobuf:"bytes,104,opt,name=string_map_field,json=stringMapField,proto3,oneof"` +} + +type field_ResourceIdField struct { + ResourceIdField *ResourceIdField `protobuf:"bytes,105,opt,name=resource_id_field,json=resourceIdField,proto3,oneof"` +} + +type field_ResourceIdSliceField struct { + ResourceIdSliceField *ResourceIdSliceField `protobuf:"bytes,106,opt,name=resource_id_slice_field,json=resourceIdSliceField,proto3,oneof"` +} + +type field_ResourceField struct { + // These are meant to serve as return types for actions. 
+ ResourceField *ResourceField `protobuf:"bytes,107,opt,name=resource_field,json=resourceField,proto3,oneof"` +} + +type field_ResourceSliceField struct { + ResourceSliceField *ResourceSliceField `protobuf:"bytes,108,opt,name=resource_slice_field,json=resourceSliceField,proto3,oneof"` +} + +func (*field_StringField) isField_Field() {} + +func (*field_IntField) isField_Field() {} + +func (*field_BoolField) isField_Field() {} + +func (*field_StringSliceField) isField_Field() {} + +func (*field_StringMapField) isField_Field() {} + +func (*field_ResourceIdField) isField_Field() {} + +func (*field_ResourceIdSliceField) isField_Field() {} + +func (*field_ResourceField) isField_Field() {} + +func (*field_ResourceSliceField) isField_Field() {} + +// These are partially duplicate with the Resource proto in the connector package. +// This is to avoid import cycles +type Resource struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Description string `protobuf:"bytes,4,opt,name=description,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,5,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Resource) Reset() { + *x = Resource{} + mi := &file_c1_config_v1_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Resource) GetResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *Resource) GetParentResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ParentResourceId + } + return nil +} + +func (x *Resource) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *Resource) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Resource) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Resource) SetResourceId(v *ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *Resource) SetParentResourceId(v *ResourceId) { + x.xxx_hidden_ParentResourceId = v +} + +func (x *Resource) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *Resource) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Resource) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Resource) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *Resource) HasParentResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ParentResourceId != nil +} + +func (x *Resource) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +func (x 
*Resource) ClearParentResourceId() { + x.xxx_hidden_ParentResourceId = nil +} + +type Resource_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + ParentResourceId *ResourceId + DisplayName string + Description string + Annotations []*anypb.Any +} + +func (b0 Resource_builder) Build() *Resource { + m0 := &Resource{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_ParentResourceId = b.ParentResourceId + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ResourceId struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3"` + xxx_hidden_ResourceId string `protobuf:"bytes,2,opt,name=resource_id,json=resourceId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceId) Reset() { + *x = ResourceId{} + mi := &file_c1_config_v1_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceId) ProtoMessage() {} + +func (x *ResourceId) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceId) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *ResourceId) GetResourceId() string { + if x != nil { + return x.xxx_hidden_ResourceId + } + return "" +} + +func (x *ResourceId) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +func (x *ResourceId) SetResourceId(v string) { + x.xxx_hidden_ResourceId = v +} + +type ResourceId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceTypeId string + ResourceId string +} + +func (b0 ResourceId_builder) Build() *ResourceId { + m0 := &ResourceId{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + x.xxx_hidden_ResourceId = b.ResourceId + return m0 +} + +type ResourceField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue *Resource `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceField) Reset() { + *x = ResourceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceField) ProtoMessage() {} + +func (x *ResourceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceField) GetDefaultValue() *Resource { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *ResourceField) SetDefaultValue(v *Resource) { + x.xxx_hidden_DefaultValue = v +} + +func (x *ResourceField) HasDefaultValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_DefaultValue != nil +} + +func (x *ResourceField) ClearDefaultValue() { + x.xxx_hidden_DefaultValue = nil +} + +type ResourceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue *Resource +} + +func (b0 ResourceField_builder) Build() *ResourceField { + m0 := &ResourceField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + return m0 +} + +type ResourceSliceField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue *[]*Resource `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceSliceField) Reset() { + *x = ResourceSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSliceField) ProtoMessage() {} + +func (x *ResourceSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceSliceField) GetDefaultValue() []*Resource { + if x != nil { + if x.xxx_hidden_DefaultValue != nil { + return *x.xxx_hidden_DefaultValue + } + } + return nil +} + +func (x *ResourceSliceField) SetDefaultValue(v []*Resource) { + x.xxx_hidden_DefaultValue = &v +} + +type ResourceSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue []*Resource +} + +func (b0 ResourceSliceField_builder) Build() *ResourceSliceField { + m0 := &ResourceSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = &b.DefaultValue + return m0 +} + +type ResourceIdField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue *ResourceId `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3"` + xxx_hidden_Rules *ResourceIDRules `protobuf:"bytes,3,opt,name=rules,proto3,oneof"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceIdField) Reset() { + *x = ResourceIdField{} + mi := &file_c1_config_v1_config_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceIdField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceIdField) ProtoMessage() {} + +func (x *ResourceIdField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceIdField) GetDefaultValue() *ResourceId { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *ResourceIdField) GetRules() *ResourceIDRules { + if x != nil { + return x.xxx_hidden_Rules + } + return nil +} + +func (x *ResourceIdField) SetDefaultValue(v *ResourceId) { + x.xxx_hidden_DefaultValue = v +} + +func (x *ResourceIdField) SetRules(v *ResourceIDRules) { + x.xxx_hidden_Rules = v +} + +func (x *ResourceIdField) HasDefaultValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_DefaultValue != nil +} + +func (x *ResourceIdField) HasRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_Rules != nil +} + +func (x *ResourceIdField) ClearDefaultValue() { + x.xxx_hidden_DefaultValue = nil +} + +func (x *ResourceIdField) ClearRules() { + x.xxx_hidden_Rules = nil +} + +type ResourceIdField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue *ResourceId + Rules *ResourceIDRules +} + +func (b0 ResourceIdField_builder) Build() *ResourceIdField { + m0 := &ResourceIdField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + x.xxx_hidden_Rules = b.Rules + return m0 +} + +type ResourceIdSliceField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue *[]*ResourceIdField `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3"` + xxx_hidden_Rules *RepeatedResourceIdRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceIdSliceField) Reset() { + *x = ResourceIdSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceIdSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceIdSliceField) ProtoMessage() {} + +func (x *ResourceIdSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceIdSliceField) GetDefaultValue() []*ResourceIdField { + if x != nil { + if x.xxx_hidden_DefaultValue != nil { + return *x.xxx_hidden_DefaultValue + } + } + return nil +} + +func (x *ResourceIdSliceField) GetRules() *RepeatedResourceIdRules { + if x != nil { + return x.xxx_hidden_Rules + } + return nil +} + +func (x *ResourceIdSliceField) SetDefaultValue(v []*ResourceIdField) { + x.xxx_hidden_DefaultValue = &v +} + +func (x *ResourceIdSliceField) SetRules(v *RepeatedResourceIdRules) { + x.xxx_hidden_Rules = v +} + +func (x *ResourceIdSliceField) HasRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_Rules != nil +} + +func (x *ResourceIdSliceField) ClearRules() { + x.xxx_hidden_Rules = nil +} + +type ResourceIdSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue []*ResourceIdField + Rules *RepeatedResourceIdRules +} + +func (b0 ResourceIdSliceField_builder) Build() *ResourceIdSliceField { + m0 := &ResourceIdSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = &b.DefaultValue + x.xxx_hidden_Rules = b.Rules + return m0 +} + +type IntField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue int64 `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3"` + xxx_hidden_Rules *Int64Rules `protobuf:"bytes,2,opt,name=rules,proto3,oneof"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IntField) Reset() { + *x = IntField{} + mi := &file_c1_config_v1_config_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IntField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IntField) ProtoMessage() {} + +func (x *IntField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *IntField) GetDefaultValue() int64 { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return 0 +} + +func (x *IntField) GetRules() *Int64Rules { + if x != nil { + return x.xxx_hidden_Rules + } + return nil +} + +func (x *IntField) SetDefaultValue(v int64) { + x.xxx_hidden_DefaultValue = v +} + +func (x *IntField) SetRules(v *Int64Rules) { + x.xxx_hidden_Rules = v +} + +func (x *IntField) HasRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_Rules != nil +} + +func (x *IntField) ClearRules() { + x.xxx_hidden_Rules = nil +} + +type IntField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // rules + DefaultValue int64 + Rules *Int64Rules +} + +func (b0 IntField_builder) Build() *IntField { + m0 := &IntField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + x.xxx_hidden_Rules = b.Rules + return m0 +} + +type BoolField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue bool `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3"` + xxx_hidden_Rules *BoolRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BoolField) Reset() { + *x = BoolField{} + mi := &file_c1_config_v1_config_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BoolField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolField) ProtoMessage() {} + +func (x *BoolField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BoolField) GetDefaultValue() bool { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return false +} + +func (x *BoolField) GetRules() *BoolRules { + if x != nil { + return x.xxx_hidden_Rules + } + return nil +} + +func (x *BoolField) SetDefaultValue(v bool) { + x.xxx_hidden_DefaultValue = v +} + +func (x *BoolField) SetRules(v *BoolRules) { + x.xxx_hidden_Rules = v +} + +func (x *BoolField) HasRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_Rules != nil +} + +func (x *BoolField) ClearRules() { + x.xxx_hidden_Rules = nil +} + +type BoolField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue bool + Rules *BoolRules +} + +func (b0 BoolField_builder) Build() *BoolField { + m0 := &BoolField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + x.xxx_hidden_Rules = b.Rules + return m0 +} + +type StringSliceField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue []string `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3"` + xxx_hidden_Rules *RepeatedStringRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringSliceField) Reset() { + *x = StringSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringSliceField) ProtoMessage() {} + +func (x *StringSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringSliceField) GetDefaultValue() []string { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *StringSliceField) GetRules() *RepeatedStringRules { + if x != nil { + return x.xxx_hidden_Rules + } + return nil +} + +func (x *StringSliceField) SetDefaultValue(v []string) { + x.xxx_hidden_DefaultValue = v +} + +func (x *StringSliceField) SetRules(v *RepeatedStringRules) { + x.xxx_hidden_Rules = v +} + +func (x *StringSliceField) HasRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_Rules != nil +} + +func (x *StringSliceField) ClearRules() { + x.xxx_hidden_Rules = nil +} + +type StringSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue []string + Rules *RepeatedStringRules +} + +func (b0 StringSliceField_builder) Build() *StringSliceField { + m0 := &StringSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + x.xxx_hidden_Rules = b.Rules + return m0 +} + +type StringMapField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue map[string]*anypb.Any `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Rules *StringMapRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringMapField) Reset() { + *x = StringMapField{} + mi := &file_c1_config_v1_config_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringMapField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringMapField) ProtoMessage() {} + +func (x *StringMapField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringMapField) GetDefaultValue() map[string]*anypb.Any { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *StringMapField) GetRules() *StringMapRules { + if x != nil { + return x.xxx_hidden_Rules + } + return nil +} + +func (x *StringMapField) SetDefaultValue(v map[string]*anypb.Any) { + x.xxx_hidden_DefaultValue = v +} + +func (x *StringMapField) SetRules(v *StringMapRules) { + x.xxx_hidden_Rules = v +} + +func (x *StringMapField) HasRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_Rules != nil +} + +func (x *StringMapField) ClearRules() { + x.xxx_hidden_Rules = nil +} + +type StringMapField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue map[string]*anypb.Any + Rules *StringMapRules +} + +func (b0 StringMapField_builder) Build() *StringMapField { + m0 := &StringMapField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + x.xxx_hidden_Rules = b.Rules + return m0 +} + +type StringFieldOption struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Value string `protobuf:"bytes,2,opt,name=value,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringFieldOption) Reset() { + *x = StringFieldOption{} + mi := &file_c1_config_v1_config_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringFieldOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringFieldOption) ProtoMessage() {} + +func (x *StringFieldOption) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringFieldOption) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *StringFieldOption) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *StringFieldOption) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *StringFieldOption) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *StringFieldOption) SetValue(v string) { + x.xxx_hidden_Value = v +} + +func (x *StringFieldOption) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +type StringFieldOption_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Value string + DisplayName string +} + +func (b0 StringFieldOption_builder) Build() *StringFieldOption { + m0 := &StringFieldOption{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Value = b.Value + x.xxx_hidden_DisplayName = b.DisplayName + return m0 +} + +type StringField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue string `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3"` + xxx_hidden_Rules *StringRules `protobuf:"bytes,2,opt,name=rules,proto3,oneof"` + xxx_hidden_Type StringFieldType `protobuf:"varint,3,opt,name=type,proto3,enum=c1.config.v1.StringFieldType"` + xxx_hidden_AllowedExtensions []string `protobuf:"bytes,4,rep,name=allowed_extensions,json=allowedExtensions,proto3"` + xxx_hidden_Options *[]*StringFieldOption `protobuf:"bytes,5,rep,name=options,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringField) Reset() { + *x = StringField{} + mi := &file_c1_config_v1_config_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringField) ProtoMessage() {} + +func (x *StringField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringField) GetDefaultValue() string { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return "" +} + +func (x *StringField) GetRules() *StringRules { + if x != nil { + return x.xxx_hidden_Rules + } + return nil +} + +func (x *StringField) GetType() StringFieldType { + if x != nil { + return x.xxx_hidden_Type + } + return StringFieldType_STRING_FIELD_TYPE_TEXT_UNSPECIFIED +} + +func (x *StringField) GetAllowedExtensions() []string { + if x != nil { + return x.xxx_hidden_AllowedExtensions + } + return nil +} + +func (x *StringField) GetOptions() []*StringFieldOption { + if x != nil { + if x.xxx_hidden_Options != nil { + return *x.xxx_hidden_Options + } + } + return nil +} + +func (x *StringField) SetDefaultValue(v string) { + x.xxx_hidden_DefaultValue = v +} + +func (x *StringField) SetRules(v *StringRules) { + x.xxx_hidden_Rules = v +} + +func (x *StringField) SetType(v StringFieldType) { + x.xxx_hidden_Type = v +} + +func (x *StringField) SetAllowedExtensions(v []string) { + x.xxx_hidden_AllowedExtensions = v +} + +func (x *StringField) SetOptions(v []*StringFieldOption) { + x.xxx_hidden_Options = &v +} + +func (x *StringField) HasRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_Rules != nil +} + +func (x *StringField) ClearRules() { + x.xxx_hidden_Rules = nil +} + +type StringField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue string + Rules *StringRules + Type StringFieldType + // only used for FileUpload atm, oneofs are evil + AllowedExtensions []string + Options []*StringFieldOption +} + +func (b0 StringField_builder) Build() *StringField { + m0 := &StringField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + x.xxx_hidden_Rules = b.Rules + x.xxx_hidden_Type = b.Type + x.xxx_hidden_AllowedExtensions = b.AllowedExtensions + x.xxx_hidden_Options = &b.Options + return m0 +} + +var File_c1_config_v1_config_proto protoreflect.FileDescriptor + +const file_c1_config_v1_config_proto_rawDesc = "" + + "\n" + + "\x19c1/config/v1/config.proto\x12\fc1.config.v1\x1a\x18c1/config/v1/rules.proto\x1a\x19google/protobuf/any.proto\"\xd0\x03\n" + + "\rConfiguration\x12+\n" + + "\x06fields\x18\x01 \x03(\v2\x13.c1.config.v1.FieldR\x06fields\x12:\n" + + "\vconstraints\x18\x02 \x03(\v2\x18.c1.config.v1.ConstraintR\vconstraints\x12!\n" + + "\fdisplay_name\x18\x03 \x01(\tR\vdisplayName\x12\x19\n" + + "\bhelp_url\x18\x04 \x01(\tR\ahelpUrl\x12\x19\n" + + "\bicon_url\x18\x05 \x01(\tR\aiconUrl\x12!\n" + + "\fis_directory\x18\a \x01(\bR\visDirectory\x12\x1d\n" + + "\n" + + "catalog_id\x18\b \x01(\tR\tcatalogId\x12>\n" + + "\x1bsupports_external_resources\x18\t \x01(\bR\x19supportsExternalResources\x12>\n" + + "\x1brequires_external_connector\x18\n" + + " \x01(\bR\x19requiresExternalConnector\x12;\n" + + "\ffield_groups\x18\v \x03(\v2\x18.c1.config.v1.FieldGroupR\vfieldGroups\"\xea\x01\n" + + "\n" + + "Constraint\x120\n" + + "\x04kind\x18\x01 \x01(\x0e2\x1c.c1.config.v1.ConstraintKindR\x04kind\x12\x1f\n" + + "\vfield_names\x18\x02 \x03(\tR\n" + + "fieldNames\x122\n" + + "\x15secondary_field_names\x18\x03 \x03(\tR\x13secondaryFieldNames\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\x12\x1b\n" + + "\thelp_text\x18\x05 \x01(\tR\bhelpText\x12$\n" + + "\x0eis_field_group\x18\x06 \x01(\bR\fisFieldGroup\"\x92\x01\n" + + "\n" + + "FieldGroup\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x1b\n" + + "\thelp_text\x18\x03 \x01(\tR\bhelpText\x12\x16\n" + + "\x06fields\x18\x04 \x03(\tR\x06fields\x12\x18\n" + + "\adefault\x18\x05 \x01(\bR\adefault\"\xf1\x06\n" + + "\x05Field\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x12 \n" + + "\vplaceholder\x18\x04 \x01(\tR\vplaceholder\x12\x1f\n" + + "\vis_required\x18\x05 \x01(\bR\n" + + "isRequired\x12\x15\n" + + "\x06is_ops\x18\x06 \x01(\bR\x05isOps\x12\x1b\n" + + "\tis_secret\x18\a \x01(\bR\bisSecret\x12>\n" + + "\fstring_field\x18d \x01(\v2\x19.c1.config.v1.StringFieldH\x00R\vstringField\x125\n" + + "\tint_field\x18e \x01(\v2\x16.c1.config.v1.IntFieldH\x00R\bintField\x128\n" + + "\n" + + "bool_field\x18f \x01(\v2\x17.c1.config.v1.BoolFieldH\x00R\tboolField\x12N\n" + + "\x12string_slice_field\x18g \x01(\v2\x1e.c1.config.v1.StringSliceFieldH\x00R\x10stringSliceField\x12H\n" + + "\x10string_map_field\x18h \x01(\v2\x1c.c1.config.v1.StringMapFieldH\x00R\x0estringMapField\x12K\n" + + "\x11resource_id_field\x18i \x01(\v2\x1d.c1.config.v1.ResourceIdFieldH\x00R\x0fresourceIdField\x12[\n" + + "\x17resource_id_slice_field\x18j \x01(\v2\".c1.config.v1.ResourceIdSliceFieldH\x00R\x14resourceIdSliceField\x12D\n" + + "\x0eresource_field\x18k \x01(\v2\x1b.c1.config.v1.ResourceFieldH\x00R\rresourceField\x12T\n" + + "\x14resource_slice_field\x18l \x01(\v2 
.c1.config.v1.ResourceSliceFieldH\x00R\x12resourceSliceFieldB\a\n" + + "\x05field\"\x8a\x02\n" + + "\bResource\x129\n" + + "\vresource_id\x18\x01 \x01(\v2\x18.c1.config.v1.ResourceIdR\n" + + "resourceId\x12F\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x18.c1.config.v1.ResourceIdR\x10parentResourceId\x12!\n" + + "\fdisplay_name\x18\x03 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x04 \x01(\tR\vdescription\x126\n" + + "\vannotations\x18\x05 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"W\n" + + "\n" + + "ResourceId\x12(\n" + + "\x10resource_type_id\x18\x01 \x01(\tR\x0eresourceTypeId\x12\x1f\n" + + "\vresource_id\x18\x02 \x01(\tR\n" + + "resourceId\"L\n" + + "\rResourceField\x12;\n" + + "\rdefault_value\x18\x01 \x01(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"Q\n" + + "\x12ResourceSliceField\x12;\n" + + "\rdefault_value\x18\x01 \x03(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"\x94\x01\n" + + "\x0fResourceIdField\x12=\n" + + "\rdefault_value\x18\x01 \x01(\v2\x18.c1.config.v1.ResourceIdR\fdefaultValue\x128\n" + + "\x05rules\x18\x03 \x01(\v2\x1d.c1.config.v1.ResourceIDRulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"\xa6\x01\n" + + "\x14ResourceIdSliceField\x12B\n" + + "\rdefault_value\x18\x01 \x03(\v2\x1d.c1.config.v1.ResourceIdFieldR\fdefaultValue\x12@\n" + + "\x05rules\x18\x02 \x01(\v2%.c1.config.v1.RepeatedResourceIdRulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"n\n" + + "\bIntField\x12#\n" + + "\rdefault_value\x18\x01 \x01(\x03R\fdefaultValue\x123\n" + + "\x05rules\x18\x02 \x01(\v2\x18.c1.config.v1.Int64RulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"n\n" + + "\tBoolField\x12#\n" + + "\rdefault_value\x18\x01 \x01(\bR\fdefaultValue\x122\n" + + "\x05rules\x18\x02 \x01(\v2\x17.c1.config.v1.BoolRulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"\x7f\n" + + "\x10StringSliceField\x12#\n" + + "\rdefault_value\x18\x01 \x03(\tR\fdefaultValue\x12<\n" + + "\x05rules\x18\x02 \x01(\v2!.c1.config.v1.RepeatedStringRulesH\x00R\x05rules\x88\x01\x01B\b\n" + + "\x06_rules\"\xff\x01\n" + + "\x0eStringMapField\x12S\n" + + "\rdefault_value\x18\x01 \x03(\v2..c1.config.v1.StringMapField.DefaultValueEntryR\fdefaultValue\x127\n" + + "\x05rules\x18\x02 \x01(\v2\x1c.c1.config.v1.StringMapRulesH\x00R\x05rules\x88\x01\x01\x1aU\n" + + "\x11DefaultValueEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12*\n" + + "\x05value\x18\x02 \x01(\v2\x14.google.protobuf.AnyR\x05value:\x028\x01B\b\n" + + "\x06_rules\"`\n" + + "\x11StringFieldOption\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x12!\n" + + "\fdisplay_name\x18\x03 \x01(\tR\vdisplayName\"\x8f\x02\n" + + "\vStringField\x12#\n" + + "\rdefault_value\x18\x01 \x01(\tR\fdefaultValue\x124\n" + + "\x05rules\x18\x02 \x01(\v2\x19.c1.config.v1.StringRulesH\x00R\x05rules\x88\x01\x01\x121\n" + + "\x04type\x18\x03 \x01(\x0e2\x1d.c1.config.v1.StringFieldTypeR\x04type\x12-\n" + + "\x12allowed_extensions\x18\x04 \x03(\tR\x11allowedExtensions\x129\n" + + "\aoptions\x18\x05 \x03(\v2\x1f.c1.config.v1.StringFieldOptionR\aoptionsB\b\n" + + "\x06_rules*\xc4\x01\n" + + "\x0eConstraintKind\x12\x1f\n" + + "\x1bCONSTRAINT_KIND_UNSPECIFIED\x10\x00\x12%\n" + + "!CONSTRAINT_KIND_REQUIRED_TOGETHER\x10\x01\x12 \n" + + "\x1cCONSTRAINT_KIND_AT_LEAST_ONE\x10\x02\x12&\n" + + "\"CONSTRAINT_KIND_MUTUALLY_EXCLUSIVE\x10\x03\x12 \n" + + "\x1cCONSTRAINT_KIND_DEPENDENT_ON\x10\x04*\xc9\x01\n" + + "\x0fStringFieldType\x12&\n" + + "\"STRING_FIELD_TYPE_TEXT_UNSPECIFIED\x10\x00\x12\x1c\n" + + 
"\x18STRING_FIELD_TYPE_RANDOM\x10\x01\x12\x1c\n" + + "\x18STRING_FIELD_TYPE_OAUTH2\x10\x02\x12/\n" + + "+STRING_FIELD_TYPE_CONNECTOR_DERIVED_OPTIONS\x10\x03\x12!\n" + + "\x1dSTRING_FIELD_TYPE_FILE_UPLOAD\x10\x04B3Z1github.com/conductorone/baton-sdk/pb/c1/config/v1b\x06proto3" + +var file_c1_config_v1_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_c1_config_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_c1_config_v1_config_proto_goTypes = []any{ + (ConstraintKind)(0), // 0: c1.config.v1.ConstraintKind + (StringFieldType)(0), // 1: c1.config.v1.StringFieldType + (*Configuration)(nil), // 2: c1.config.v1.Configuration + (*Constraint)(nil), // 3: c1.config.v1.Constraint + (*FieldGroup)(nil), // 4: c1.config.v1.FieldGroup + (*Field)(nil), // 5: c1.config.v1.Field + (*Resource)(nil), // 6: c1.config.v1.Resource + (*ResourceId)(nil), // 7: c1.config.v1.ResourceId + (*ResourceField)(nil), // 8: c1.config.v1.ResourceField + (*ResourceSliceField)(nil), // 9: c1.config.v1.ResourceSliceField + (*ResourceIdField)(nil), // 10: c1.config.v1.ResourceIdField + (*ResourceIdSliceField)(nil), // 11: c1.config.v1.ResourceIdSliceField + (*IntField)(nil), // 12: c1.config.v1.IntField + (*BoolField)(nil), // 13: c1.config.v1.BoolField + (*StringSliceField)(nil), // 14: c1.config.v1.StringSliceField + (*StringMapField)(nil), // 15: c1.config.v1.StringMapField + (*StringFieldOption)(nil), // 16: c1.config.v1.StringFieldOption + (*StringField)(nil), // 17: c1.config.v1.StringField + nil, // 18: c1.config.v1.StringMapField.DefaultValueEntry + (*anypb.Any)(nil), // 19: google.protobuf.Any + (*ResourceIDRules)(nil), // 20: c1.config.v1.ResourceIDRules + (*RepeatedResourceIdRules)(nil), // 21: c1.config.v1.RepeatedResourceIdRules + (*Int64Rules)(nil), // 22: c1.config.v1.Int64Rules + (*BoolRules)(nil), // 23: c1.config.v1.BoolRules + (*RepeatedStringRules)(nil), // 24: c1.config.v1.RepeatedStringRules + (*StringMapRules)(nil), // 25: c1.config.v1.StringMapRules + (*StringRules)(nil), // 26: c1.config.v1.StringRules +} +var file_c1_config_v1_config_proto_depIdxs = []int32{ + 5, // 0: c1.config.v1.Configuration.fields:type_name -> c1.config.v1.Field + 3, // 1: c1.config.v1.Configuration.constraints:type_name -> c1.config.v1.Constraint + 4, // 2: c1.config.v1.Configuration.field_groups:type_name -> c1.config.v1.FieldGroup + 0, // 3: c1.config.v1.Constraint.kind:type_name -> c1.config.v1.ConstraintKind + 17, // 4: c1.config.v1.Field.string_field:type_name -> c1.config.v1.StringField + 12, // 5: c1.config.v1.Field.int_field:type_name -> c1.config.v1.IntField + 13, // 6: c1.config.v1.Field.bool_field:type_name -> c1.config.v1.BoolField + 14, // 7: c1.config.v1.Field.string_slice_field:type_name -> c1.config.v1.StringSliceField + 15, // 8: c1.config.v1.Field.string_map_field:type_name -> c1.config.v1.StringMapField + 10, // 9: c1.config.v1.Field.resource_id_field:type_name -> c1.config.v1.ResourceIdField + 11, // 10: c1.config.v1.Field.resource_id_slice_field:type_name -> c1.config.v1.ResourceIdSliceField + 8, // 11: c1.config.v1.Field.resource_field:type_name -> c1.config.v1.ResourceField + 9, // 12: c1.config.v1.Field.resource_slice_field:type_name -> c1.config.v1.ResourceSliceField + 7, // 13: c1.config.v1.Resource.resource_id:type_name -> c1.config.v1.ResourceId + 7, // 14: c1.config.v1.Resource.parent_resource_id:type_name -> c1.config.v1.ResourceId + 19, // 15: c1.config.v1.Resource.annotations:type_name -> google.protobuf.Any + 6, // 16: 
c1.config.v1.ResourceField.default_value:type_name -> c1.config.v1.Resource + 6, // 17: c1.config.v1.ResourceSliceField.default_value:type_name -> c1.config.v1.Resource + 7, // 18: c1.config.v1.ResourceIdField.default_value:type_name -> c1.config.v1.ResourceId + 20, // 19: c1.config.v1.ResourceIdField.rules:type_name -> c1.config.v1.ResourceIDRules + 10, // 20: c1.config.v1.ResourceIdSliceField.default_value:type_name -> c1.config.v1.ResourceIdField + 21, // 21: c1.config.v1.ResourceIdSliceField.rules:type_name -> c1.config.v1.RepeatedResourceIdRules + 22, // 22: c1.config.v1.IntField.rules:type_name -> c1.config.v1.Int64Rules + 23, // 23: c1.config.v1.BoolField.rules:type_name -> c1.config.v1.BoolRules + 24, // 24: c1.config.v1.StringSliceField.rules:type_name -> c1.config.v1.RepeatedStringRules + 18, // 25: c1.config.v1.StringMapField.default_value:type_name -> c1.config.v1.StringMapField.DefaultValueEntry + 25, // 26: c1.config.v1.StringMapField.rules:type_name -> c1.config.v1.StringMapRules + 26, // 27: c1.config.v1.StringField.rules:type_name -> c1.config.v1.StringRules + 1, // 28: c1.config.v1.StringField.type:type_name -> c1.config.v1.StringFieldType + 16, // 29: c1.config.v1.StringField.options:type_name -> c1.config.v1.StringFieldOption + 19, // 30: c1.config.v1.StringMapField.DefaultValueEntry.value:type_name -> google.protobuf.Any + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_c1_config_v1_config_proto_init() } +func file_c1_config_v1_config_proto_init() { + if File_c1_config_v1_config_proto != nil { + return + } + file_c1_config_v1_rules_proto_init() + file_c1_config_v1_config_proto_msgTypes[3].OneofWrappers = []any{ + (*field_StringField)(nil), + (*field_IntField)(nil), + (*field_BoolField)(nil), + (*field_StringSliceField)(nil), + (*field_StringMapField)(nil), + (*field_ResourceIdField)(nil), + (*field_ResourceIdSliceField)(nil), + (*field_ResourceField)(nil), + (*field_ResourceSliceField)(nil), + } + file_c1_config_v1_config_proto_msgTypes[8].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[9].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[10].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[11].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[12].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[13].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[15].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_config_v1_config_proto_rawDesc), len(file_c1_config_v1_config_proto_rawDesc)), + NumEnums: 2, + NumMessages: 17, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_config_v1_config_proto_goTypes, + DependencyIndexes: file_c1_config_v1_config_proto_depIdxs, + EnumInfos: file_c1_config_v1_config_proto_enumTypes, + MessageInfos: file_c1_config_v1_config_proto_msgTypes, + }.Build() + File_c1_config_v1_config_proto = out.File + file_c1_config_v1_config_proto_goTypes = nil + file_c1_config_v1_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.go 
b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.go index 0958f39c..11073384 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/config/v1/rules.proto +//go:build !protoopaque + package v1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -98,14 +99,9 @@ func (x WellKnownString) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use WellKnownString.Descriptor instead. -func (WellKnownString) EnumDescriptor() ([]byte, []int) { - return file_c1_config_v1_rules_proto_rawDescGZIP(), []int{0} -} - // Int64Rules describes the constraints applied to `int64` values type Int64Rules struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Const specifies that this field must be exactly the specified value Eq *int64 `protobuf:"varint,1,opt,name=eq,proto3,oneof" json:"eq,omitempty"` // Lt specifies that this field must be less than the specified value, @@ -161,11 +157,6 @@ func (x *Int64Rules) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Int64Rules.ProtoReflect.Descriptor instead. -func (*Int64Rules) Descriptor() ([]byte, []int) { - return file_c1_config_v1_rules_proto_rawDescGZIP(), []int{0} -} - func (x *Int64Rules) GetEq() int64 { if x != nil && x.Eq != nil { return *x.Eq @@ -229,9 +220,147 @@ func (x *Int64Rules) GetIsRequired() bool { return false } +func (x *Int64Rules) SetEq(v int64) { + x.Eq = &v +} + +func (x *Int64Rules) SetLt(v int64) { + x.Lt = &v +} + +func (x *Int64Rules) SetLte(v int64) { + x.Lte = &v +} + +func (x *Int64Rules) SetGt(v int64) { + x.Gt = &v +} + +func (x *Int64Rules) SetGte(v int64) { + x.Gte = &v +} + +func (x *Int64Rules) SetIn(v []int64) { + x.In = v +} + +func (x *Int64Rules) SetNotIn(v []int64) { + x.NotIn = v +} + +func (x *Int64Rules) SetValidateEmpty(v bool) { + x.ValidateEmpty = v +} + +func (x *Int64Rules) SetIsRequired(v bool) { + x.IsRequired = v +} + +func (x *Int64Rules) HasEq() bool { + if x == nil { + return false + } + return x.Eq != nil +} + +func (x *Int64Rules) HasLt() bool { + if x == nil { + return false + } + return x.Lt != nil +} + +func (x *Int64Rules) HasLte() bool { + if x == nil { + return false + } + return x.Lte != nil +} + +func (x *Int64Rules) HasGt() bool { + if x == nil { + return false + } + return x.Gt != nil +} + +func (x *Int64Rules) HasGte() bool { + if x == nil { + return false + } + return x.Gte != nil +} + +func (x *Int64Rules) ClearEq() { + x.Eq = nil +} + +func (x *Int64Rules) ClearLt() { + x.Lt = nil +} + +func (x *Int64Rules) ClearLte() { + x.Lte = nil +} + +func (x *Int64Rules) ClearGt() { + x.Gt = nil +} + +func (x *Int64Rules) ClearGte() { + x.Gte = nil +} + +type Int64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Const specifies that this field must be exactly the specified value + Eq *int64 + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int64 + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int64 + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int64 + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. + Gte *int64 + // In specifies that this field must be equal to one of the specified + // values + In []int64 + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int64 + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 Int64Rules_builder) Build() *Int64Rules { + m0 := &Int64Rules{} + b, x := &b0, m0 + _, _ = b, x + x.Eq = b.Eq + x.Lt = b.Lt + x.Lte = b.Lte + x.Gt = b.Gt + x.Gte = b.Gte + x.In = b.In + x.NotIn = b.NotIn + x.ValidateEmpty = b.ValidateEmpty + x.IsRequired = b.IsRequired + return m0 +} + // BoolRules describes the constraints applied to `bool` values type BoolRules struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Const specifies that this field must be exactly the specified value Eq *bool `protobuf:"varint,1,opt,name=eq,proto3,oneof" json:"eq,omitempty"` unknownFields protoimpl.UnknownFields @@ -263,11 +392,6 @@ func (x *BoolRules) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BoolRules.ProtoReflect.Descriptor instead. -func (*BoolRules) Descriptor() ([]byte, []int) { - return file_c1_config_v1_rules_proto_rawDescGZIP(), []int{1} -} - func (x *BoolRules) GetEq() bool { if x != nil && x.Eq != nil { return *x.Eq @@ -275,9 +399,39 @@ func (x *BoolRules) GetEq() bool { return false } +func (x *BoolRules) SetEq(v bool) { + x.Eq = &v +} + +func (x *BoolRules) HasEq() bool { + if x == nil { + return false + } + return x.Eq != nil +} + +func (x *BoolRules) ClearEq() { + x.Eq = nil +} + +type BoolRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Const specifies that this field must be exactly the specified value + Eq *bool +} + +func (b0 BoolRules_builder) Build() *BoolRules { + m0 := &BoolRules{} + b, x := &b0, m0 + _, _ = b, x + x.Eq = b.Eq + return m0 +} + // RepeatedRules describe the constraints applied to `repeated` values type RepeatedRules struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // MinItems specifies that this field must have the specified number of // items at a minimum MinItems *uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems,proto3,oneof" json:"min_items,omitempty"` @@ -331,11 +485,6 @@ func (x *RepeatedRules) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RepeatedRules.ProtoReflect.Descriptor instead. 
-func (*RepeatedRules) Descriptor() ([]byte, []int) { - return file_c1_config_v1_rules_proto_rawDescGZIP(), []int{2} -} - func (x *RepeatedRules) GetMinItems() uint64 { if x != nil && x.MinItems != nil { return *x.MinItems @@ -382,7 +531,7 @@ func (x *RepeatedRules) GetBool() *BoolRules { return nil } -func (x *RepeatedRules) GetString_() *StringRules { +func (x *RepeatedRules) GetString() *StringRules { if x != nil { if x, ok := x.ItemRules.(*RepeatedRules_String_); ok { return x.String_ @@ -391,6 +540,11 @@ func (x *RepeatedRules) GetString_() *StringRules { return nil } +// Deprecated: Use GetString instead. +func (x *RepeatedRules) GetString_() *StringRules { + return x.GetString() +} + func (x *RepeatedRules) GetValidateEmpty() bool { if x != nil { return x.ValidateEmpty @@ -405,6 +559,206 @@ func (x *RepeatedRules) GetIsRequired() bool { return false } +func (x *RepeatedRules) SetMinItems(v uint64) { + x.MinItems = &v +} + +func (x *RepeatedRules) SetMaxItems(v uint64) { + x.MaxItems = &v +} + +func (x *RepeatedRules) SetUnique(v bool) { + x.Unique = v +} + +func (x *RepeatedRules) SetInt64(v *Int64Rules) { + if v == nil { + x.ItemRules = nil + return + } + x.ItemRules = &RepeatedRules_Int64{v} +} + +func (x *RepeatedRules) SetBool(v *BoolRules) { + if v == nil { + x.ItemRules = nil + return + } + x.ItemRules = &RepeatedRules_Bool{v} +} + +func (x *RepeatedRules) SetString(v *StringRules) { + if v == nil { + x.ItemRules = nil + return + } + x.ItemRules = &RepeatedRules_String_{v} +} + +func (x *RepeatedRules) SetValidateEmpty(v bool) { + x.ValidateEmpty = v +} + +func (x *RepeatedRules) SetIsRequired(v bool) { + x.IsRequired = v +} + +func (x *RepeatedRules) HasMinItems() bool { + if x == nil { + return false + } + return x.MinItems != nil +} + +func (x *RepeatedRules) HasMaxItems() bool { + if x == nil { + return false + } + return x.MaxItems != nil +} + +func (x *RepeatedRules) HasItemRules() bool { + if x == nil { + return false + } + return x.ItemRules != nil +} + +func (x *RepeatedRules) HasInt64() bool { + if x == nil { + return false + } + _, ok := x.ItemRules.(*RepeatedRules_Int64) + return ok +} + +func (x *RepeatedRules) HasBool() bool { + if x == nil { + return false + } + _, ok := x.ItemRules.(*RepeatedRules_Bool) + return ok +} + +func (x *RepeatedRules) HasString() bool { + if x == nil { + return false + } + _, ok := x.ItemRules.(*RepeatedRules_String_) + return ok +} + +func (x *RepeatedRules) ClearMinItems() { + x.MinItems = nil +} + +func (x *RepeatedRules) ClearMaxItems() { + x.MaxItems = nil +} + +func (x *RepeatedRules) ClearItemRules() { + x.ItemRules = nil +} + +func (x *RepeatedRules) ClearInt64() { + if _, ok := x.ItemRules.(*RepeatedRules_Int64); ok { + x.ItemRules = nil + } +} + +func (x *RepeatedRules) ClearBool() { + if _, ok := x.ItemRules.(*RepeatedRules_Bool); ok { + x.ItemRules = nil + } +} + +func (x *RepeatedRules) ClearString() { + if _, ok := x.ItemRules.(*RepeatedRules_String_); ok { + x.ItemRules = nil + } +} + +const RepeatedRules_ItemRules_not_set_case case_RepeatedRules_ItemRules = 0 +const RepeatedRules_Int64_case case_RepeatedRules_ItemRules = 100 +const RepeatedRules_Bool_case case_RepeatedRules_ItemRules = 101 +const RepeatedRules_String__case case_RepeatedRules_ItemRules = 102 + +func (x *RepeatedRules) WhichItemRules() case_RepeatedRules_ItemRules { + if x == nil { + return RepeatedRules_ItemRules_not_set_case + } + switch x.ItemRules.(type) { + case *RepeatedRules_Int64: + return RepeatedRules_Int64_case + case *RepeatedRules_Bool: + 
return RepeatedRules_Bool_case + case *RepeatedRules_String_: + return RepeatedRules_String__case + default: + return RepeatedRules_ItemRules_not_set_case + } +} + +type RepeatedRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 + // MaxItems specifies that this field must have the specified number of + // items at a maximum + MaxItems *uint64 + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). + Unique bool + // Items specifies the constraints to be applied to each item in the field. + // Repeated message fields will still execute validation against each item + // unless skip is specified here. + + // Fields of oneof ItemRules: + // Scalar Field Types + Int64 *Int64Rules + Bool *BoolRules + String *StringRules + // -- end of ItemRules + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 RepeatedRules_builder) Build() *RepeatedRules { + m0 := &RepeatedRules{} + b, x := &b0, m0 + _, _ = b, x + x.MinItems = b.MinItems + x.MaxItems = b.MaxItems + x.Unique = b.Unique + if b.Int64 != nil { + x.ItemRules = &RepeatedRules_Int64{b.Int64} + } + if b.Bool != nil { + x.ItemRules = &RepeatedRules_Bool{b.Bool} + } + if b.String != nil { + x.ItemRules = &RepeatedRules_String_{b.String} + } + x.ValidateEmpty = b.ValidateEmpty + x.IsRequired = b.IsRequired + return m0 +} + +type case_RepeatedRules_ItemRules protoreflect.FieldNumber + +func (x case_RepeatedRules_ItemRules) String() string { + md := file_c1_config_v1_rules_proto_msgTypes[2].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isRepeatedRules_ItemRules interface { isRepeatedRules_ItemRules() } @@ -429,7 +783,7 @@ func (*RepeatedRules_Bool) isRepeatedRules_ItemRules() {} func (*RepeatedRules_String_) isRepeatedRules_ItemRules() {} type RepeatedStringRules struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // MinItems specifies that this field must have the specified number of // items at a minimum MinItems *uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems,proto3,oneof" json:"min_items,omitempty"` @@ -474,11 +828,6 @@ func (x *RepeatedStringRules) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RepeatedStringRules.ProtoReflect.Descriptor instead. 
-func (*RepeatedStringRules) Descriptor() ([]byte, []int) { - return file_c1_config_v1_rules_proto_rawDescGZIP(), []int{3} -} - func (x *RepeatedStringRules) GetMinItems() uint64 { if x != nil && x.MinItems != nil { return *x.MinItems @@ -521,8 +870,98 @@ func (x *RepeatedStringRules) GetIsRequired() bool { return false } +func (x *RepeatedStringRules) SetMinItems(v uint64) { + x.MinItems = &v +} + +func (x *RepeatedStringRules) SetMaxItems(v uint64) { + x.MaxItems = &v +} + +func (x *RepeatedStringRules) SetUnique(v bool) { + x.Unique = v +} + +func (x *RepeatedStringRules) SetItemRules(v *StringRules) { + x.ItemRules = v +} + +func (x *RepeatedStringRules) SetValidateEmpty(v bool) { + x.ValidateEmpty = v +} + +func (x *RepeatedStringRules) SetIsRequired(v bool) { + x.IsRequired = v +} + +func (x *RepeatedStringRules) HasMinItems() bool { + if x == nil { + return false + } + return x.MinItems != nil +} + +func (x *RepeatedStringRules) HasMaxItems() bool { + if x == nil { + return false + } + return x.MaxItems != nil +} + +func (x *RepeatedStringRules) HasItemRules() bool { + if x == nil { + return false + } + return x.ItemRules != nil +} + +func (x *RepeatedStringRules) ClearMinItems() { + x.MinItems = nil +} + +func (x *RepeatedStringRules) ClearMaxItems() { + x.MaxItems = nil +} + +func (x *RepeatedStringRules) ClearItemRules() { + x.ItemRules = nil +} + +type RepeatedStringRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 + // MaxItems specifies that this field must have the specified number of + // items at a maximum + MaxItems *uint64 + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). + Unique bool + ItemRules *StringRules + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 RepeatedStringRules_builder) Build() *RepeatedStringRules { + m0 := &RepeatedStringRules{} + b, x := &b0, m0 + _, _ = b, x + x.MinItems = b.MinItems + x.MaxItems = b.MaxItems + x.Unique = b.Unique + x.ItemRules = b.ItemRules + x.ValidateEmpty = b.ValidateEmpty + x.IsRequired = b.IsRequired + return m0 +} + type StringRules struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Const specifies that this field must be exactly the specified value Eq *string `protobuf:"bytes,1,opt,name=eq,proto3,oneof" json:"eq,omitempty"` // Len specifies that this field must be the specified number of @@ -593,11 +1032,6 @@ func (x *StringRules) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringRules.ProtoReflect.Descriptor instead. 
-func (*StringRules) Descriptor() ([]byte, []int) { - return file_c1_config_v1_rules_proto_rawDescGZIP(), []int{4} -} - func (x *StringRules) GetEq() string { if x != nil && x.Eq != nil { return *x.Eq @@ -696,8 +1130,230 @@ func (x *StringRules) GetIsRequired() bool { return false } +func (x *StringRules) SetEq(v string) { + x.Eq = &v +} + +func (x *StringRules) SetLen(v uint64) { + x.Len = &v +} + +func (x *StringRules) SetMinLen(v uint64) { + x.MinLen = &v +} + +func (x *StringRules) SetMaxLen(v uint64) { + x.MaxLen = &v +} + +func (x *StringRules) SetPattern(v string) { + x.Pattern = &v +} + +func (x *StringRules) SetPrefix(v string) { + x.Prefix = &v +} + +func (x *StringRules) SetSuffix(v string) { + x.Suffix = &v +} + +func (x *StringRules) SetContains(v string) { + x.Contains = &v +} + +func (x *StringRules) SetNotContains(v string) { + x.NotContains = &v +} + +func (x *StringRules) SetIn(v []string) { + x.In = v +} + +func (x *StringRules) SetNotIn(v []string) { + x.NotIn = v +} + +func (x *StringRules) SetWellKnown(v WellKnownString) { + x.WellKnown = v +} + +func (x *StringRules) SetValidateEmpty(v bool) { + x.ValidateEmpty = v +} + +func (x *StringRules) SetIsRequired(v bool) { + x.IsRequired = v +} + +func (x *StringRules) HasEq() bool { + if x == nil { + return false + } + return x.Eq != nil +} + +func (x *StringRules) HasLen() bool { + if x == nil { + return false + } + return x.Len != nil +} + +func (x *StringRules) HasMinLen() bool { + if x == nil { + return false + } + return x.MinLen != nil +} + +func (x *StringRules) HasMaxLen() bool { + if x == nil { + return false + } + return x.MaxLen != nil +} + +func (x *StringRules) HasPattern() bool { + if x == nil { + return false + } + return x.Pattern != nil +} + +func (x *StringRules) HasPrefix() bool { + if x == nil { + return false + } + return x.Prefix != nil +} + +func (x *StringRules) HasSuffix() bool { + if x == nil { + return false + } + return x.Suffix != nil +} + +func (x *StringRules) HasContains() bool { + if x == nil { + return false + } + return x.Contains != nil +} + +func (x *StringRules) HasNotContains() bool { + if x == nil { + return false + } + return x.NotContains != nil +} + +func (x *StringRules) ClearEq() { + x.Eq = nil +} + +func (x *StringRules) ClearLen() { + x.Len = nil +} + +func (x *StringRules) ClearMinLen() { + x.MinLen = nil +} + +func (x *StringRules) ClearMaxLen() { + x.MaxLen = nil +} + +func (x *StringRules) ClearPattern() { + x.Pattern = nil +} + +func (x *StringRules) ClearPrefix() { + x.Prefix = nil +} + +func (x *StringRules) ClearSuffix() { + x.Suffix = nil +} + +func (x *StringRules) ClearContains() { + x.Contains = nil +} + +func (x *StringRules) ClearNotContains() { + x.NotContains = nil +} + +type StringRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Const specifies that this field must be exactly the specified value + Eq *string + // Len specifies that this field must be the specified number of + // characters (Unicode code points). Note that the number of + // characters may differ from the number of bytes in the string. + Len *uint64 + // MinLen specifies that this field must be the specified number of + // characters (Unicode code points) at a minimum. Note that the number of + // characters may differ from the number of bytes in the string. + MinLen *uint64 + // MaxLen specifies that this field must be the specified number of + // characters (Unicode code points) at a maximum. 
Note that the number of + // characters may differ from the number of bytes in the string. + MaxLen *uint64 + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + Pattern *string + // Prefix specifies that this field must have the specified substring at + // the beginning of the string. + Prefix *string + // Suffix specifies that this field must have the specified substring at + // the end of the string. + Suffix *string + // Contains specifies that this field must have the specified substring + // anywhere in the string. + Contains *string + // NotContains specifies that this field cannot have the specified substring + // anywhere in the string. + NotContains *string + // In specifies that this field must be equal to one of the specified + // values + In []string + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []string + WellKnown WellKnownString + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 StringRules_builder) Build() *StringRules { + m0 := &StringRules{} + b, x := &b0, m0 + _, _ = b, x + x.Eq = b.Eq + x.Len = b.Len + x.MinLen = b.MinLen + x.MaxLen = b.MaxLen + x.Pattern = b.Pattern + x.Prefix = b.Prefix + x.Suffix = b.Suffix + x.Contains = b.Contains + x.NotContains = b.NotContains + x.In = b.In + x.NotIn = b.NotIn + x.WellKnown = b.WellKnown + x.ValidateEmpty = b.ValidateEmpty + x.IsRequired = b.IsRequired + return m0 +} + type StringMapRules struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // IgnoreEmpty specifies that the validation rules of this field should be // evaluated only if the field is not empty ValidateEmpty bool `protobuf:"varint,1,opt,name=validate_empty,json=validateEmpty,proto3" json:"validate_empty,omitempty"` @@ -731,11 +1387,6 @@ func (x *StringMapRules) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringMapRules.ProtoReflect.Descriptor instead. 
-func (*StringMapRules) Descriptor() ([]byte, []int) { - return file_c1_config_v1_rules_proto_rawDescGZIP(), []int{5} -} - func (x *StringMapRules) GetValidateEmpty() bool { if x != nil { return x.ValidateEmpty @@ -750,158 +1401,261 @@ func (x *StringMapRules) GetIsRequired() bool { return false } -var File_c1_config_v1_rules_proto protoreflect.FileDescriptor +func (x *StringMapRules) SetValidateEmpty(v bool) { + x.ValidateEmpty = v +} -var file_c1_config_v1_rules_proto_rawDesc = string([]byte{ - 0x0a, 0x18, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x22, 0x8d, 0x02, 0x0a, 0x0a, 0x49, 0x6e, 0x74, - 0x36, 0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x13, 0x0a, 0x02, 0x65, 0x71, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x02, 0x65, 0x71, 0x88, 0x01, 0x01, 0x12, 0x13, 0x0a, 0x02, - 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x02, 0x6c, 0x74, 0x88, 0x01, - 0x01, 0x12, 0x15, 0x0a, 0x03, 0x6c, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, - 0x52, 0x03, 0x6c, 0x74, 0x65, 0x88, 0x01, 0x01, 0x12, 0x13, 0x0a, 0x02, 0x67, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x02, 0x67, 0x74, 0x88, 0x01, 0x01, 0x12, 0x15, 0x0a, - 0x03, 0x67, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x03, 0x67, 0x74, - 0x65, 0x88, 0x01, 0x01, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x03, - 0x52, 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x64, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x65, 0x71, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x6c, - 0x74, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6c, 0x74, 0x65, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x67, 0x74, - 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x67, 0x74, 0x65, 0x22, 0x27, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x13, 0x0a, 0x02, 0x65, 0x71, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x02, 0x65, 0x71, 0x88, 0x01, 0x01, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x65, - 0x71, 0x22, 0xf3, 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, - 0x6d, 0x73, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, - 0x74, 0x65, 0x6d, 0x73, 0x88, 0x01, 0x01, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x6e, 0x69, 0x71, 0x75, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x12, - 0x30, 0x0a, 0x05, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, - 0x74, 0x36, 
0x34, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x74, 0x36, - 0x34, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6f, 0x6f, 0x6c, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x42, - 0x6f, 0x6f, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x04, 0x62, 0x6f, 0x6f, 0x6c, - 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, - 0x69, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42, 0x0c, 0x0a, - 0x0a, 0x69, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, - 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x61, - 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x8f, 0x02, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, - 0x20, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x88, 0x01, - 0x01, 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, - 0x88, 0x01, 0x01, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x0a, 0x69, - 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x09, 0x69, 0x74, 0x65, 0x6d, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, - 0x69, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42, 0x0c, 0x0a, - 0x0a, 0x5f, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, - 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xab, 0x04, 0x0a, 0x0b, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x13, 0x0a, 0x02, 0x65, 0x71, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x02, 0x65, 0x71, 0x88, 0x01, 0x01, 0x12, 0x15, - 0x0a, 0x03, 0x6c, 0x65, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x03, 0x6c, - 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1c, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 
0x48, 0x02, 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, - 0x88, 0x01, 0x01, 0x12, 0x1c, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x48, 0x03, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x88, 0x01, - 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x04, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x88, 0x01, 0x01, - 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x05, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, - 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x06, 0x52, - 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x07, 0x52, 0x08, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, 0x0c, 0x6e, - 0x6f, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, - 0x09, 0x48, 0x08, 0x52, 0x0b, 0x6e, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, - 0x88, 0x01, 0x01, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x5f, 0x69, 0x6e, 0x18, 0x0b, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x6f, 0x74, 0x49, 0x6e, 0x12, 0x3c, 0x0a, 0x0a, 0x77, 0x65, - 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x65, - 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x09, 0x77, - 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1b, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, - 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x65, 0x71, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6c, 0x65, 0x6e, 0x42, - 0x0a, 0x0a, 0x08, 0x5f, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x42, 0x0a, 0x0a, 0x08, 0x5f, - 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x61, 0x74, 0x74, - 0x65, 0x72, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x09, - 0x0a, 0x07, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x58, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x4d, 0x61, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x2a, 0x99, 0x02, 0x0a, 0x0f, 0x57, 0x65, 0x6c, 0x6c, 
0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x21, 0x0a, 0x1d, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x57, 0x45, 0x4c, 0x4c, - 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x4d, - 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x4e, - 0x41, 0x4d, 0x45, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x50, 0x10, 0x03, 0x12, - 0x1a, 0x0a, 0x16, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, - 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x04, 0x12, 0x1a, 0x0a, 0x16, 0x57, - 0x45, 0x4c, 0x4c, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, - 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x57, 0x45, 0x4c, 0x4c, 0x5f, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x52, 0x49, - 0x10, 0x06, 0x12, 0x1d, 0x0a, 0x19, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, - 0x07, 0x12, 0x1a, 0x0a, 0x16, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x55, 0x49, 0x44, 0x10, 0x08, 0x42, 0x33, 0x5a, - 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, - 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, - 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, - 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +func (x *StringMapRules) SetIsRequired(v bool) { + x.IsRequired = v +} -var ( - file_c1_config_v1_rules_proto_rawDescOnce sync.Once - file_c1_config_v1_rules_proto_rawDescData []byte -) +type StringMapRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} -func file_c1_config_v1_rules_proto_rawDescGZIP() []byte { - file_c1_config_v1_rules_proto_rawDescOnce.Do(func() { - file_c1_config_v1_rules_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_config_v1_rules_proto_rawDesc), len(file_c1_config_v1_rules_proto_rawDesc))) - }) - return file_c1_config_v1_rules_proto_rawDescData +func (b0 StringMapRules_builder) Build() *StringMapRules { + m0 := &StringMapRules{} + b, x := &b0, m0 + _, _ = b, x + x.ValidateEmpty = b.ValidateEmpty + x.IsRequired = b.IsRequired + return m0 +} + +type ResourceIDRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + AllowedResourceTypeIds []string `protobuf:"bytes,1,rep,name=allowed_resource_type_ids,json=allowedResourceTypeIds,proto3" json:"allowed_resource_type_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceIDRules) Reset() { + *x = ResourceIDRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } +func (x *ResourceIDRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceIDRules) ProtoMessage() {} + +func (x *ResourceIDRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceIDRules) GetAllowedResourceTypeIds() []string { + if x != nil { + return x.AllowedResourceTypeIds + } + return nil +} + +func (x *ResourceIDRules) SetAllowedResourceTypeIds(v []string) { + x.AllowedResourceTypeIds = v +} + +type ResourceIDRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + AllowedResourceTypeIds []string +} + +func (b0 ResourceIDRules_builder) Build() *ResourceIDRules { + m0 := &ResourceIDRules{} + b, x := &b0, m0 + _, _ = b, x + x.AllowedResourceTypeIds = b.AllowedResourceTypeIds + return m0 +} + +type RepeatedResourceIdRules struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + AllowedResourceTypeIds []string `protobuf:"bytes,1,rep,name=allowed_resource_type_ids,json=allowedResourceTypeIds,proto3" json:"allowed_resource_type_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RepeatedResourceIdRules) Reset() { + *x = RepeatedResourceIdRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RepeatedResourceIdRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RepeatedResourceIdRules) ProtoMessage() {} + +func (x *RepeatedResourceIdRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RepeatedResourceIdRules) GetAllowedResourceTypeIds() []string { + if x != nil { + return x.AllowedResourceTypeIds + } + return nil +} + +func (x *RepeatedResourceIdRules) SetAllowedResourceTypeIds(v []string) { + x.AllowedResourceTypeIds = v +} + +type RepeatedResourceIdRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + AllowedResourceTypeIds []string +} + +func (b0 RepeatedResourceIdRules_builder) Build() *RepeatedResourceIdRules { + m0 := &RepeatedResourceIdRules{} + b, x := &b0, m0 + _, _ = b, x + x.AllowedResourceTypeIds = b.AllowedResourceTypeIds + return m0 +} + +var File_c1_config_v1_rules_proto protoreflect.FileDescriptor + +const file_c1_config_v1_rules_proto_rawDesc = "" + + "\n" + + "\x18c1/config/v1/rules.proto\x12\fc1.config.v1\"\x8d\x02\n" + + "\n" + + "Int64Rules\x12\x13\n" + + "\x02eq\x18\x01 \x01(\x03H\x00R\x02eq\x88\x01\x01\x12\x13\n" + + "\x02lt\x18\x02 \x01(\x03H\x01R\x02lt\x88\x01\x01\x12\x15\n" + + "\x03lte\x18\x03 \x01(\x03H\x02R\x03lte\x88\x01\x01\x12\x13\n" + + "\x02gt\x18\x04 \x01(\x03H\x03R\x02gt\x88\x01\x01\x12\x15\n" + + "\x03gte\x18\x05 \x01(\x03H\x04R\x03gte\x88\x01\x01\x12\x0e\n" + + "\x02in\x18\x06 \x03(\x03R\x02in\x12\x15\n" + + "\x06not_in\x18\a \x03(\x03R\x05notIn\x12%\n" + + "\x0evalidate_empty\x18\b \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\t \x01(\bR\n" + + "isRequiredB\x05\n" + + "\x03_eqB\x05\n" + + "\x03_ltB\x06\n" + + "\x04_lteB\x05\n" + + "\x03_gtB\x06\n" + + "\x04_gte\"'\n" + + "\tBoolRules\x12\x13\n" + + "\x02eq\x18\x01 \x01(\bH\x00R\x02eq\x88\x01\x01B\x05\n" + + "\x03_eq\"\xf3\x02\n" + + "\rRepeatedRules\x12 \n" + + "\tmin_items\x18\x01 \x01(\x04H\x01R\bminItems\x88\x01\x01\x12 \n" + + "\tmax_items\x18\x02 \x01(\x04H\x02R\bmaxItems\x88\x01\x01\x12\x16\n" + + "\x06unique\x18\x03 \x01(\bR\x06unique\x120\n" + + "\x05int64\x18d \x01(\v2\x18.c1.config.v1.Int64RulesH\x00R\x05int64\x12-\n" + + "\x04bool\x18e \x01(\v2\x17.c1.config.v1.BoolRulesH\x00R\x04bool\x123\n" + + "\x06string\x18f \x01(\v2\x19.c1.config.v1.StringRulesH\x00R\x06string\x12%\n" + + "\x0evalidate_empty\x18\x04 \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x05 \x01(\bR\n" + + "isRequiredB\f\n" + + "\n" + + "item_rulesB\f\n" + + "\n" + + "_min_itemsB\f\n" + + "\n" 
+ + "_max_items\"\x8f\x02\n" + + "\x13RepeatedStringRules\x12 \n" + + "\tmin_items\x18\x01 \x01(\x04H\x00R\bminItems\x88\x01\x01\x12 \n" + + "\tmax_items\x18\x02 \x01(\x04H\x01R\bmaxItems\x88\x01\x01\x12\x16\n" + + "\x06unique\x18\x03 \x01(\bR\x06unique\x128\n" + + "\n" + + "item_rules\x18\x04 \x01(\v2\x19.c1.config.v1.StringRulesR\titemRules\x12%\n" + + "\x0evalidate_empty\x18\x05 \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x06 \x01(\bR\n" + + "isRequiredB\f\n" + + "\n" + + "_min_itemsB\f\n" + + "\n" + + "_max_items\"\xab\x04\n" + + "\vStringRules\x12\x13\n" + + "\x02eq\x18\x01 \x01(\tH\x00R\x02eq\x88\x01\x01\x12\x15\n" + + "\x03len\x18\x13 \x01(\x04H\x01R\x03len\x88\x01\x01\x12\x1c\n" + + "\amin_len\x18\x02 \x01(\x04H\x02R\x06minLen\x88\x01\x01\x12\x1c\n" + + "\amax_len\x18\x03 \x01(\x04H\x03R\x06maxLen\x88\x01\x01\x12\x1d\n" + + "\apattern\x18\x06 \x01(\tH\x04R\apattern\x88\x01\x01\x12\x1b\n" + + "\x06prefix\x18\a \x01(\tH\x05R\x06prefix\x88\x01\x01\x12\x1b\n" + + "\x06suffix\x18\b \x01(\tH\x06R\x06suffix\x88\x01\x01\x12\x1f\n" + + "\bcontains\x18\t \x01(\tH\aR\bcontains\x88\x01\x01\x12&\n" + + "\fnot_contains\x18\x17 \x01(\tH\bR\vnotContains\x88\x01\x01\x12\x0e\n" + + "\x02in\x18\n" + + " \x03(\tR\x02in\x12\x15\n" + + "\x06not_in\x18\v \x03(\tR\x05notIn\x12<\n" + + "\n" + + "well_known\x18\f \x01(\x0e2\x1d.c1.config.v1.WellKnownStringR\twellKnown\x12%\n" + + "\x0evalidate_empty\x18\x1a \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x1b \x01(\bR\n" + + "isRequiredB\x05\n" + + "\x03_eqB\x06\n" + + "\x04_lenB\n" + + "\n" + + "\b_min_lenB\n" + + "\n" + + "\b_max_lenB\n" + + "\n" + + "\b_patternB\t\n" + + "\a_prefixB\t\n" + + "\a_suffixB\v\n" + + "\t_containsB\x0f\n" + + "\r_not_contains\"X\n" + + "\x0eStringMapRules\x12%\n" + + "\x0evalidate_empty\x18\x01 \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x02 \x01(\bR\n" + + "isRequired\"L\n" + + "\x0fResourceIDRules\x129\n" + + "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds\"T\n" + + "\x17RepeatedResourceIdRules\x129\n" + + "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds*\x99\x02\n" + + "\x0fWellKnownString\x12!\n" + + "\x1dWELL_KNOWN_STRING_UNSPECIFIED\x10\x00\x12\x1b\n" + + "\x17WELL_KNOWN_STRING_EMAIL\x10\x01\x12\x1e\n" + + "\x1aWELL_KNOWN_STRING_HOSTNAME\x10\x02\x12\x18\n" + + "\x14WELL_KNOWN_STRING_IP\x10\x03\x12\x1a\n" + + "\x16WELL_KNOWN_STRING_IPV4\x10\x04\x12\x1a\n" + + "\x16WELL_KNOWN_STRING_IPV6\x10\x05\x12\x19\n" + + "\x15WELL_KNOWN_STRING_URI\x10\x06\x12\x1d\n" + + "\x19WELL_KNOWN_STRING_ADDRESS\x10\a\x12\x1a\n" + + "\x16WELL_KNOWN_STRING_UUID\x10\bB3Z1github.com/conductorone/baton-sdk/pb/c1/config/v1b\x06proto3" + var file_c1_config_v1_rules_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_c1_config_v1_rules_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_c1_config_v1_rules_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_c1_config_v1_rules_proto_goTypes = []any{ - (WellKnownString)(0), // 0: c1.config.v1.WellKnownString - (*Int64Rules)(nil), // 1: c1.config.v1.Int64Rules - (*BoolRules)(nil), // 2: c1.config.v1.BoolRules - (*RepeatedRules)(nil), // 3: c1.config.v1.RepeatedRules - (*RepeatedStringRules)(nil), // 4: c1.config.v1.RepeatedStringRules - (*StringRules)(nil), // 5: c1.config.v1.StringRules - (*StringMapRules)(nil), // 6: c1.config.v1.StringMapRules + (WellKnownString)(0), // 0: c1.config.v1.WellKnownString + (*Int64Rules)(nil), // 1: c1.config.v1.Int64Rules + (*BoolRules)(nil), // 2: 
c1.config.v1.BoolRules + (*RepeatedRules)(nil), // 3: c1.config.v1.RepeatedRules + (*RepeatedStringRules)(nil), // 4: c1.config.v1.RepeatedStringRules + (*StringRules)(nil), // 5: c1.config.v1.StringRules + (*StringMapRules)(nil), // 6: c1.config.v1.StringMapRules + (*ResourceIDRules)(nil), // 7: c1.config.v1.ResourceIDRules + (*RepeatedResourceIdRules)(nil), // 8: c1.config.v1.RepeatedResourceIdRules } var file_c1_config_v1_rules_proto_depIdxs = []int32{ 1, // 0: c1.config.v1.RepeatedRules.int64:type_name -> c1.config.v1.Int64Rules @@ -936,7 +1690,7 @@ func file_c1_config_v1_rules_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_config_v1_rules_proto_rawDesc), len(file_c1_config_v1_rules_proto_rawDesc)), NumEnums: 1, - NumMessages: 6, + NumMessages: 8, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.validate.go index a2c8ff0d..f5f49e9e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.validate.go @@ -892,3 +892,205 @@ var _ interface { Cause() error ErrorName() string } = StringMapRulesValidationError{} + +// Validate checks the field values on ResourceIDRules with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourceIDRules) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceIDRules with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceIDRulesMultiError, or nil if none found. +func (m *ResourceIDRules) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceIDRules) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ResourceIDRulesMultiError(errors) + } + + return nil +} + +// ResourceIDRulesMultiError is an error wrapping multiple validation errors +// returned by ResourceIDRules.ValidateAll() if the designated constraints +// aren't met. +type ResourceIDRulesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceIDRulesMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceIDRulesMultiError) AllErrors() []error { return m } + +// ResourceIDRulesValidationError is the validation error returned by +// ResourceIDRules.Validate if the designated constraints aren't met. +type ResourceIDRulesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceIDRulesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceIDRulesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceIDRulesValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ResourceIDRulesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourceIDRulesValidationError) ErrorName() string { return "ResourceIDRulesValidationError" } + +// Error satisfies the builtin error interface +func (e ResourceIDRulesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceIDRules.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceIDRulesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceIDRulesValidationError{} + +// Validate checks the field values on RepeatedResourceIdRules with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RepeatedResourceIdRules) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RepeatedResourceIdRules with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RepeatedResourceIdRulesMultiError, or nil if none found. +func (m *RepeatedResourceIdRules) ValidateAll() error { + return m.validate(true) +} + +func (m *RepeatedResourceIdRules) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return RepeatedResourceIdRulesMultiError(errors) + } + + return nil +} + +// RepeatedResourceIdRulesMultiError is an error wrapping multiple validation +// errors returned by RepeatedResourceIdRules.ValidateAll() if the designated +// constraints aren't met. +type RepeatedResourceIdRulesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RepeatedResourceIdRulesMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RepeatedResourceIdRulesMultiError) AllErrors() []error { return m } + +// RepeatedResourceIdRulesValidationError is the validation error returned by +// RepeatedResourceIdRules.Validate if the designated constraints aren't met. +type RepeatedResourceIdRulesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RepeatedResourceIdRulesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RepeatedResourceIdRulesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RepeatedResourceIdRulesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RepeatedResourceIdRulesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e RepeatedResourceIdRulesValidationError) ErrorName() string { + return "RepeatedResourceIdRulesValidationError" +} + +// Error satisfies the builtin error interface +func (e RepeatedResourceIdRulesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRepeatedResourceIdRules.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RepeatedResourceIdRulesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RepeatedResourceIdRulesValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules_protoopaque.pb.go new file mode 100644 index 00000000..87964c85 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules_protoopaque.pb.go @@ -0,0 +1,1742 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/config/v1/rules.proto + +//go:build protoopaque + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type WellKnownString int32 + +const ( + WellKnownString_WELL_KNOWN_STRING_UNSPECIFIED WellKnownString = 0 + // Email specifies that the field must be a valid email address as + // defined by RFC 5322 + WellKnownString_WELL_KNOWN_STRING_EMAIL WellKnownString = 1 + // Hostname specifies that the field must be a valid hostname as + // defined by RFC 1034. This constraint does not support + // internationalized domain names (IDNs). + WellKnownString_WELL_KNOWN_STRING_HOSTNAME WellKnownString = 2 + // Ip specifies that the field must be a valid IP (v4 or v6) address. + // Valid IPv6 addresses should not include surrounding square brackets. + WellKnownString_WELL_KNOWN_STRING_IP WellKnownString = 3 + // Ipv4 specifies that the field must be a valid IPv4 address. + WellKnownString_WELL_KNOWN_STRING_IPV4 WellKnownString = 4 + // Ipv6 specifies that the field must be a valid IPv6 address. Valid + // IPv6 addresses should not include surrounding square brackets. + WellKnownString_WELL_KNOWN_STRING_IPV6 WellKnownString = 5 + WellKnownString_WELL_KNOWN_STRING_URI WellKnownString = 6 + // Address specifies that the field must be either a valid hostname as + // defined by RFC 1034 (which does not support internationalized domain + // names or IDNs), or it can be a valid IP (v4 or v6). + WellKnownString_WELL_KNOWN_STRING_ADDRESS WellKnownString = 7 + // Uuid specifies that the field must be a valid UUID as defined by + // RFC 4122 + WellKnownString_WELL_KNOWN_STRING_UUID WellKnownString = 8 +) + +// Enum value maps for WellKnownString. 
+var ( + WellKnownString_name = map[int32]string{ + 0: "WELL_KNOWN_STRING_UNSPECIFIED", + 1: "WELL_KNOWN_STRING_EMAIL", + 2: "WELL_KNOWN_STRING_HOSTNAME", + 3: "WELL_KNOWN_STRING_IP", + 4: "WELL_KNOWN_STRING_IPV4", + 5: "WELL_KNOWN_STRING_IPV6", + 6: "WELL_KNOWN_STRING_URI", + 7: "WELL_KNOWN_STRING_ADDRESS", + 8: "WELL_KNOWN_STRING_UUID", + } + WellKnownString_value = map[string]int32{ + "WELL_KNOWN_STRING_UNSPECIFIED": 0, + "WELL_KNOWN_STRING_EMAIL": 1, + "WELL_KNOWN_STRING_HOSTNAME": 2, + "WELL_KNOWN_STRING_IP": 3, + "WELL_KNOWN_STRING_IPV4": 4, + "WELL_KNOWN_STRING_IPV6": 5, + "WELL_KNOWN_STRING_URI": 6, + "WELL_KNOWN_STRING_ADDRESS": 7, + "WELL_KNOWN_STRING_UUID": 8, + } +) + +func (x WellKnownString) Enum() *WellKnownString { + p := new(WellKnownString) + *p = x + return p +} + +func (x WellKnownString) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (WellKnownString) Descriptor() protoreflect.EnumDescriptor { + return file_c1_config_v1_rules_proto_enumTypes[0].Descriptor() +} + +func (WellKnownString) Type() protoreflect.EnumType { + return &file_c1_config_v1_rules_proto_enumTypes[0] +} + +func (x WellKnownString) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Int64Rules describes the constraints applied to `int64` values +type Int64Rules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Eq int64 `protobuf:"varint,1,opt,name=eq,proto3,oneof"` + xxx_hidden_Lt int64 `protobuf:"varint,2,opt,name=lt,proto3,oneof"` + xxx_hidden_Lte int64 `protobuf:"varint,3,opt,name=lte,proto3,oneof"` + xxx_hidden_Gt int64 `protobuf:"varint,4,opt,name=gt,proto3,oneof"` + xxx_hidden_Gte int64 `protobuf:"varint,5,opt,name=gte,proto3,oneof"` + xxx_hidden_In []int64 `protobuf:"varint,6,rep,packed,name=in,proto3"` + xxx_hidden_NotIn []int64 `protobuf:"varint,7,rep,packed,name=not_in,json=notIn,proto3"` + xxx_hidden_ValidateEmpty bool `protobuf:"varint,8,opt,name=validate_empty,json=validateEmpty,proto3"` + xxx_hidden_IsRequired bool `protobuf:"varint,9,opt,name=is_required,json=isRequired,proto3"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Int64Rules) Reset() { + *x = Int64Rules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int64Rules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Rules) ProtoMessage() {} + +func (x *Int64Rules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Int64Rules) GetEq() int64 { + if x != nil { + return x.xxx_hidden_Eq + } + return 0 +} + +func (x *Int64Rules) GetLt() int64 { + if x != nil { + return x.xxx_hidden_Lt + } + return 0 +} + +func (x *Int64Rules) GetLte() int64 { + if x != nil { + return x.xxx_hidden_Lte + } + return 0 +} + +func (x *Int64Rules) GetGt() int64 { + if x != nil { + return x.xxx_hidden_Gt + } + return 0 +} + +func (x *Int64Rules) GetGte() int64 { + if x != nil { + return x.xxx_hidden_Gte + } + return 0 +} + +func (x *Int64Rules) GetIn() []int64 { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *Int64Rules) GetNotIn() []int64 { + if x != 
nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *Int64Rules) GetValidateEmpty() bool { + if x != nil { + return x.xxx_hidden_ValidateEmpty + } + return false +} + +func (x *Int64Rules) GetIsRequired() bool { + if x != nil { + return x.xxx_hidden_IsRequired + } + return false +} + +func (x *Int64Rules) SetEq(v int64) { + x.xxx_hidden_Eq = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 9) +} + +func (x *Int64Rules) SetLt(v int64) { + x.xxx_hidden_Lt = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 9) +} + +func (x *Int64Rules) SetLte(v int64) { + x.xxx_hidden_Lte = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 9) +} + +func (x *Int64Rules) SetGt(v int64) { + x.xxx_hidden_Gt = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 3, 9) +} + +func (x *Int64Rules) SetGte(v int64) { + x.xxx_hidden_Gte = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 4, 9) +} + +func (x *Int64Rules) SetIn(v []int64) { + x.xxx_hidden_In = v +} + +func (x *Int64Rules) SetNotIn(v []int64) { + x.xxx_hidden_NotIn = v +} + +func (x *Int64Rules) SetValidateEmpty(v bool) { + x.xxx_hidden_ValidateEmpty = v +} + +func (x *Int64Rules) SetIsRequired(v bool) { + x.xxx_hidden_IsRequired = v +} + +func (x *Int64Rules) HasEq() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *Int64Rules) HasLt() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *Int64Rules) HasLte() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *Int64Rules) HasGt() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 3) +} + +func (x *Int64Rules) HasGte() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 4) +} + +func (x *Int64Rules) ClearEq() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Eq = 0 +} + +func (x *Int64Rules) ClearLt() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_Lt = 0 +} + +func (x *Int64Rules) ClearLte() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_Lte = 0 +} + +func (x *Int64Rules) ClearGt() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 3) + x.xxx_hidden_Gt = 0 +} + +func (x *Int64Rules) ClearGte() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 4) + x.xxx_hidden_Gte = 0 +} + +type Int64Rules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Const specifies that this field must be exactly the specified value + Eq *int64 + // Lt specifies that this field must be less than the specified value, + // exclusive + Lt *int64 + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + Lte *int64 + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + Gt *int64 + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ Gte *int64 + // In specifies that this field must be equal to one of the specified + // values + In []int64 + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []int64 + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 Int64Rules_builder) Build() *Int64Rules { + m0 := &Int64Rules{} + b, x := &b0, m0 + _, _ = b, x + if b.Eq != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 9) + x.xxx_hidden_Eq = *b.Eq + } + if b.Lt != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 9) + x.xxx_hidden_Lt = *b.Lt + } + if b.Lte != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 9) + x.xxx_hidden_Lte = *b.Lte + } + if b.Gt != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 3, 9) + x.xxx_hidden_Gt = *b.Gt + } + if b.Gte != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 4, 9) + x.xxx_hidden_Gte = *b.Gte + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_ValidateEmpty = b.ValidateEmpty + x.xxx_hidden_IsRequired = b.IsRequired + return m0 +} + +// BoolRules describes the constraints applied to `bool` values +type BoolRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Eq bool `protobuf:"varint,1,opt,name=eq,proto3,oneof"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BoolRules) Reset() { + *x = BoolRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BoolRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolRules) ProtoMessage() {} + +func (x *BoolRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BoolRules) GetEq() bool { + if x != nil { + return x.xxx_hidden_Eq + } + return false +} + +func (x *BoolRules) SetEq(v bool) { + x.xxx_hidden_Eq = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 1) +} + +func (x *BoolRules) HasEq() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *BoolRules) ClearEq() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Eq = false +} + +type BoolRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Const specifies that this field must be exactly the specified value + Eq *bool +} + +func (b0 BoolRules_builder) Build() *BoolRules { + m0 := &BoolRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Eq != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 1) + x.xxx_hidden_Eq = *b.Eq + } + return m0 +} + +// RepeatedRules describe the constraints applied to `repeated` values +type RepeatedRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_MinItems uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems,proto3,oneof"` + xxx_hidden_MaxItems uint64 `protobuf:"varint,2,opt,name=max_items,json=maxItems,proto3,oneof"` + xxx_hidden_Unique bool `protobuf:"varint,3,opt,name=unique,proto3"` + xxx_hidden_ItemRules isRepeatedRules_ItemRules `protobuf_oneof:"item_rules"` + xxx_hidden_ValidateEmpty bool `protobuf:"varint,4,opt,name=validate_empty,json=validateEmpty,proto3"` + xxx_hidden_IsRequired bool `protobuf:"varint,5,opt,name=is_required,json=isRequired,proto3"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RepeatedRules) Reset() { + *x = RepeatedRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RepeatedRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RepeatedRules) ProtoMessage() {} + +func (x *RepeatedRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RepeatedRules) GetMinItems() uint64 { + if x != nil { + return x.xxx_hidden_MinItems + } + return 0 +} + +func (x *RepeatedRules) GetMaxItems() uint64 { + if x != nil { + return x.xxx_hidden_MaxItems + } + return 0 +} + +func (x *RepeatedRules) GetUnique() bool { + if x != nil { + return x.xxx_hidden_Unique + } + return false +} + +func (x *RepeatedRules) GetInt64() *Int64Rules { + if x != nil { + if x, ok := x.xxx_hidden_ItemRules.(*repeatedRules_Int64); ok { + return x.Int64 + } + } + return nil +} + +func (x *RepeatedRules) GetBool() *BoolRules { + if x != nil { + if x, ok := x.xxx_hidden_ItemRules.(*repeatedRules_Bool); ok { + return x.Bool + } + } + return nil +} + +func (x *RepeatedRules) GetString() *StringRules { + if x != nil { + if x, ok := x.xxx_hidden_ItemRules.(*repeatedRules_String_); ok { + return x.String_ + } + } + return nil +} + +func (x *RepeatedRules) GetValidateEmpty() bool { + if x != nil { + return x.xxx_hidden_ValidateEmpty + } + return false +} + +func (x *RepeatedRules) GetIsRequired() bool { + if x != nil { + return x.xxx_hidden_IsRequired + } + return false +} + +func (x *RepeatedRules) SetMinItems(v uint64) { + x.xxx_hidden_MinItems = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *RepeatedRules) SetMaxItems(v uint64) { + x.xxx_hidden_MaxItems = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 6) +} + +func (x *RepeatedRules) SetUnique(v bool) { + x.xxx_hidden_Unique = v +} + +func (x *RepeatedRules) SetInt64(v *Int64Rules) { + if v == nil { + x.xxx_hidden_ItemRules = nil + return + } + x.xxx_hidden_ItemRules = &repeatedRules_Int64{v} +} + +func (x *RepeatedRules) SetBool(v *BoolRules) { + if v == nil { + x.xxx_hidden_ItemRules = nil + return + } + 
x.xxx_hidden_ItemRules = &repeatedRules_Bool{v} +} + +func (x *RepeatedRules) SetString(v *StringRules) { + if v == nil { + x.xxx_hidden_ItemRules = nil + return + } + x.xxx_hidden_ItemRules = &repeatedRules_String_{v} +} + +func (x *RepeatedRules) SetValidateEmpty(v bool) { + x.xxx_hidden_ValidateEmpty = v +} + +func (x *RepeatedRules) SetIsRequired(v bool) { + x.xxx_hidden_IsRequired = v +} + +func (x *RepeatedRules) HasMinItems() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *RepeatedRules) HasMaxItems() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *RepeatedRules) HasItemRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_ItemRules != nil +} + +func (x *RepeatedRules) HasInt64() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_ItemRules.(*repeatedRules_Int64) + return ok +} + +func (x *RepeatedRules) HasBool() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_ItemRules.(*repeatedRules_Bool) + return ok +} + +func (x *RepeatedRules) HasString() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_ItemRules.(*repeatedRules_String_) + return ok +} + +func (x *RepeatedRules) ClearMinItems() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_MinItems = 0 +} + +func (x *RepeatedRules) ClearMaxItems() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_MaxItems = 0 +} + +func (x *RepeatedRules) ClearItemRules() { + x.xxx_hidden_ItemRules = nil +} + +func (x *RepeatedRules) ClearInt64() { + if _, ok := x.xxx_hidden_ItemRules.(*repeatedRules_Int64); ok { + x.xxx_hidden_ItemRules = nil + } +} + +func (x *RepeatedRules) ClearBool() { + if _, ok := x.xxx_hidden_ItemRules.(*repeatedRules_Bool); ok { + x.xxx_hidden_ItemRules = nil + } +} + +func (x *RepeatedRules) ClearString() { + if _, ok := x.xxx_hidden_ItemRules.(*repeatedRules_String_); ok { + x.xxx_hidden_ItemRules = nil + } +} + +const RepeatedRules_ItemRules_not_set_case case_RepeatedRules_ItemRules = 0 +const RepeatedRules_Int64_case case_RepeatedRules_ItemRules = 100 +const RepeatedRules_Bool_case case_RepeatedRules_ItemRules = 101 +const RepeatedRules_String__case case_RepeatedRules_ItemRules = 102 + +func (x *RepeatedRules) WhichItemRules() case_RepeatedRules_ItemRules { + if x == nil { + return RepeatedRules_ItemRules_not_set_case + } + switch x.xxx_hidden_ItemRules.(type) { + case *repeatedRules_Int64: + return RepeatedRules_Int64_case + case *repeatedRules_Bool: + return RepeatedRules_Bool_case + case *repeatedRules_String_: + return RepeatedRules_String__case + default: + return RepeatedRules_ItemRules_not_set_case + } +} + +type RepeatedRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 + // MaxItems specifies that this field must have the specified number of + // items at a maximum + MaxItems *uint64 + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). + Unique bool + // Items specifies the constraints to be applied to each item in the field. + // Repeated message fields will still execute validation against each item + // unless skip is specified here. 
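+	// Illustrative note (editorial, not emitted by protoc-gen-go): set at most one
+	// of Int64, Bool, or String below. If several are non-nil, Build keeps the last
+	// assignment it performs (String over Bool over Int64). A minimal sketch:
+	//
+	//	r := RepeatedRules_builder{String: StringRules_builder{}.Build()}.Build()
+	//	_ = r.WhichItemRules() // RepeatedRules_String__case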
+ + // Fields of oneof xxx_hidden_ItemRules: + // Scalar Field Types + Int64 *Int64Rules + Bool *BoolRules + String *StringRules + // -- end of xxx_hidden_ItemRules + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 RepeatedRules_builder) Build() *RepeatedRules { + m0 := &RepeatedRules{} + b, x := &b0, m0 + _, _ = b, x + if b.MinItems != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_MinItems = *b.MinItems + } + if b.MaxItems != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 6) + x.xxx_hidden_MaxItems = *b.MaxItems + } + x.xxx_hidden_Unique = b.Unique + if b.Int64 != nil { + x.xxx_hidden_ItemRules = &repeatedRules_Int64{b.Int64} + } + if b.Bool != nil { + x.xxx_hidden_ItemRules = &repeatedRules_Bool{b.Bool} + } + if b.String != nil { + x.xxx_hidden_ItemRules = &repeatedRules_String_{b.String} + } + x.xxx_hidden_ValidateEmpty = b.ValidateEmpty + x.xxx_hidden_IsRequired = b.IsRequired + return m0 +} + +type case_RepeatedRules_ItemRules protoreflect.FieldNumber + +func (x case_RepeatedRules_ItemRules) String() string { + md := file_c1_config_v1_rules_proto_msgTypes[2].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isRepeatedRules_ItemRules interface { + isRepeatedRules_ItemRules() +} + +type repeatedRules_Int64 struct { + // Scalar Field Types + Int64 *Int64Rules `protobuf:"bytes,100,opt,name=int64,proto3,oneof"` +} + +type repeatedRules_Bool struct { + Bool *BoolRules `protobuf:"bytes,101,opt,name=bool,proto3,oneof"` +} + +type repeatedRules_String_ struct { + String_ *StringRules `protobuf:"bytes,102,opt,name=string,proto3,oneof"` // RepeatedRules repeated = 103; +} + +func (*repeatedRules_Int64) isRepeatedRules_ItemRules() {} + +func (*repeatedRules_Bool) isRepeatedRules_ItemRules() {} + +func (*repeatedRules_String_) isRepeatedRules_ItemRules() {} + +type RepeatedStringRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_MinItems uint64 `protobuf:"varint,1,opt,name=min_items,json=minItems,proto3,oneof"` + xxx_hidden_MaxItems uint64 `protobuf:"varint,2,opt,name=max_items,json=maxItems,proto3,oneof"` + xxx_hidden_Unique bool `protobuf:"varint,3,opt,name=unique,proto3"` + xxx_hidden_ItemRules *StringRules `protobuf:"bytes,4,opt,name=item_rules,json=itemRules,proto3"` + xxx_hidden_ValidateEmpty bool `protobuf:"varint,5,opt,name=validate_empty,json=validateEmpty,proto3"` + xxx_hidden_IsRequired bool `protobuf:"varint,6,opt,name=is_required,json=isRequired,proto3"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RepeatedStringRules) Reset() { + *x = RepeatedStringRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RepeatedStringRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RepeatedStringRules) ProtoMessage() {} + +func (x *RepeatedStringRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RepeatedStringRules) 
GetMinItems() uint64 { + if x != nil { + return x.xxx_hidden_MinItems + } + return 0 +} + +func (x *RepeatedStringRules) GetMaxItems() uint64 { + if x != nil { + return x.xxx_hidden_MaxItems + } + return 0 +} + +func (x *RepeatedStringRules) GetUnique() bool { + if x != nil { + return x.xxx_hidden_Unique + } + return false +} + +func (x *RepeatedStringRules) GetItemRules() *StringRules { + if x != nil { + return x.xxx_hidden_ItemRules + } + return nil +} + +func (x *RepeatedStringRules) GetValidateEmpty() bool { + if x != nil { + return x.xxx_hidden_ValidateEmpty + } + return false +} + +func (x *RepeatedStringRules) GetIsRequired() bool { + if x != nil { + return x.xxx_hidden_IsRequired + } + return false +} + +func (x *RepeatedStringRules) SetMinItems(v uint64) { + x.xxx_hidden_MinItems = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 6) +} + +func (x *RepeatedStringRules) SetMaxItems(v uint64) { + x.xxx_hidden_MaxItems = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 6) +} + +func (x *RepeatedStringRules) SetUnique(v bool) { + x.xxx_hidden_Unique = v +} + +func (x *RepeatedStringRules) SetItemRules(v *StringRules) { + x.xxx_hidden_ItemRules = v +} + +func (x *RepeatedStringRules) SetValidateEmpty(v bool) { + x.xxx_hidden_ValidateEmpty = v +} + +func (x *RepeatedStringRules) SetIsRequired(v bool) { + x.xxx_hidden_IsRequired = v +} + +func (x *RepeatedStringRules) HasMinItems() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *RepeatedStringRules) HasMaxItems() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *RepeatedStringRules) HasItemRules() bool { + if x == nil { + return false + } + return x.xxx_hidden_ItemRules != nil +} + +func (x *RepeatedStringRules) ClearMinItems() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_MinItems = 0 +} + +func (x *RepeatedStringRules) ClearMaxItems() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_MaxItems = 0 +} + +func (x *RepeatedStringRules) ClearItemRules() { + x.xxx_hidden_ItemRules = nil +} + +type RepeatedStringRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 + // MaxItems specifies that this field must have the specified number of + // items at a maximum + MaxItems *uint64 + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). 
+ Unique bool + ItemRules *StringRules + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 RepeatedStringRules_builder) Build() *RepeatedStringRules { + m0 := &RepeatedStringRules{} + b, x := &b0, m0 + _, _ = b, x + if b.MinItems != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 6) + x.xxx_hidden_MinItems = *b.MinItems + } + if b.MaxItems != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 6) + x.xxx_hidden_MaxItems = *b.MaxItems + } + x.xxx_hidden_Unique = b.Unique + x.xxx_hidden_ItemRules = b.ItemRules + x.xxx_hidden_ValidateEmpty = b.ValidateEmpty + x.xxx_hidden_IsRequired = b.IsRequired + return m0 +} + +type StringRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Eq *string `protobuf:"bytes,1,opt,name=eq,proto3,oneof"` + xxx_hidden_Len uint64 `protobuf:"varint,19,opt,name=len,proto3,oneof"` + xxx_hidden_MinLen uint64 `protobuf:"varint,2,opt,name=min_len,json=minLen,proto3,oneof"` + xxx_hidden_MaxLen uint64 `protobuf:"varint,3,opt,name=max_len,json=maxLen,proto3,oneof"` + xxx_hidden_Pattern *string `protobuf:"bytes,6,opt,name=pattern,proto3,oneof"` + xxx_hidden_Prefix *string `protobuf:"bytes,7,opt,name=prefix,proto3,oneof"` + xxx_hidden_Suffix *string `protobuf:"bytes,8,opt,name=suffix,proto3,oneof"` + xxx_hidden_Contains *string `protobuf:"bytes,9,opt,name=contains,proto3,oneof"` + xxx_hidden_NotContains *string `protobuf:"bytes,23,opt,name=not_contains,json=notContains,proto3,oneof"` + xxx_hidden_In []string `protobuf:"bytes,10,rep,name=in,proto3"` + xxx_hidden_NotIn []string `protobuf:"bytes,11,rep,name=not_in,json=notIn,proto3"` + xxx_hidden_WellKnown WellKnownString `protobuf:"varint,12,opt,name=well_known,json=wellKnown,proto3,enum=c1.config.v1.WellKnownString"` + xxx_hidden_ValidateEmpty bool `protobuf:"varint,26,opt,name=validate_empty,json=validateEmpty,proto3"` + xxx_hidden_IsRequired bool `protobuf:"varint,27,opt,name=is_required,json=isRequired,proto3"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringRules) Reset() { + *x = StringRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringRules) ProtoMessage() {} + +func (x *StringRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringRules) GetEq() string { + if x != nil { + if x.xxx_hidden_Eq != nil { + return *x.xxx_hidden_Eq + } + return "" + } + return "" +} + +func (x *StringRules) GetLen() uint64 { + if x != nil { + return x.xxx_hidden_Len + } + return 0 +} + +func (x *StringRules) GetMinLen() uint64 { + if x != nil { + return x.xxx_hidden_MinLen + } + return 0 +} + +func (x *StringRules) GetMaxLen() uint64 { + if x != nil { + return x.xxx_hidden_MaxLen + } + return 0 +} + +func (x *StringRules) GetPattern() string { + if x != nil { + if x.xxx_hidden_Pattern != nil { + return *x.xxx_hidden_Pattern + } + return "" + } + return "" +} + +func (x *StringRules) GetPrefix() string { + if x != 
nil { + if x.xxx_hidden_Prefix != nil { + return *x.xxx_hidden_Prefix + } + return "" + } + return "" +} + +func (x *StringRules) GetSuffix() string { + if x != nil { + if x.xxx_hidden_Suffix != nil { + return *x.xxx_hidden_Suffix + } + return "" + } + return "" +} + +func (x *StringRules) GetContains() string { + if x != nil { + if x.xxx_hidden_Contains != nil { + return *x.xxx_hidden_Contains + } + return "" + } + return "" +} + +func (x *StringRules) GetNotContains() string { + if x != nil { + if x.xxx_hidden_NotContains != nil { + return *x.xxx_hidden_NotContains + } + return "" + } + return "" +} + +func (x *StringRules) GetIn() []string { + if x != nil { + return x.xxx_hidden_In + } + return nil +} + +func (x *StringRules) GetNotIn() []string { + if x != nil { + return x.xxx_hidden_NotIn + } + return nil +} + +func (x *StringRules) GetWellKnown() WellKnownString { + if x != nil { + return x.xxx_hidden_WellKnown + } + return WellKnownString_WELL_KNOWN_STRING_UNSPECIFIED +} + +func (x *StringRules) GetValidateEmpty() bool { + if x != nil { + return x.xxx_hidden_ValidateEmpty + } + return false +} + +func (x *StringRules) GetIsRequired() bool { + if x != nil { + return x.xxx_hidden_IsRequired + } + return false +} + +func (x *StringRules) SetEq(v string) { + x.xxx_hidden_Eq = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 14) +} + +func (x *StringRules) SetLen(v uint64) { + x.xxx_hidden_Len = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 14) +} + +func (x *StringRules) SetMinLen(v uint64) { + x.xxx_hidden_MinLen = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 14) +} + +func (x *StringRules) SetMaxLen(v uint64) { + x.xxx_hidden_MaxLen = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 3, 14) +} + +func (x *StringRules) SetPattern(v string) { + x.xxx_hidden_Pattern = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 4, 14) +} + +func (x *StringRules) SetPrefix(v string) { + x.xxx_hidden_Prefix = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 5, 14) +} + +func (x *StringRules) SetSuffix(v string) { + x.xxx_hidden_Suffix = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 6, 14) +} + +func (x *StringRules) SetContains(v string) { + x.xxx_hidden_Contains = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 7, 14) +} + +func (x *StringRules) SetNotContains(v string) { + x.xxx_hidden_NotContains = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 8, 14) +} + +func (x *StringRules) SetIn(v []string) { + x.xxx_hidden_In = v +} + +func (x *StringRules) SetNotIn(v []string) { + x.xxx_hidden_NotIn = v +} + +func (x *StringRules) SetWellKnown(v WellKnownString) { + x.xxx_hidden_WellKnown = v +} + +func (x *StringRules) SetValidateEmpty(v bool) { + x.xxx_hidden_ValidateEmpty = v +} + +func (x *StringRules) SetIsRequired(v bool) { + x.xxx_hidden_IsRequired = v +} + +func (x *StringRules) HasEq() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *StringRules) HasLen() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *StringRules) HasMinLen() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *StringRules) HasMaxLen() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 3) +} + +func (x *StringRules) HasPattern() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 4) +} + +func (x *StringRules) HasPrefix() bool { + if x == nil 
{ + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 5) +} + +func (x *StringRules) HasSuffix() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 6) +} + +func (x *StringRules) HasContains() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 7) +} + +func (x *StringRules) HasNotContains() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 8) +} + +func (x *StringRules) ClearEq() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Eq = nil +} + +func (x *StringRules) ClearLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_Len = 0 +} + +func (x *StringRules) ClearMinLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_MinLen = 0 +} + +func (x *StringRules) ClearMaxLen() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 3) + x.xxx_hidden_MaxLen = 0 +} + +func (x *StringRules) ClearPattern() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 4) + x.xxx_hidden_Pattern = nil +} + +func (x *StringRules) ClearPrefix() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 5) + x.xxx_hidden_Prefix = nil +} + +func (x *StringRules) ClearSuffix() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 6) + x.xxx_hidden_Suffix = nil +} + +func (x *StringRules) ClearContains() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 7) + x.xxx_hidden_Contains = nil +} + +func (x *StringRules) ClearNotContains() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 8) + x.xxx_hidden_NotContains = nil +} + +type StringRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Const specifies that this field must be exactly the specified value + Eq *string + // Len specifies that this field must be the specified number of + // characters (Unicode code points). Note that the number of + // characters may differ from the number of bytes in the string. + Len *uint64 + // MinLen specifies that this field must be the specified number of + // characters (Unicode code points) at a minimum. Note that the number of + // characters may differ from the number of bytes in the string. + MinLen *uint64 + // MaxLen specifies that this field must be the specified number of + // characters (Unicode code points) at a maximum. Note that the number of + // characters may differ from the number of bytes in the string. + MaxLen *uint64 + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + Pattern *string + // Prefix specifies that this field must have the specified substring at + // the beginning of the string. + Prefix *string + // Suffix specifies that this field must have the specified substring at + // the end of the string. + Suffix *string + // Contains specifies that this field must have the specified substring + // anywhere in the string. + Contains *string + // NotContains specifies that this field cannot have the specified substring + // anywhere in the string. 
+ NotContains *string + // In specifies that this field must be equal to one of the specified + // values + In []string + // NotIn specifies that this field cannot be equal to one of the specified + // values + NotIn []string + WellKnown WellKnownString + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 StringRules_builder) Build() *StringRules { + m0 := &StringRules{} + b, x := &b0, m0 + _, _ = b, x + if b.Eq != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 14) + x.xxx_hidden_Eq = b.Eq + } + if b.Len != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 14) + x.xxx_hidden_Len = *b.Len + } + if b.MinLen != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 14) + x.xxx_hidden_MinLen = *b.MinLen + } + if b.MaxLen != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 3, 14) + x.xxx_hidden_MaxLen = *b.MaxLen + } + if b.Pattern != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 4, 14) + x.xxx_hidden_Pattern = b.Pattern + } + if b.Prefix != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 5, 14) + x.xxx_hidden_Prefix = b.Prefix + } + if b.Suffix != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 6, 14) + x.xxx_hidden_Suffix = b.Suffix + } + if b.Contains != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 7, 14) + x.xxx_hidden_Contains = b.Contains + } + if b.NotContains != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 8, 14) + x.xxx_hidden_NotContains = b.NotContains + } + x.xxx_hidden_In = b.In + x.xxx_hidden_NotIn = b.NotIn + x.xxx_hidden_WellKnown = b.WellKnown + x.xxx_hidden_ValidateEmpty = b.ValidateEmpty + x.xxx_hidden_IsRequired = b.IsRequired + return m0 +} + +type StringMapRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ValidateEmpty bool `protobuf:"varint,1,opt,name=validate_empty,json=validateEmpty,proto3"` + xxx_hidden_IsRequired bool `protobuf:"varint,2,opt,name=is_required,json=isRequired,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringMapRules) Reset() { + *x = StringMapRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringMapRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringMapRules) ProtoMessage() {} + +func (x *StringMapRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringMapRules) GetValidateEmpty() bool { + if x != nil { + return x.xxx_hidden_ValidateEmpty + } + return false +} + +func (x *StringMapRules) GetIsRequired() bool { + if x != nil { + return x.xxx_hidden_IsRequired + } + return false +} + +func (x *StringMapRules) SetValidateEmpty(v bool) { + x.xxx_hidden_ValidateEmpty = v +} + +func (x *StringMapRules) SetIsRequired(v bool) { + x.xxx_hidden_IsRequired = v +} + +type StringMapRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool +} + +func (b0 StringMapRules_builder) Build() *StringMapRules { + m0 := &StringMapRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ValidateEmpty = b.ValidateEmpty + x.xxx_hidden_IsRequired = b.IsRequired + return m0 +} + +type ResourceIDRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_AllowedResourceTypeIds []string `protobuf:"bytes,1,rep,name=allowed_resource_type_ids,json=allowedResourceTypeIds,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceIDRules) Reset() { + *x = ResourceIDRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceIDRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceIDRules) ProtoMessage() {} + +func (x *ResourceIDRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceIDRules) GetAllowedResourceTypeIds() []string { + if x != nil { + return x.xxx_hidden_AllowedResourceTypeIds + } + return nil +} + +func (x *ResourceIDRules) SetAllowedResourceTypeIds(v []string) { + x.xxx_hidden_AllowedResourceTypeIds = v +} + +type ResourceIDRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + AllowedResourceTypeIds []string +} + +func (b0 ResourceIDRules_builder) Build() *ResourceIDRules { + m0 := &ResourceIDRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_AllowedResourceTypeIds = b.AllowedResourceTypeIds + return m0 +} + +type RepeatedResourceIdRules struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_AllowedResourceTypeIds []string `protobuf:"bytes,1,rep,name=allowed_resource_type_ids,json=allowedResourceTypeIds,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RepeatedResourceIdRules) Reset() { + *x = RepeatedResourceIdRules{} + mi := &file_c1_config_v1_rules_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RepeatedResourceIdRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RepeatedResourceIdRules) ProtoMessage() {} + +func (x *RepeatedResourceIdRules) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_rules_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RepeatedResourceIdRules) GetAllowedResourceTypeIds() []string { + if x != nil { + return x.xxx_hidden_AllowedResourceTypeIds + } + return nil +} + +func (x *RepeatedResourceIdRules) SetAllowedResourceTypeIds(v []string) { + x.xxx_hidden_AllowedResourceTypeIds = v +} + +type RepeatedResourceIdRules_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + AllowedResourceTypeIds []string +} + +func (b0 RepeatedResourceIdRules_builder) Build() *RepeatedResourceIdRules { + m0 := &RepeatedResourceIdRules{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_AllowedResourceTypeIds = b.AllowedResourceTypeIds + return m0 +} + +var File_c1_config_v1_rules_proto protoreflect.FileDescriptor + +const file_c1_config_v1_rules_proto_rawDesc = "" + + "\n" + + "\x18c1/config/v1/rules.proto\x12\fc1.config.v1\"\x8d\x02\n" + + "\n" + + "Int64Rules\x12\x13\n" + + "\x02eq\x18\x01 \x01(\x03H\x00R\x02eq\x88\x01\x01\x12\x13\n" + + "\x02lt\x18\x02 \x01(\x03H\x01R\x02lt\x88\x01\x01\x12\x15\n" + + "\x03lte\x18\x03 \x01(\x03H\x02R\x03lte\x88\x01\x01\x12\x13\n" + + "\x02gt\x18\x04 \x01(\x03H\x03R\x02gt\x88\x01\x01\x12\x15\n" + + "\x03gte\x18\x05 \x01(\x03H\x04R\x03gte\x88\x01\x01\x12\x0e\n" + + "\x02in\x18\x06 \x03(\x03R\x02in\x12\x15\n" + + "\x06not_in\x18\a \x03(\x03R\x05notIn\x12%\n" + + "\x0evalidate_empty\x18\b \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\t \x01(\bR\n" + + "isRequiredB\x05\n" + + "\x03_eqB\x05\n" + + "\x03_ltB\x06\n" + + "\x04_lteB\x05\n" + + "\x03_gtB\x06\n" + + "\x04_gte\"'\n" + + "\tBoolRules\x12\x13\n" + + "\x02eq\x18\x01 \x01(\bH\x00R\x02eq\x88\x01\x01B\x05\n" + + "\x03_eq\"\xf3\x02\n" + + "\rRepeatedRules\x12 \n" + + "\tmin_items\x18\x01 \x01(\x04H\x01R\bminItems\x88\x01\x01\x12 \n" + + "\tmax_items\x18\x02 \x01(\x04H\x02R\bmaxItems\x88\x01\x01\x12\x16\n" + + "\x06unique\x18\x03 \x01(\bR\x06unique\x120\n" + + "\x05int64\x18d \x01(\v2\x18.c1.config.v1.Int64RulesH\x00R\x05int64\x12-\n" + + "\x04bool\x18e \x01(\v2\x17.c1.config.v1.BoolRulesH\x00R\x04bool\x123\n" + + "\x06string\x18f \x01(\v2\x19.c1.config.v1.StringRulesH\x00R\x06string\x12%\n" + + "\x0evalidate_empty\x18\x04 \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x05 \x01(\bR\n" + + "isRequiredB\f\n" + + "\n" + + "item_rulesB\f\n" + + "\n" + + "_min_itemsB\f\n" + + "\n" + + "_max_items\"\x8f\x02\n" + + "\x13RepeatedStringRules\x12 \n" + + "\tmin_items\x18\x01 \x01(\x04H\x00R\bminItems\x88\x01\x01\x12 \n" + + "\tmax_items\x18\x02 \x01(\x04H\x01R\bmaxItems\x88\x01\x01\x12\x16\n" + + "\x06unique\x18\x03 \x01(\bR\x06unique\x128\n" + + "\n" + + "item_rules\x18\x04 \x01(\v2\x19.c1.config.v1.StringRulesR\titemRules\x12%\n" + + "\x0evalidate_empty\x18\x05 \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x06 \x01(\bR\n" + + "isRequiredB\f\n" + + "\n" + + "_min_itemsB\f\n" + + "\n" + + "_max_items\"\xab\x04\n" + + "\vStringRules\x12\x13\n" + + "\x02eq\x18\x01 \x01(\tH\x00R\x02eq\x88\x01\x01\x12\x15\n" + + "\x03len\x18\x13 \x01(\x04H\x01R\x03len\x88\x01\x01\x12\x1c\n" + + "\amin_len\x18\x02 \x01(\x04H\x02R\x06minLen\x88\x01\x01\x12\x1c\n" + + "\amax_len\x18\x03 \x01(\x04H\x03R\x06maxLen\x88\x01\x01\x12\x1d\n" + + "\apattern\x18\x06 \x01(\tH\x04R\apattern\x88\x01\x01\x12\x1b\n" + + "\x06prefix\x18\a \x01(\tH\x05R\x06prefix\x88\x01\x01\x12\x1b\n" + + "\x06suffix\x18\b \x01(\tH\x06R\x06suffix\x88\x01\x01\x12\x1f\n" + + "\bcontains\x18\t \x01(\tH\aR\bcontains\x88\x01\x01\x12&\n" + + "\fnot_contains\x18\x17 \x01(\tH\bR\vnotContains\x88\x01\x01\x12\x0e\n" + + "\x02in\x18\n" + + " \x03(\tR\x02in\x12\x15\n" + + "\x06not_in\x18\v \x03(\tR\x05notIn\x12<\n" + + "\n" + + "well_known\x18\f \x01(\x0e2\x1d.c1.config.v1.WellKnownStringR\twellKnown\x12%\n" + + "\x0evalidate_empty\x18\x1a \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x1b \x01(\bR\n" + + "isRequiredB\x05\n" + + "\x03_eqB\x06\n" + + "\x04_lenB\n" + + "\n" + + "\b_min_lenB\n" + + "\n" + + "\b_max_lenB\n" + + 
"\n" + + "\b_patternB\t\n" + + "\a_prefixB\t\n" + + "\a_suffixB\v\n" + + "\t_containsB\x0f\n" + + "\r_not_contains\"X\n" + + "\x0eStringMapRules\x12%\n" + + "\x0evalidate_empty\x18\x01 \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x02 \x01(\bR\n" + + "isRequired\"L\n" + + "\x0fResourceIDRules\x129\n" + + "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds\"T\n" + + "\x17RepeatedResourceIdRules\x129\n" + + "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds*\x99\x02\n" + + "\x0fWellKnownString\x12!\n" + + "\x1dWELL_KNOWN_STRING_UNSPECIFIED\x10\x00\x12\x1b\n" + + "\x17WELL_KNOWN_STRING_EMAIL\x10\x01\x12\x1e\n" + + "\x1aWELL_KNOWN_STRING_HOSTNAME\x10\x02\x12\x18\n" + + "\x14WELL_KNOWN_STRING_IP\x10\x03\x12\x1a\n" + + "\x16WELL_KNOWN_STRING_IPV4\x10\x04\x12\x1a\n" + + "\x16WELL_KNOWN_STRING_IPV6\x10\x05\x12\x19\n" + + "\x15WELL_KNOWN_STRING_URI\x10\x06\x12\x1d\n" + + "\x19WELL_KNOWN_STRING_ADDRESS\x10\a\x12\x1a\n" + + "\x16WELL_KNOWN_STRING_UUID\x10\bB3Z1github.com/conductorone/baton-sdk/pb/c1/config/v1b\x06proto3" + +var file_c1_config_v1_rules_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_c1_config_v1_rules_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_c1_config_v1_rules_proto_goTypes = []any{ + (WellKnownString)(0), // 0: c1.config.v1.WellKnownString + (*Int64Rules)(nil), // 1: c1.config.v1.Int64Rules + (*BoolRules)(nil), // 2: c1.config.v1.BoolRules + (*RepeatedRules)(nil), // 3: c1.config.v1.RepeatedRules + (*RepeatedStringRules)(nil), // 4: c1.config.v1.RepeatedStringRules + (*StringRules)(nil), // 5: c1.config.v1.StringRules + (*StringMapRules)(nil), // 6: c1.config.v1.StringMapRules + (*ResourceIDRules)(nil), // 7: c1.config.v1.ResourceIDRules + (*RepeatedResourceIdRules)(nil), // 8: c1.config.v1.RepeatedResourceIdRules +} +var file_c1_config_v1_rules_proto_depIdxs = []int32{ + 1, // 0: c1.config.v1.RepeatedRules.int64:type_name -> c1.config.v1.Int64Rules + 2, // 1: c1.config.v1.RepeatedRules.bool:type_name -> c1.config.v1.BoolRules + 5, // 2: c1.config.v1.RepeatedRules.string:type_name -> c1.config.v1.StringRules + 5, // 3: c1.config.v1.RepeatedStringRules.item_rules:type_name -> c1.config.v1.StringRules + 0, // 4: c1.config.v1.StringRules.well_known:type_name -> c1.config.v1.WellKnownString + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_c1_config_v1_rules_proto_init() } +func file_c1_config_v1_rules_proto_init() { + if File_c1_config_v1_rules_proto != nil { + return + } + file_c1_config_v1_rules_proto_msgTypes[0].OneofWrappers = []any{} + file_c1_config_v1_rules_proto_msgTypes[1].OneofWrappers = []any{} + file_c1_config_v1_rules_proto_msgTypes[2].OneofWrappers = []any{ + (*repeatedRules_Int64)(nil), + (*repeatedRules_Bool)(nil), + (*repeatedRules_String_)(nil), + } + file_c1_config_v1_rules_proto_msgTypes[3].OneofWrappers = []any{} + file_c1_config_v1_rules_proto_msgTypes[4].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_config_v1_rules_proto_rawDesc), len(file_c1_config_v1_rules_proto_rawDesc)), + NumEnums: 1, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_c1_config_v1_rules_proto_goTypes, + DependencyIndexes: file_c1_config_v1_rules_proto_depIdxs, + EnumInfos: file_c1_config_v1_rules_proto_enumTypes, + MessageInfos: file_c1_config_v1_rules_proto_msgTypes, + }.Build() + File_c1_config_v1_rules_proto = out.File + file_c1_config_v1_rules_proto_goTypes = nil + file_c1_config_v1_rules_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action.pb.go index 569a555f..d8fbaf70 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/action.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -77,21 +78,86 @@ func (x BatonActionStatus) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use BatonActionStatus.Descriptor instead. -func (BatonActionStatus) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{0} +// This defines the action type. +type ActionType int32 + +const ( + ActionType_ACTION_TYPE_UNSPECIFIED ActionType = 0 + ActionType_ACTION_TYPE_DYNAMIC ActionType = 1 + ActionType_ACTION_TYPE_ACCOUNT ActionType = 2 + ActionType_ACTION_TYPE_ACCOUNT_UPDATE_PROFILE ActionType = 3 + ActionType_ACTION_TYPE_ACCOUNT_DISABLE ActionType = 4 + ActionType_ACTION_TYPE_ACCOUNT_ENABLE ActionType = 5 + // Generic resource actions + ActionType_ACTION_TYPE_RESOURCE_CREATE ActionType = 6 + ActionType_ACTION_TYPE_RESOURCE_DELETE ActionType = 7 + ActionType_ACTION_TYPE_RESOURCE_ENABLE ActionType = 8 + ActionType_ACTION_TYPE_RESOURCE_DISABLE ActionType = 9 +) + +// Enum value maps for ActionType. 
+var ( + ActionType_name = map[int32]string{ + 0: "ACTION_TYPE_UNSPECIFIED", + 1: "ACTION_TYPE_DYNAMIC", + 2: "ACTION_TYPE_ACCOUNT", + 3: "ACTION_TYPE_ACCOUNT_UPDATE_PROFILE", + 4: "ACTION_TYPE_ACCOUNT_DISABLE", + 5: "ACTION_TYPE_ACCOUNT_ENABLE", + 6: "ACTION_TYPE_RESOURCE_CREATE", + 7: "ACTION_TYPE_RESOURCE_DELETE", + 8: "ACTION_TYPE_RESOURCE_ENABLE", + 9: "ACTION_TYPE_RESOURCE_DISABLE", + } + ActionType_value = map[string]int32{ + "ACTION_TYPE_UNSPECIFIED": 0, + "ACTION_TYPE_DYNAMIC": 1, + "ACTION_TYPE_ACCOUNT": 2, + "ACTION_TYPE_ACCOUNT_UPDATE_PROFILE": 3, + "ACTION_TYPE_ACCOUNT_DISABLE": 4, + "ACTION_TYPE_ACCOUNT_ENABLE": 5, + "ACTION_TYPE_RESOURCE_CREATE": 6, + "ACTION_TYPE_RESOURCE_DELETE": 7, + "ACTION_TYPE_RESOURCE_ENABLE": 8, + "ACTION_TYPE_RESOURCE_DISABLE": 9, + } +) + +func (x ActionType) Enum() *ActionType { + p := new(ActionType) + *p = x + return p +} + +func (x ActionType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ActionType) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_action_proto_enumTypes[1].Descriptor() +} + +func (ActionType) Type() protoreflect.EnumType { + return &file_c1_connector_v2_action_proto_enumTypes[1] +} + +func (x ActionType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } type BatonActionSchema struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Arguments []*v1.Field `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"` - Constraints []*v1.Constraint `protobuf:"bytes,3,rep,name=constraints,proto3" json:"constraints,omitempty"` - ReturnTypes []*v1.Field `protobuf:"bytes,4,rep,name=return_types,json=returnTypes,proto3" json:"return_types,omitempty"` - DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Arguments []*v1.Field `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"` + Constraints []*v1.Constraint `protobuf:"bytes,3,rep,name=constraints,proto3" json:"constraints,omitempty"` + ReturnTypes []*v1.Field `protobuf:"bytes,4,rep,name=return_types,json=returnTypes,proto3" json:"return_types,omitempty"` + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + ActionType []ActionType `protobuf:"varint,7,rep,packed,name=action_type,json=actionType,proto3,enum=c1.connector.v2.ActionType" json:"action_type,omitempty"` + // Optional: if set, this action is scoped to a specific resource type + ResourceTypeId string `protobuf:"bytes,8,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *BatonActionSchema) Reset() { @@ -119,11 +185,6 @@ func (x *BatonActionSchema) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonActionSchema.ProtoReflect.Descriptor instead. 
-func (*BatonActionSchema) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{0} -} - func (x *BatonActionSchema) GetName() string { if x != nil { return x.Name @@ -166,13 +227,90 @@ func (x *BatonActionSchema) GetDescription() string { return "" } +func (x *BatonActionSchema) GetActionType() []ActionType { + if x != nil { + return x.ActionType + } + return nil +} + +func (x *BatonActionSchema) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + +func (x *BatonActionSchema) SetName(v string) { + x.Name = v +} + +func (x *BatonActionSchema) SetArguments(v []*v1.Field) { + x.Arguments = v +} + +func (x *BatonActionSchema) SetConstraints(v []*v1.Constraint) { + x.Constraints = v +} + +func (x *BatonActionSchema) SetReturnTypes(v []*v1.Field) { + x.ReturnTypes = v +} + +func (x *BatonActionSchema) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *BatonActionSchema) SetDescription(v string) { + x.Description = v +} + +func (x *BatonActionSchema) SetActionType(v []ActionType) { + x.ActionType = v +} + +func (x *BatonActionSchema) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +type BatonActionSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + Arguments []*v1.Field + Constraints []*v1.Constraint + ReturnTypes []*v1.Field + DisplayName string + Description string + ActionType []ActionType + // Optional: if set, this action is scoped to a specific resource type + ResourceTypeId string +} + +func (b0 BatonActionSchema_builder) Build() *BatonActionSchema { + m0 := &BatonActionSchema{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Arguments = b.Arguments + x.Constraints = b.Constraints + x.ReturnTypes = b.ReturnTypes + x.DisplayName = b.DisplayName + x.Description = b.Description + x.ActionType = b.ActionType + x.ResourceTypeId = b.ResourceTypeId + return m0 +} + type InvokeActionRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Args *structpb.Struct `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` - Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Args *structpb.Struct `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` + Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` + // Optional: if set, invokes a resource-scoped action + ResourceTypeId string `protobuf:"bytes,4,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *InvokeActionRequest) Reset() { @@ -200,11 +338,6 @@ func (x *InvokeActionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use InvokeActionRequest.ProtoReflect.Descriptor instead. 
-func (*InvokeActionRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{1} -} - func (x *InvokeActionRequest) GetName() string { if x != nil { return x.Name @@ -226,8 +359,63 @@ func (x *InvokeActionRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *InvokeActionRequest) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + +func (x *InvokeActionRequest) SetName(v string) { + x.Name = v +} + +func (x *InvokeActionRequest) SetArgs(v *structpb.Struct) { + x.Args = v +} + +func (x *InvokeActionRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *InvokeActionRequest) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +func (x *InvokeActionRequest) HasArgs() bool { + if x == nil { + return false + } + return x.Args != nil +} + +func (x *InvokeActionRequest) ClearArgs() { + x.Args = nil +} + +type InvokeActionRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + Args *structpb.Struct + Annotations []*anypb.Any + // Optional: if set, invokes a resource-scoped action + ResourceTypeId string +} + +func (b0 InvokeActionRequest_builder) Build() *InvokeActionRequest { + m0 := &InvokeActionRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Args = b.Args + x.Annotations = b.Annotations + x.ResourceTypeId = b.ResourceTypeId + return m0 +} + type InvokeActionResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Status BatonActionStatus `protobuf:"varint,2,opt,name=status,proto3,enum=c1.connector.v2.BatonActionStatus" json:"status,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -262,11 +450,6 @@ func (x *InvokeActionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use InvokeActionResponse.ProtoReflect.Descriptor instead. -func (*InvokeActionResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{2} -} - func (x *InvokeActionResponse) GetId() string { if x != nil { return x.Id @@ -302,8 +485,61 @@ func (x *InvokeActionResponse) GetName() string { return "" } +func (x *InvokeActionResponse) SetId(v string) { + x.Id = v +} + +func (x *InvokeActionResponse) SetStatus(v BatonActionStatus) { + x.Status = v +} + +func (x *InvokeActionResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *InvokeActionResponse) SetResponse(v *structpb.Struct) { + x.Response = v +} + +func (x *InvokeActionResponse) SetName(v string) { + x.Name = v +} + +func (x *InvokeActionResponse) HasResponse() bool { + if x == nil { + return false + } + return x.Response != nil +} + +func (x *InvokeActionResponse) ClearResponse() { + x.Response = nil +} + +type InvokeActionResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + Status BatonActionStatus + Annotations []*anypb.Any + Response *structpb.Struct + Name string +} + +func (b0 InvokeActionResponse_builder) Build() *InvokeActionResponse { + m0 := &InvokeActionResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.Status = b.Status + x.Annotations = b.Annotations + x.Response = b.Response + x.Name = b.Name + return m0 +} + type GetActionStatusRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Deprecated: Marked as deprecated in c1/connector/v2/action.proto. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` @@ -337,11 +573,6 @@ func (x *GetActionStatusRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetActionStatusRequest.ProtoReflect.Descriptor instead. -func (*GetActionStatusRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{3} -} - // Deprecated: Marked as deprecated in c1/connector/v2/action.proto. func (x *GetActionStatusRequest) GetName() string { if x != nil { @@ -364,8 +595,40 @@ func (x *GetActionStatusRequest) GetAnnotations() []*anypb.Any { return nil } +// Deprecated: Marked as deprecated in c1/connector/v2/action.proto. +func (x *GetActionStatusRequest) SetName(v string) { + x.Name = v +} + +func (x *GetActionStatusRequest) SetId(v string) { + x.Id = v +} + +func (x *GetActionStatusRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type GetActionStatusRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Deprecated: Marked as deprecated in c1/connector/v2/action.proto. + Name string + Id string + Annotations []*anypb.Any +} + +func (b0 GetActionStatusRequest_builder) Build() *GetActionStatusRequest { + m0 := &GetActionStatusRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Id = b.Id + x.Annotations = b.Annotations + return m0 +} + type GetActionStatusResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` Status BatonActionStatus `protobuf:"varint,3,opt,name=status,proto3,enum=c1.connector.v2.BatonActionStatus" json:"status,omitempty"` @@ -400,11 +663,6 @@ func (x *GetActionStatusResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetActionStatusResponse.ProtoReflect.Descriptor instead. 
-func (*GetActionStatusResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{4} -} - func (x *GetActionStatusResponse) GetName() string { if x != nil { return x.Name @@ -440,8 +698,61 @@ func (x *GetActionStatusResponse) GetResponse() *structpb.Struct { return nil } +func (x *GetActionStatusResponse) SetName(v string) { + x.Name = v +} + +func (x *GetActionStatusResponse) SetId(v string) { + x.Id = v +} + +func (x *GetActionStatusResponse) SetStatus(v BatonActionStatus) { + x.Status = v +} + +func (x *GetActionStatusResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *GetActionStatusResponse) SetResponse(v *structpb.Struct) { + x.Response = v +} + +func (x *GetActionStatusResponse) HasResponse() bool { + if x == nil { + return false + } + return x.Response != nil +} + +func (x *GetActionStatusResponse) ClearResponse() { + x.Response = nil +} + +type GetActionStatusResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + Id string + Status BatonActionStatus + Annotations []*anypb.Any + Response *structpb.Struct +} + +func (b0 GetActionStatusResponse_builder) Build() *GetActionStatusResponse { + m0 := &GetActionStatusResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Id = b.Id + x.Status = b.Status + x.Annotations = b.Annotations + x.Response = b.Response + return m0 +} + type GetActionSchemaRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -473,11 +784,6 @@ func (x *GetActionSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetActionSchemaRequest.ProtoReflect.Descriptor instead. -func (*GetActionSchemaRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{5} -} - func (x *GetActionSchemaRequest) GetName() string { if x != nil { return x.Name @@ -492,8 +798,32 @@ func (x *GetActionSchemaRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *GetActionSchemaRequest) SetName(v string) { + x.Name = v +} + +func (x *GetActionSchemaRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type GetActionSchemaRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + Annotations []*anypb.Any +} + +func (b0 GetActionSchemaRequest_builder) Build() *GetActionSchemaRequest { + m0 := &GetActionSchemaRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Annotations = b.Annotations + return m0 +} + type GetActionSchemaResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Schema *BatonActionSchema `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -525,11 +855,6 @@ func (x *GetActionSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetActionSchemaResponse.ProtoReflect.Descriptor instead. 
-func (*GetActionSchemaResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{6} -} - func (x *GetActionSchemaResponse) GetSchema() *BatonActionSchema { if x != nil { return x.Schema @@ -544,11 +869,48 @@ func (x *GetActionSchemaResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *GetActionSchemaResponse) SetSchema(v *BatonActionSchema) { + x.Schema = v +} + +func (x *GetActionSchemaResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *GetActionSchemaResponse) HasSchema() bool { + if x == nil { + return false + } + return x.Schema != nil +} + +func (x *GetActionSchemaResponse) ClearSchema() { + x.Schema = nil +} + +type GetActionSchemaResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Schema *BatonActionSchema + Annotations []*anypb.Any +} + +func (b0 GetActionSchemaResponse_builder) Build() *GetActionSchemaResponse { + m0 := &GetActionSchemaResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Schema = b.Schema + x.Annotations = b.Annotations + return m0 +} + type ListActionSchemasRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` + // Optional: filter to only return actions for a specific resource type + ResourceTypeId string `protobuf:"bytes,2,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListActionSchemasRequest) Reset() { @@ -576,11 +938,6 @@ func (x *ListActionSchemasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListActionSchemasRequest.ProtoReflect.Descriptor instead. -func (*ListActionSchemasRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{7} -} - func (x *ListActionSchemasRequest) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -588,8 +945,40 @@ func (x *ListActionSchemasRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *ListActionSchemasRequest) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + +func (x *ListActionSchemasRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ListActionSchemasRequest) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +type ListActionSchemasRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + // Optional: filter to only return actions for a specific resource type + ResourceTypeId string +} + +func (b0 ListActionSchemasRequest_builder) Build() *ListActionSchemasRequest { + m0 := &ListActionSchemasRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.ResourceTypeId = b.ResourceTypeId + return m0 +} + type ListActionSchemasResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Schemas []*BatonActionSchema `protobuf:"bytes,1,rep,name=schemas,proto3" json:"schemas,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -621,11 +1010,6 @@ func (x *ListActionSchemasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListActionSchemasResponse.ProtoReflect.Descriptor instead. -func (*ListActionSchemasResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_action_proto_rawDescGZIP(), []int{8} -} - func (x *ListActionSchemasResponse) GetSchemas() []*BatonActionSchema { if x != nil { return x.Schemas @@ -640,219 +1024,155 @@ func (x *ListActionSchemasResponse) GetAnnotations() []*anypb.Any { return nil } -var File_c1_connector_v2_action_proto protoreflect.FileDescriptor +func (x *ListActionSchemasResponse) SetSchemas(v []*BatonActionSchema) { + x.Schemas = v +} + +func (x *ListActionSchemasResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} -var file_c1_connector_v2_action_proto_rawDesc = string([]byte{ - 0x0a, 0x1c, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, - 0x19, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x93, 0x02, 0x0a, 0x11, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, - 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x3a, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x0c, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x54, - 0x79, 0x70, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x49, 0x6e, - 0x76, 0x6f, 0x6b, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, - 0x67, 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x01, 0x0a, 0x14, 0x49, - 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x3a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x78, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, - 
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x17, 0x47, - 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3a, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x74, - 0x6f, 0x6e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x8d, 0x01, 0x0a, 0x17, 0x47, 0x65, - 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x52, 0x0a, 0x18, 0x4c, 0x69, 0x73, - 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x91, 0x01, - 0x0a, 0x19, 0x4c, 0x69, 
0x73, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x07, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x61, 0x74, 0x6f, 0x6e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2a, 0xdd, 0x01, 0x0a, 0x11, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x1f, 0x42, 0x41, 0x54, 0x4f, 0x4e, - 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, - 0x42, 0x41, 0x54, 0x4f, 0x4e, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x1f, 0x0a, - 0x1b, 0x42, 0x41, 0x54, 0x4f, 0x4e, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x1f, - 0x0a, 0x1b, 0x42, 0x41, 0x54, 0x4f, 0x4e, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, - 0x20, 0x0a, 0x1c, 0x42, 0x41, 0x54, 0x4f, 0x4e, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, - 0x04, 0x12, 0x1e, 0x0a, 0x1a, 0x42, 0x41, 0x54, 0x4f, 0x4e, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, - 0x05, 0x32, 0xa4, 0x03, 0x0a, 0x0d, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x0c, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x76, 0x6f, - 0x6b, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x64, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 
0x0a, 0x0f, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, 0x0a, 0x11, - 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x12, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, - 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, - 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +type ListActionSchemasResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-var ( - file_c1_connector_v2_action_proto_rawDescOnce sync.Once - file_c1_connector_v2_action_proto_rawDescData []byte -) + Schemas []*BatonActionSchema + Annotations []*anypb.Any +} -func file_c1_connector_v2_action_proto_rawDescGZIP() []byte { - file_c1_connector_v2_action_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_action_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_action_proto_rawDesc), len(file_c1_connector_v2_action_proto_rawDesc))) - }) - return file_c1_connector_v2_action_proto_rawDescData +func (b0 ListActionSchemasResponse_builder) Build() *ListActionSchemasResponse { + m0 := &ListActionSchemasResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Schemas = b.Schemas + x.Annotations = b.Annotations + return m0 } -var file_c1_connector_v2_action_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var File_c1_connector_v2_action_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_action_proto_rawDesc = "" + + "\n" + + "\x1cc1/connector/v2/action.proto\x12\x0fc1.connector.v2\x1a\x19c1/config/v1/config.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xfb\x02\n" + + "\x11BatonActionSchema\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x121\n" + + "\targuments\x18\x02 \x03(\v2\x13.c1.config.v1.FieldR\targuments\x12:\n" + + "\vconstraints\x18\x03 \x03(\v2\x18.c1.config.v1.ConstraintR\vconstraints\x126\n" + + "\freturn_types\x18\x04 \x03(\v2\x13.c1.config.v1.FieldR\vreturnTypes\x12!\n" + + "\fdisplay_name\x18\x05 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x06 \x01(\tR\vdescription\x12<\n" + + "\vaction_type\x18\a \x03(\x0e2\x1b.c1.connector.v2.ActionTypeR\n" + + "actionType\x12(\n" + + "\x10resource_type_id\x18\b \x01(\tR\x0eresourceTypeId\"\xb8\x01\n" + + "\x13InvokeActionRequest\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12+\n" + + "\x04args\x18\x02 \x01(\v2\x17.google.protobuf.StructR\x04args\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12(\n" + + "\x10resource_type_id\x18\x04 \x01(\tR\x0eresourceTypeId\"\xe3\x01\n" + + "\x14InvokeActionResponse\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12:\n" + + "\x06status\x18\x02 \x01(\x0e2\".c1.connector.v2.BatonActionStatusR\x06status\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\bresponse\x18\x04 \x01(\v2\x17.google.protobuf.StructR\bresponse\x12\x12\n" + + "\x04name\x18\x05 \x01(\tR\x04name\"x\n" + + "\x16GetActionStatusRequest\x12\x16\n" + + "\x04name\x18\x01 \x01(\tB\x02\x18\x01R\x04name\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xe6\x01\n" + + "\x17GetActionStatusResponse\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x12:\n" + + "\x06status\x18\x03 \x01(\x0e2\".c1.connector.v2.BatonActionStatusR\x06status\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\bresponse\x18\x05 \x01(\v2\x17.google.protobuf.StructR\bresponse\"d\n" + + "\x16GetActionSchemaRequest\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x8d\x01\n" + + "\x17GetActionSchemaResponse\x12:\n" + + "\x06schema\x18\x01 \x01(\v2\".c1.connector.v2.BatonActionSchemaR\x06schema\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"|\n" + + "\x18ListActionSchemasRequest\x126\n" + + 
"\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12(\n" + + "\x10resource_type_id\x18\x02 \x01(\tR\x0eresourceTypeId\"\x91\x01\n" + + "\x19ListActionSchemasResponse\x12<\n" + + "\aschemas\x18\x01 \x03(\v2\".c1.connector.v2.BatonActionSchemaR\aschemas\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations*\xdd\x01\n" + + "\x11BatonActionStatus\x12#\n" + + "\x1fBATON_ACTION_STATUS_UNSPECIFIED\x10\x00\x12\x1f\n" + + "\x1bBATON_ACTION_STATUS_UNKNOWN\x10\x01\x12\x1f\n" + + "\x1bBATON_ACTION_STATUS_PENDING\x10\x02\x12\x1f\n" + + "\x1bBATON_ACTION_STATUS_RUNNING\x10\x03\x12 \n" + + "\x1cBATON_ACTION_STATUS_COMPLETE\x10\x04\x12\x1e\n" + + "\x1aBATON_ACTION_STATUS_FAILED\x10\x05*\xc9\x02\n" + + "\n" + + "ActionType\x12\x1b\n" + + "\x17ACTION_TYPE_UNSPECIFIED\x10\x00\x12\x17\n" + + "\x13ACTION_TYPE_DYNAMIC\x10\x01\x12\x17\n" + + "\x13ACTION_TYPE_ACCOUNT\x10\x02\x12&\n" + + "\"ACTION_TYPE_ACCOUNT_UPDATE_PROFILE\x10\x03\x12\x1f\n" + + "\x1bACTION_TYPE_ACCOUNT_DISABLE\x10\x04\x12\x1e\n" + + "\x1aACTION_TYPE_ACCOUNT_ENABLE\x10\x05\x12\x1f\n" + + "\x1bACTION_TYPE_RESOURCE_CREATE\x10\x06\x12\x1f\n" + + "\x1bACTION_TYPE_RESOURCE_DELETE\x10\a\x12\x1f\n" + + "\x1bACTION_TYPE_RESOURCE_ENABLE\x10\b\x12 \n" + + "\x1cACTION_TYPE_RESOURCE_DISABLE\x10\t2\xa4\x03\n" + + "\rActionService\x12[\n" + + "\fInvokeAction\x12$.c1.connector.v2.InvokeActionRequest\x1a%.c1.connector.v2.InvokeActionResponse\x12d\n" + + "\x0fGetActionStatus\x12'.c1.connector.v2.GetActionStatusRequest\x1a(.c1.connector.v2.GetActionStatusResponse\x12d\n" + + "\x0fGetActionSchema\x12'.c1.connector.v2.GetActionSchemaRequest\x1a(.c1.connector.v2.GetActionSchemaResponse\x12j\n" + + "\x11ListActionSchemas\x12).c1.connector.v2.ListActionSchemasRequest\x1a*.c1.connector.v2.ListActionSchemasResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_action_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_c1_connector_v2_action_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_c1_connector_v2_action_proto_goTypes = []any{ (BatonActionStatus)(0), // 0: c1.connector.v2.BatonActionStatus - (*BatonActionSchema)(nil), // 1: c1.connector.v2.BatonActionSchema - (*InvokeActionRequest)(nil), // 2: c1.connector.v2.InvokeActionRequest - (*InvokeActionResponse)(nil), // 3: c1.connector.v2.InvokeActionResponse - (*GetActionStatusRequest)(nil), // 4: c1.connector.v2.GetActionStatusRequest - (*GetActionStatusResponse)(nil), // 5: c1.connector.v2.GetActionStatusResponse - (*GetActionSchemaRequest)(nil), // 6: c1.connector.v2.GetActionSchemaRequest - (*GetActionSchemaResponse)(nil), // 7: c1.connector.v2.GetActionSchemaResponse - (*ListActionSchemasRequest)(nil), // 8: c1.connector.v2.ListActionSchemasRequest - (*ListActionSchemasResponse)(nil), // 9: c1.connector.v2.ListActionSchemasResponse - (*v1.Field)(nil), // 10: c1.config.v1.Field - (*v1.Constraint)(nil), // 11: c1.config.v1.Constraint - (*structpb.Struct)(nil), // 12: google.protobuf.Struct - (*anypb.Any)(nil), // 13: google.protobuf.Any + (ActionType)(0), // 1: c1.connector.v2.ActionType + (*BatonActionSchema)(nil), // 2: c1.connector.v2.BatonActionSchema + (*InvokeActionRequest)(nil), // 3: c1.connector.v2.InvokeActionRequest + (*InvokeActionResponse)(nil), // 4: c1.connector.v2.InvokeActionResponse + (*GetActionStatusRequest)(nil), // 5: c1.connector.v2.GetActionStatusRequest + (*GetActionStatusResponse)(nil), // 6: c1.connector.v2.GetActionStatusResponse + (*GetActionSchemaRequest)(nil), // 7: 
c1.connector.v2.GetActionSchemaRequest + (*GetActionSchemaResponse)(nil), // 8: c1.connector.v2.GetActionSchemaResponse + (*ListActionSchemasRequest)(nil), // 9: c1.connector.v2.ListActionSchemasRequest + (*ListActionSchemasResponse)(nil), // 10: c1.connector.v2.ListActionSchemasResponse + (*v1.Field)(nil), // 11: c1.config.v1.Field + (*v1.Constraint)(nil), // 12: c1.config.v1.Constraint + (*structpb.Struct)(nil), // 13: google.protobuf.Struct + (*anypb.Any)(nil), // 14: google.protobuf.Any } var file_c1_connector_v2_action_proto_depIdxs = []int32{ - 10, // 0: c1.connector.v2.BatonActionSchema.arguments:type_name -> c1.config.v1.Field - 11, // 1: c1.connector.v2.BatonActionSchema.constraints:type_name -> c1.config.v1.Constraint - 10, // 2: c1.connector.v2.BatonActionSchema.return_types:type_name -> c1.config.v1.Field - 12, // 3: c1.connector.v2.InvokeActionRequest.args:type_name -> google.protobuf.Struct - 13, // 4: c1.connector.v2.InvokeActionRequest.annotations:type_name -> google.protobuf.Any - 0, // 5: c1.connector.v2.InvokeActionResponse.status:type_name -> c1.connector.v2.BatonActionStatus - 13, // 6: c1.connector.v2.InvokeActionResponse.annotations:type_name -> google.protobuf.Any - 12, // 7: c1.connector.v2.InvokeActionResponse.response:type_name -> google.protobuf.Struct - 13, // 8: c1.connector.v2.GetActionStatusRequest.annotations:type_name -> google.protobuf.Any - 0, // 9: c1.connector.v2.GetActionStatusResponse.status:type_name -> c1.connector.v2.BatonActionStatus - 13, // 10: c1.connector.v2.GetActionStatusResponse.annotations:type_name -> google.protobuf.Any - 12, // 11: c1.connector.v2.GetActionStatusResponse.response:type_name -> google.protobuf.Struct - 13, // 12: c1.connector.v2.GetActionSchemaRequest.annotations:type_name -> google.protobuf.Any - 1, // 13: c1.connector.v2.GetActionSchemaResponse.schema:type_name -> c1.connector.v2.BatonActionSchema - 13, // 14: c1.connector.v2.GetActionSchemaResponse.annotations:type_name -> google.protobuf.Any - 13, // 15: c1.connector.v2.ListActionSchemasRequest.annotations:type_name -> google.protobuf.Any - 1, // 16: c1.connector.v2.ListActionSchemasResponse.schemas:type_name -> c1.connector.v2.BatonActionSchema - 13, // 17: c1.connector.v2.ListActionSchemasResponse.annotations:type_name -> google.protobuf.Any - 2, // 18: c1.connector.v2.ActionService.InvokeAction:input_type -> c1.connector.v2.InvokeActionRequest - 4, // 19: c1.connector.v2.ActionService.GetActionStatus:input_type -> c1.connector.v2.GetActionStatusRequest - 6, // 20: c1.connector.v2.ActionService.GetActionSchema:input_type -> c1.connector.v2.GetActionSchemaRequest - 8, // 21: c1.connector.v2.ActionService.ListActionSchemas:input_type -> c1.connector.v2.ListActionSchemasRequest - 3, // 22: c1.connector.v2.ActionService.InvokeAction:output_type -> c1.connector.v2.InvokeActionResponse - 5, // 23: c1.connector.v2.ActionService.GetActionStatus:output_type -> c1.connector.v2.GetActionStatusResponse - 7, // 24: c1.connector.v2.ActionService.GetActionSchema:output_type -> c1.connector.v2.GetActionSchemaResponse - 9, // 25: c1.connector.v2.ActionService.ListActionSchemas:output_type -> c1.connector.v2.ListActionSchemasResponse - 22, // [22:26] is the sub-list for method output_type - 18, // [18:22] is the sub-list for method input_type - 18, // [18:18] is the sub-list for extension type_name - 18, // [18:18] is the sub-list for extension extendee - 0, // [0:18] is the sub-list for field type_name + 11, // 0: c1.connector.v2.BatonActionSchema.arguments:type_name -> 
c1.config.v1.Field + 12, // 1: c1.connector.v2.BatonActionSchema.constraints:type_name -> c1.config.v1.Constraint + 11, // 2: c1.connector.v2.BatonActionSchema.return_types:type_name -> c1.config.v1.Field + 1, // 3: c1.connector.v2.BatonActionSchema.action_type:type_name -> c1.connector.v2.ActionType + 13, // 4: c1.connector.v2.InvokeActionRequest.args:type_name -> google.protobuf.Struct + 14, // 5: c1.connector.v2.InvokeActionRequest.annotations:type_name -> google.protobuf.Any + 0, // 6: c1.connector.v2.InvokeActionResponse.status:type_name -> c1.connector.v2.BatonActionStatus + 14, // 7: c1.connector.v2.InvokeActionResponse.annotations:type_name -> google.protobuf.Any + 13, // 8: c1.connector.v2.InvokeActionResponse.response:type_name -> google.protobuf.Struct + 14, // 9: c1.connector.v2.GetActionStatusRequest.annotations:type_name -> google.protobuf.Any + 0, // 10: c1.connector.v2.GetActionStatusResponse.status:type_name -> c1.connector.v2.BatonActionStatus + 14, // 11: c1.connector.v2.GetActionStatusResponse.annotations:type_name -> google.protobuf.Any + 13, // 12: c1.connector.v2.GetActionStatusResponse.response:type_name -> google.protobuf.Struct + 14, // 13: c1.connector.v2.GetActionSchemaRequest.annotations:type_name -> google.protobuf.Any + 2, // 14: c1.connector.v2.GetActionSchemaResponse.schema:type_name -> c1.connector.v2.BatonActionSchema + 14, // 15: c1.connector.v2.GetActionSchemaResponse.annotations:type_name -> google.protobuf.Any + 14, // 16: c1.connector.v2.ListActionSchemasRequest.annotations:type_name -> google.protobuf.Any + 2, // 17: c1.connector.v2.ListActionSchemasResponse.schemas:type_name -> c1.connector.v2.BatonActionSchema + 14, // 18: c1.connector.v2.ListActionSchemasResponse.annotations:type_name -> google.protobuf.Any + 3, // 19: c1.connector.v2.ActionService.InvokeAction:input_type -> c1.connector.v2.InvokeActionRequest + 5, // 20: c1.connector.v2.ActionService.GetActionStatus:input_type -> c1.connector.v2.GetActionStatusRequest + 7, // 21: c1.connector.v2.ActionService.GetActionSchema:input_type -> c1.connector.v2.GetActionSchemaRequest + 9, // 22: c1.connector.v2.ActionService.ListActionSchemas:input_type -> c1.connector.v2.ListActionSchemasRequest + 4, // 23: c1.connector.v2.ActionService.InvokeAction:output_type -> c1.connector.v2.InvokeActionResponse + 6, // 24: c1.connector.v2.ActionService.GetActionStatus:output_type -> c1.connector.v2.GetActionStatusResponse + 8, // 25: c1.connector.v2.ActionService.GetActionSchema:output_type -> c1.connector.v2.GetActionSchemaResponse + 10, // 26: c1.connector.v2.ActionService.ListActionSchemas:output_type -> c1.connector.v2.ListActionSchemasResponse + 23, // [23:27] is the sub-list for method output_type + 19, // [19:23] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name } func init() { file_c1_connector_v2_action_proto_init() } @@ -865,7 +1185,7 @@ func file_c1_connector_v2_action_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_action_proto_rawDesc), len(file_c1_connector_v2_action_proto_rawDesc)), - NumEnums: 1, + NumEnums: 2, NumMessages: 9, NumExtensions: 0, NumServices: 1, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action.pb.validate.go 
index 590b0616..40566def 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action.pb.validate.go @@ -165,6 +165,8 @@ func (m *BatonActionSchema) validate(all bool) error { // no validation rules for Description + // no validation rules for ResourceTypeId + if len(errors) > 0 { return BatonActionSchemaMultiError(errors) } @@ -332,6 +334,8 @@ func (m *InvokeActionRequest) validate(all bool) error { } + // no validation rules for ResourceTypeId + if len(errors) > 0 { return InvokeActionRequestMultiError(errors) } @@ -1253,6 +1257,8 @@ func (m *ListActionSchemasRequest) validate(all bool) error { } + // no validation rules for ResourceTypeId + if len(errors) > 0 { return ListActionSchemasRequestMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action_protoopaque.pb.go new file mode 100644 index 00000000..44664477 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/action_protoopaque.pb.go @@ -0,0 +1,1221 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/action.proto + +//go:build protoopaque + +package v2 + +import ( + v1 "github.com/conductorone/baton-sdk/pb/c1/config/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type BatonActionStatus int32 + +const ( + BatonActionStatus_BATON_ACTION_STATUS_UNSPECIFIED BatonActionStatus = 0 + BatonActionStatus_BATON_ACTION_STATUS_UNKNOWN BatonActionStatus = 1 + BatonActionStatus_BATON_ACTION_STATUS_PENDING BatonActionStatus = 2 + BatonActionStatus_BATON_ACTION_STATUS_RUNNING BatonActionStatus = 3 + BatonActionStatus_BATON_ACTION_STATUS_COMPLETE BatonActionStatus = 4 + BatonActionStatus_BATON_ACTION_STATUS_FAILED BatonActionStatus = 5 +) + +// Enum value maps for BatonActionStatus. 
+var ( + BatonActionStatus_name = map[int32]string{ + 0: "BATON_ACTION_STATUS_UNSPECIFIED", + 1: "BATON_ACTION_STATUS_UNKNOWN", + 2: "BATON_ACTION_STATUS_PENDING", + 3: "BATON_ACTION_STATUS_RUNNING", + 4: "BATON_ACTION_STATUS_COMPLETE", + 5: "BATON_ACTION_STATUS_FAILED", + } + BatonActionStatus_value = map[string]int32{ + "BATON_ACTION_STATUS_UNSPECIFIED": 0, + "BATON_ACTION_STATUS_UNKNOWN": 1, + "BATON_ACTION_STATUS_PENDING": 2, + "BATON_ACTION_STATUS_RUNNING": 3, + "BATON_ACTION_STATUS_COMPLETE": 4, + "BATON_ACTION_STATUS_FAILED": 5, + } +) + +func (x BatonActionStatus) Enum() *BatonActionStatus { + p := new(BatonActionStatus) + *p = x + return p +} + +func (x BatonActionStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BatonActionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_action_proto_enumTypes[0].Descriptor() +} + +func (BatonActionStatus) Type() protoreflect.EnumType { + return &file_c1_connector_v2_action_proto_enumTypes[0] +} + +func (x BatonActionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// This defines the action type. +type ActionType int32 + +const ( + ActionType_ACTION_TYPE_UNSPECIFIED ActionType = 0 + ActionType_ACTION_TYPE_DYNAMIC ActionType = 1 + ActionType_ACTION_TYPE_ACCOUNT ActionType = 2 + ActionType_ACTION_TYPE_ACCOUNT_UPDATE_PROFILE ActionType = 3 + ActionType_ACTION_TYPE_ACCOUNT_DISABLE ActionType = 4 + ActionType_ACTION_TYPE_ACCOUNT_ENABLE ActionType = 5 + // Generic resource actions + ActionType_ACTION_TYPE_RESOURCE_CREATE ActionType = 6 + ActionType_ACTION_TYPE_RESOURCE_DELETE ActionType = 7 + ActionType_ACTION_TYPE_RESOURCE_ENABLE ActionType = 8 + ActionType_ACTION_TYPE_RESOURCE_DISABLE ActionType = 9 +) + +// Enum value maps for ActionType. 
+var ( + ActionType_name = map[int32]string{ + 0: "ACTION_TYPE_UNSPECIFIED", + 1: "ACTION_TYPE_DYNAMIC", + 2: "ACTION_TYPE_ACCOUNT", + 3: "ACTION_TYPE_ACCOUNT_UPDATE_PROFILE", + 4: "ACTION_TYPE_ACCOUNT_DISABLE", + 5: "ACTION_TYPE_ACCOUNT_ENABLE", + 6: "ACTION_TYPE_RESOURCE_CREATE", + 7: "ACTION_TYPE_RESOURCE_DELETE", + 8: "ACTION_TYPE_RESOURCE_ENABLE", + 9: "ACTION_TYPE_RESOURCE_DISABLE", + } + ActionType_value = map[string]int32{ + "ACTION_TYPE_UNSPECIFIED": 0, + "ACTION_TYPE_DYNAMIC": 1, + "ACTION_TYPE_ACCOUNT": 2, + "ACTION_TYPE_ACCOUNT_UPDATE_PROFILE": 3, + "ACTION_TYPE_ACCOUNT_DISABLE": 4, + "ACTION_TYPE_ACCOUNT_ENABLE": 5, + "ACTION_TYPE_RESOURCE_CREATE": 6, + "ACTION_TYPE_RESOURCE_DELETE": 7, + "ACTION_TYPE_RESOURCE_ENABLE": 8, + "ACTION_TYPE_RESOURCE_DISABLE": 9, + } +) + +func (x ActionType) Enum() *ActionType { + p := new(ActionType) + *p = x + return p +} + +func (x ActionType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ActionType) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_action_proto_enumTypes[1].Descriptor() +} + +func (ActionType) Type() protoreflect.EnumType { + return &file_c1_connector_v2_action_proto_enumTypes[1] +} + +func (x ActionType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type BatonActionSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Arguments *[]*v1.Field `protobuf:"bytes,2,rep,name=arguments,proto3"` + xxx_hidden_Constraints *[]*v1.Constraint `protobuf:"bytes,3,rep,name=constraints,proto3"` + xxx_hidden_ReturnTypes *[]*v1.Field `protobuf:"bytes,4,rep,name=return_types,json=returnTypes,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Description string `protobuf:"bytes,6,opt,name=description,proto3"` + xxx_hidden_ActionType []ActionType `protobuf:"varint,7,rep,packed,name=action_type,json=actionType,proto3,enum=c1.connector.v2.ActionType"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,8,opt,name=resource_type_id,json=resourceTypeId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonActionSchema) Reset() { + *x = BatonActionSchema{} + mi := &file_c1_connector_v2_action_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonActionSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonActionSchema) ProtoMessage() {} + +func (x *BatonActionSchema) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonActionSchema) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *BatonActionSchema) GetArguments() []*v1.Field { + if x != nil { + if x.xxx_hidden_Arguments != nil { + return *x.xxx_hidden_Arguments + } + } + return nil +} + +func (x *BatonActionSchema) GetConstraints() []*v1.Constraint { + if x != nil { + if x.xxx_hidden_Constraints != nil { + return *x.xxx_hidden_Constraints + } + } + return nil +} + +func (x *BatonActionSchema) GetReturnTypes() []*v1.Field { + if x != nil { + if x.xxx_hidden_ReturnTypes != nil { + return 
*x.xxx_hidden_ReturnTypes + } + } + return nil +} + +func (x *BatonActionSchema) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *BatonActionSchema) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *BatonActionSchema) GetActionType() []ActionType { + if x != nil { + return x.xxx_hidden_ActionType + } + return nil +} + +func (x *BatonActionSchema) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *BatonActionSchema) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *BatonActionSchema) SetArguments(v []*v1.Field) { + x.xxx_hidden_Arguments = &v +} + +func (x *BatonActionSchema) SetConstraints(v []*v1.Constraint) { + x.xxx_hidden_Constraints = &v +} + +func (x *BatonActionSchema) SetReturnTypes(v []*v1.Field) { + x.xxx_hidden_ReturnTypes = &v +} + +func (x *BatonActionSchema) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *BatonActionSchema) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *BatonActionSchema) SetActionType(v []ActionType) { + x.xxx_hidden_ActionType = v +} + +func (x *BatonActionSchema) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +type BatonActionSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + Arguments []*v1.Field + Constraints []*v1.Constraint + ReturnTypes []*v1.Field + DisplayName string + Description string + ActionType []ActionType + // Optional: if set, this action is scoped to a specific resource type + ResourceTypeId string +} + +func (b0 BatonActionSchema_builder) Build() *BatonActionSchema { + m0 := &BatonActionSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Arguments = &b.Arguments + x.xxx_hidden_Constraints = &b.Constraints + x.xxx_hidden_ReturnTypes = &b.ReturnTypes + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Description = b.Description + x.xxx_hidden_ActionType = b.ActionType + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + return m0 +} + +type InvokeActionRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Args *structpb.Struct `protobuf:"bytes,2,opt,name=args,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,4,opt,name=resource_type_id,json=resourceTypeId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokeActionRequest) Reset() { + *x = InvokeActionRequest{} + mi := &file_c1_connector_v2_action_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokeActionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeActionRequest) ProtoMessage() {} + +func (x *InvokeActionRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *InvokeActionRequest) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *InvokeActionRequest) GetArgs() *structpb.Struct { + if x != nil { + 
return x.xxx_hidden_Args + } + return nil +} + +func (x *InvokeActionRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *InvokeActionRequest) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *InvokeActionRequest) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *InvokeActionRequest) SetArgs(v *structpb.Struct) { + x.xxx_hidden_Args = v +} + +func (x *InvokeActionRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *InvokeActionRequest) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +func (x *InvokeActionRequest) HasArgs() bool { + if x == nil { + return false + } + return x.xxx_hidden_Args != nil +} + +func (x *InvokeActionRequest) ClearArgs() { + x.xxx_hidden_Args = nil +} + +type InvokeActionRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + Args *structpb.Struct + Annotations []*anypb.Any + // Optional: if set, invokes a resource-scoped action + ResourceTypeId string +} + +func (b0 InvokeActionRequest_builder) Build() *InvokeActionRequest { + m0 := &InvokeActionRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Args = b.Args + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + return m0 +} + +type InvokeActionResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_Status BatonActionStatus `protobuf:"varint,2,opt,name=status,proto3,enum=c1.connector.v2.BatonActionStatus"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + xxx_hidden_Response *structpb.Struct `protobuf:"bytes,4,opt,name=response,proto3"` + xxx_hidden_Name string `protobuf:"bytes,5,opt,name=name,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokeActionResponse) Reset() { + *x = InvokeActionResponse{} + mi := &file_c1_connector_v2_action_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokeActionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeActionResponse) ProtoMessage() {} + +func (x *InvokeActionResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *InvokeActionResponse) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *InvokeActionResponse) GetStatus() BatonActionStatus { + if x != nil { + return x.xxx_hidden_Status + } + return BatonActionStatus_BATON_ACTION_STATUS_UNSPECIFIED +} + +func (x *InvokeActionResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *InvokeActionResponse) GetResponse() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Response + } + return nil +} + +func (x *InvokeActionResponse) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *InvokeActionResponse) SetId(v string) { + 
x.xxx_hidden_Id = v +} + +func (x *InvokeActionResponse) SetStatus(v BatonActionStatus) { + x.xxx_hidden_Status = v +} + +func (x *InvokeActionResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *InvokeActionResponse) SetResponse(v *structpb.Struct) { + x.xxx_hidden_Response = v +} + +func (x *InvokeActionResponse) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *InvokeActionResponse) HasResponse() bool { + if x == nil { + return false + } + return x.xxx_hidden_Response != nil +} + +func (x *InvokeActionResponse) ClearResponse() { + x.xxx_hidden_Response = nil +} + +type InvokeActionResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + Status BatonActionStatus + Annotations []*anypb.Any + Response *structpb.Struct + Name string +} + +func (b0 InvokeActionResponse_builder) Build() *InvokeActionResponse { + m0 := &InvokeActionResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Response = b.Response + x.xxx_hidden_Name = b.Name + return m0 +} + +type GetActionStatusRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Id string `protobuf:"bytes,2,opt,name=id,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetActionStatusRequest) Reset() { + *x = GetActionStatusRequest{} + mi := &file_c1_connector_v2_action_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetActionStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetActionStatusRequest) ProtoMessage() {} + +func (x *GetActionStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Marked as deprecated in c1/connector/v2/action.proto. +func (x *GetActionStatusRequest) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *GetActionStatusRequest) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *GetActionStatusRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +// Deprecated: Marked as deprecated in c1/connector/v2/action.proto. +func (x *GetActionStatusRequest) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *GetActionStatusRequest) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *GetActionStatusRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type GetActionStatusRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Deprecated: Marked as deprecated in c1/connector/v2/action.proto. 
+ Name string + Id string + Annotations []*anypb.Any +} + +func (b0 GetActionStatusRequest_builder) Build() *GetActionStatusRequest { + m0 := &GetActionStatusRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GetActionStatusResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Id string `protobuf:"bytes,2,opt,name=id,proto3"` + xxx_hidden_Status BatonActionStatus `protobuf:"varint,3,opt,name=status,proto3,enum=c1.connector.v2.BatonActionStatus"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_Response *structpb.Struct `protobuf:"bytes,5,opt,name=response,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetActionStatusResponse) Reset() { + *x = GetActionStatusResponse{} + mi := &file_c1_connector_v2_action_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetActionStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetActionStatusResponse) ProtoMessage() {} + +func (x *GetActionStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetActionStatusResponse) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *GetActionStatusResponse) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *GetActionStatusResponse) GetStatus() BatonActionStatus { + if x != nil { + return x.xxx_hidden_Status + } + return BatonActionStatus_BATON_ACTION_STATUS_UNSPECIFIED +} + +func (x *GetActionStatusResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GetActionStatusResponse) GetResponse() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Response + } + return nil +} + +func (x *GetActionStatusResponse) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *GetActionStatusResponse) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *GetActionStatusResponse) SetStatus(v BatonActionStatus) { + x.xxx_hidden_Status = v +} + +func (x *GetActionStatusResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *GetActionStatusResponse) SetResponse(v *structpb.Struct) { + x.xxx_hidden_Response = v +} + +func (x *GetActionStatusResponse) HasResponse() bool { + if x == nil { + return false + } + return x.xxx_hidden_Response != nil +} + +func (x *GetActionStatusResponse) ClearResponse() { + x.xxx_hidden_Response = nil +} + +type GetActionStatusResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Id string + Status BatonActionStatus + Annotations []*anypb.Any + Response *structpb.Struct +} + +func (b0 GetActionStatusResponse_builder) Build() *GetActionStatusResponse { + m0 := &GetActionStatusResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Response = b.Response + return m0 +} + +type GetActionSchemaRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetActionSchemaRequest) Reset() { + *x = GetActionSchemaRequest{} + mi := &file_c1_connector_v2_action_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetActionSchemaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetActionSchemaRequest) ProtoMessage() {} + +func (x *GetActionSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetActionSchemaRequest) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *GetActionSchemaRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GetActionSchemaRequest) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *GetActionSchemaRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type GetActionSchemaRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Annotations []*anypb.Any +} + +func (b0 GetActionSchemaRequest_builder) Build() *GetActionSchemaRequest { + m0 := &GetActionSchemaRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GetActionSchemaResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Schema *BatonActionSchema `protobuf:"bytes,1,opt,name=schema,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetActionSchemaResponse) Reset() { + *x = GetActionSchemaResponse{} + mi := &file_c1_connector_v2_action_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetActionSchemaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetActionSchemaResponse) ProtoMessage() {} + +func (x *GetActionSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetActionSchemaResponse) GetSchema() *BatonActionSchema { + if x != nil { + return x.xxx_hidden_Schema + } + return nil +} + +func (x *GetActionSchemaResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GetActionSchemaResponse) SetSchema(v *BatonActionSchema) { + x.xxx_hidden_Schema = v +} + +func (x *GetActionSchemaResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *GetActionSchemaResponse) HasSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_Schema != nil +} + +func (x *GetActionSchemaResponse) ClearSchema() { + x.xxx_hidden_Schema = nil +} + +type GetActionSchemaResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Schema *BatonActionSchema + Annotations []*anypb.Any +} + +func (b0 GetActionSchemaResponse_builder) Build() *GetActionSchemaResponse { + m0 := &GetActionSchemaResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Schema = b.Schema + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ListActionSchemasRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,2,opt,name=resource_type_id,json=resourceTypeId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListActionSchemasRequest) Reset() { + *x = ListActionSchemasRequest{} + mi := &file_c1_connector_v2_action_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListActionSchemasRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListActionSchemasRequest) ProtoMessage() {} + +func (x *ListActionSchemasRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ListActionSchemasRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ListActionSchemasRequest) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *ListActionSchemasRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ListActionSchemasRequest) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +type ListActionSchemasRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + // Optional: filter to only return actions for a specific resource type + ResourceTypeId string +} + +func (b0 ListActionSchemasRequest_builder) Build() *ListActionSchemasRequest { + m0 := &ListActionSchemasRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + return m0 +} + +type ListActionSchemasResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Schemas *[]*BatonActionSchema `protobuf:"bytes,1,rep,name=schemas,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListActionSchemasResponse) Reset() { + *x = ListActionSchemasResponse{} + mi := &file_c1_connector_v2_action_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListActionSchemasResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListActionSchemasResponse) ProtoMessage() {} + +func (x *ListActionSchemasResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_action_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ListActionSchemasResponse) GetSchemas() []*BatonActionSchema { + if x != nil { + if x.xxx_hidden_Schemas != nil { + return *x.xxx_hidden_Schemas + } + } + return nil +} + +func (x *ListActionSchemasResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ListActionSchemasResponse) SetSchemas(v []*BatonActionSchema) { + x.xxx_hidden_Schemas = &v +} + +func (x *ListActionSchemasResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type ListActionSchemasResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Schemas []*BatonActionSchema + Annotations []*anypb.Any +} + +func (b0 ListActionSchemasResponse_builder) Build() *ListActionSchemasResponse { + m0 := &ListActionSchemasResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Schemas = &b.Schemas + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +var File_c1_connector_v2_action_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_action_proto_rawDesc = "" + + "\n" + + "\x1cc1/connector/v2/action.proto\x12\x0fc1.connector.v2\x1a\x19c1/config/v1/config.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xfb\x02\n" + + "\x11BatonActionSchema\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x121\n" + + "\targuments\x18\x02 \x03(\v2\x13.c1.config.v1.FieldR\targuments\x12:\n" + + "\vconstraints\x18\x03 \x03(\v2\x18.c1.config.v1.ConstraintR\vconstraints\x126\n" + + "\freturn_types\x18\x04 \x03(\v2\x13.c1.config.v1.FieldR\vreturnTypes\x12!\n" + + "\fdisplay_name\x18\x05 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x06 \x01(\tR\vdescription\x12<\n" + + "\vaction_type\x18\a \x03(\x0e2\x1b.c1.connector.v2.ActionTypeR\n" + + "actionType\x12(\n" + + "\x10resource_type_id\x18\b \x01(\tR\x0eresourceTypeId\"\xb8\x01\n" + + "\x13InvokeActionRequest\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12+\n" + + "\x04args\x18\x02 \x01(\v2\x17.google.protobuf.StructR\x04args\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12(\n" + + "\x10resource_type_id\x18\x04 \x01(\tR\x0eresourceTypeId\"\xe3\x01\n" + + "\x14InvokeActionResponse\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12:\n" + + "\x06status\x18\x02 \x01(\x0e2\".c1.connector.v2.BatonActionStatusR\x06status\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\bresponse\x18\x04 \x01(\v2\x17.google.protobuf.StructR\bresponse\x12\x12\n" + + "\x04name\x18\x05 \x01(\tR\x04name\"x\n" + + "\x16GetActionStatusRequest\x12\x16\n" + + "\x04name\x18\x01 \x01(\tB\x02\x18\x01R\x04name\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xe6\x01\n" + + "\x17GetActionStatusResponse\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x12:\n" + + "\x06status\x18\x03 \x01(\x0e2\".c1.connector.v2.BatonActionStatusR\x06status\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\bresponse\x18\x05 \x01(\v2\x17.google.protobuf.StructR\bresponse\"d\n" + + "\x16GetActionSchemaRequest\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x8d\x01\n" + + "\x17GetActionSchemaResponse\x12:\n" + + "\x06schema\x18\x01 \x01(\v2\".c1.connector.v2.BatonActionSchemaR\x06schema\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"|\n" + + "\x18ListActionSchemasRequest\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12(\n" + + "\x10resource_type_id\x18\x02 \x01(\tR\x0eresourceTypeId\"\x91\x01\n" + + "\x19ListActionSchemasResponse\x12<\n" + + "\aschemas\x18\x01 \x03(\v2\".c1.connector.v2.BatonActionSchemaR\aschemas\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations*\xdd\x01\n" + + "\x11BatonActionStatus\x12#\n" + + "\x1fBATON_ACTION_STATUS_UNSPECIFIED\x10\x00\x12\x1f\n" + + "\x1bBATON_ACTION_STATUS_UNKNOWN\x10\x01\x12\x1f\n" + + 
"\x1bBATON_ACTION_STATUS_PENDING\x10\x02\x12\x1f\n" + + "\x1bBATON_ACTION_STATUS_RUNNING\x10\x03\x12 \n" + + "\x1cBATON_ACTION_STATUS_COMPLETE\x10\x04\x12\x1e\n" + + "\x1aBATON_ACTION_STATUS_FAILED\x10\x05*\xc9\x02\n" + + "\n" + + "ActionType\x12\x1b\n" + + "\x17ACTION_TYPE_UNSPECIFIED\x10\x00\x12\x17\n" + + "\x13ACTION_TYPE_DYNAMIC\x10\x01\x12\x17\n" + + "\x13ACTION_TYPE_ACCOUNT\x10\x02\x12&\n" + + "\"ACTION_TYPE_ACCOUNT_UPDATE_PROFILE\x10\x03\x12\x1f\n" + + "\x1bACTION_TYPE_ACCOUNT_DISABLE\x10\x04\x12\x1e\n" + + "\x1aACTION_TYPE_ACCOUNT_ENABLE\x10\x05\x12\x1f\n" + + "\x1bACTION_TYPE_RESOURCE_CREATE\x10\x06\x12\x1f\n" + + "\x1bACTION_TYPE_RESOURCE_DELETE\x10\a\x12\x1f\n" + + "\x1bACTION_TYPE_RESOURCE_ENABLE\x10\b\x12 \n" + + "\x1cACTION_TYPE_RESOURCE_DISABLE\x10\t2\xa4\x03\n" + + "\rActionService\x12[\n" + + "\fInvokeAction\x12$.c1.connector.v2.InvokeActionRequest\x1a%.c1.connector.v2.InvokeActionResponse\x12d\n" + + "\x0fGetActionStatus\x12'.c1.connector.v2.GetActionStatusRequest\x1a(.c1.connector.v2.GetActionStatusResponse\x12d\n" + + "\x0fGetActionSchema\x12'.c1.connector.v2.GetActionSchemaRequest\x1a(.c1.connector.v2.GetActionSchemaResponse\x12j\n" + + "\x11ListActionSchemas\x12).c1.connector.v2.ListActionSchemasRequest\x1a*.c1.connector.v2.ListActionSchemasResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_action_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_c1_connector_v2_action_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_c1_connector_v2_action_proto_goTypes = []any{ + (BatonActionStatus)(0), // 0: c1.connector.v2.BatonActionStatus + (ActionType)(0), // 1: c1.connector.v2.ActionType + (*BatonActionSchema)(nil), // 2: c1.connector.v2.BatonActionSchema + (*InvokeActionRequest)(nil), // 3: c1.connector.v2.InvokeActionRequest + (*InvokeActionResponse)(nil), // 4: c1.connector.v2.InvokeActionResponse + (*GetActionStatusRequest)(nil), // 5: c1.connector.v2.GetActionStatusRequest + (*GetActionStatusResponse)(nil), // 6: c1.connector.v2.GetActionStatusResponse + (*GetActionSchemaRequest)(nil), // 7: c1.connector.v2.GetActionSchemaRequest + (*GetActionSchemaResponse)(nil), // 8: c1.connector.v2.GetActionSchemaResponse + (*ListActionSchemasRequest)(nil), // 9: c1.connector.v2.ListActionSchemasRequest + (*ListActionSchemasResponse)(nil), // 10: c1.connector.v2.ListActionSchemasResponse + (*v1.Field)(nil), // 11: c1.config.v1.Field + (*v1.Constraint)(nil), // 12: c1.config.v1.Constraint + (*structpb.Struct)(nil), // 13: google.protobuf.Struct + (*anypb.Any)(nil), // 14: google.protobuf.Any +} +var file_c1_connector_v2_action_proto_depIdxs = []int32{ + 11, // 0: c1.connector.v2.BatonActionSchema.arguments:type_name -> c1.config.v1.Field + 12, // 1: c1.connector.v2.BatonActionSchema.constraints:type_name -> c1.config.v1.Constraint + 11, // 2: c1.connector.v2.BatonActionSchema.return_types:type_name -> c1.config.v1.Field + 1, // 3: c1.connector.v2.BatonActionSchema.action_type:type_name -> c1.connector.v2.ActionType + 13, // 4: c1.connector.v2.InvokeActionRequest.args:type_name -> google.protobuf.Struct + 14, // 5: c1.connector.v2.InvokeActionRequest.annotations:type_name -> google.protobuf.Any + 0, // 6: c1.connector.v2.InvokeActionResponse.status:type_name -> c1.connector.v2.BatonActionStatus + 14, // 7: c1.connector.v2.InvokeActionResponse.annotations:type_name -> google.protobuf.Any + 13, // 8: c1.connector.v2.InvokeActionResponse.response:type_name -> google.protobuf.Struct + 14, // 9: 
c1.connector.v2.GetActionStatusRequest.annotations:type_name -> google.protobuf.Any + 0, // 10: c1.connector.v2.GetActionStatusResponse.status:type_name -> c1.connector.v2.BatonActionStatus + 14, // 11: c1.connector.v2.GetActionStatusResponse.annotations:type_name -> google.protobuf.Any + 13, // 12: c1.connector.v2.GetActionStatusResponse.response:type_name -> google.protobuf.Struct + 14, // 13: c1.connector.v2.GetActionSchemaRequest.annotations:type_name -> google.protobuf.Any + 2, // 14: c1.connector.v2.GetActionSchemaResponse.schema:type_name -> c1.connector.v2.BatonActionSchema + 14, // 15: c1.connector.v2.GetActionSchemaResponse.annotations:type_name -> google.protobuf.Any + 14, // 16: c1.connector.v2.ListActionSchemasRequest.annotations:type_name -> google.protobuf.Any + 2, // 17: c1.connector.v2.ListActionSchemasResponse.schemas:type_name -> c1.connector.v2.BatonActionSchema + 14, // 18: c1.connector.v2.ListActionSchemasResponse.annotations:type_name -> google.protobuf.Any + 3, // 19: c1.connector.v2.ActionService.InvokeAction:input_type -> c1.connector.v2.InvokeActionRequest + 5, // 20: c1.connector.v2.ActionService.GetActionStatus:input_type -> c1.connector.v2.GetActionStatusRequest + 7, // 21: c1.connector.v2.ActionService.GetActionSchema:input_type -> c1.connector.v2.GetActionSchemaRequest + 9, // 22: c1.connector.v2.ActionService.ListActionSchemas:input_type -> c1.connector.v2.ListActionSchemasRequest + 4, // 23: c1.connector.v2.ActionService.InvokeAction:output_type -> c1.connector.v2.InvokeActionResponse + 6, // 24: c1.connector.v2.ActionService.GetActionStatus:output_type -> c1.connector.v2.GetActionStatusResponse + 8, // 25: c1.connector.v2.ActionService.GetActionSchema:output_type -> c1.connector.v2.GetActionSchemaResponse + 10, // 26: c1.connector.v2.ActionService.ListActionSchemas:output_type -> c1.connector.v2.ListActionSchemasResponse + 23, // [23:27] is the sub-list for method output_type + 19, // [19:23] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_action_proto_init() } +func file_c1_connector_v2_action_proto_init() { + if File_c1_connector_v2_action_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_action_proto_rawDesc), len(file_c1_connector_v2_action_proto_rawDesc)), + NumEnums: 2, + NumMessages: 9, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connector_v2_action_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_action_proto_depIdxs, + EnumInfos: file_c1_connector_v2_action_proto_enumTypes, + MessageInfos: file_c1_connector_v2_action_proto_msgTypes, + }.Build() + File_c1_connector_v2_action_proto = out.File + file_c1_connector_v2_action_proto_goTypes = nil + file_c1_connector_v2_action_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_baton_id.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_baton_id.pb.go index 88f6033b..94dc93dc 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_baton_id.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_baton_id.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_baton_id.proto +//go:build !protoopaque + package v2 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -22,7 +23,7 @@ const ( ) type ExternalResourceMatch struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceType ResourceType_Trait `protobuf:"varint,1,opt,name=resource_type,json=resourceType,proto3,enum=c1.connector.v2.ResourceType_Trait" json:"resource_type,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` @@ -55,11 +56,6 @@ func (x *ExternalResourceMatch) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalResourceMatch.ProtoReflect.Descriptor instead. -func (*ExternalResourceMatch) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_baton_id_proto_rawDescGZIP(), []int{0} -} - func (x *ExternalResourceMatch) GetResourceType() ResourceType_Trait { if x != nil { return x.ResourceType @@ -81,8 +77,38 @@ func (x *ExternalResourceMatch) GetValue() string { return "" } +func (x *ExternalResourceMatch) SetResourceType(v ResourceType_Trait) { + x.ResourceType = v +} + +func (x *ExternalResourceMatch) SetKey(v string) { + x.Key = v +} + +func (x *ExternalResourceMatch) SetValue(v string) { + x.Value = v +} + +type ExternalResourceMatch_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceType ResourceType_Trait + Key string + Value string +} + +func (b0 ExternalResourceMatch_builder) Build() *ExternalResourceMatch { + m0 := &ExternalResourceMatch{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceType = b.ResourceType + x.Key = b.Key + x.Value = b.Value + return m0 +} + type ExternalResourceMatchAll struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceType ResourceType_Trait `protobuf:"varint,1,opt,name=resource_type,json=resourceType,proto3,enum=c1.connector.v2.ResourceType_Trait" json:"resource_type,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -113,11 +139,6 @@ func (x *ExternalResourceMatchAll) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalResourceMatchAll.ProtoReflect.Descriptor instead. -func (*ExternalResourceMatchAll) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_baton_id_proto_rawDescGZIP(), []int{1} -} - func (x *ExternalResourceMatchAll) GetResourceType() ResourceType_Trait { if x != nil { return x.ResourceType @@ -125,8 +146,26 @@ func (x *ExternalResourceMatchAll) GetResourceType() ResourceType_Trait { return ResourceType_TRAIT_UNSPECIFIED } +func (x *ExternalResourceMatchAll) SetResourceType(v ResourceType_Trait) { + x.ResourceType = v +} + +type ExternalResourceMatchAll_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceType ResourceType_Trait +} + +func (b0 ExternalResourceMatchAll_builder) Build() *ExternalResourceMatchAll { + m0 := &ExternalResourceMatchAll{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceType = b.ResourceType + return m0 +} + type ExternalResourceMatchID struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -157,11 +196,6 @@ func (x *ExternalResourceMatchID) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalResourceMatchID.ProtoReflect.Descriptor instead. -func (*ExternalResourceMatchID) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_baton_id_proto_rawDescGZIP(), []int{2} -} - func (x *ExternalResourceMatchID) GetId() string { if x != nil { return x.Id @@ -169,8 +203,26 @@ func (x *ExternalResourceMatchID) GetId() string { return "" } +func (x *ExternalResourceMatchID) SetId(v string) { + x.Id = v +} + +type ExternalResourceMatchID_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string +} + +func (b0 ExternalResourceMatchID_builder) Build() *ExternalResourceMatchID { + m0 := &ExternalResourceMatchID{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + return m0 +} + type BatonID struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -200,56 +252,32 @@ func (x *BatonID) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonID.ProtoReflect.Descriptor instead. -func (*BatonID) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_baton_id_proto_rawDescGZIP(), []int{3} +type BatonID_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ } -var File_c1_connector_v2_annotation_baton_id_proto protoreflect.FileDescriptor +func (b0 BatonID_builder) Build() *BatonID { + m0 := &BatonID{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} -var file_c1_connector_v2_annotation_baton_id_proto_rawDesc = string([]byte{ - 0x0a, 0x29, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x61, 0x74, - 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1e, 0x63, 0x31, - 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, 0x01, 0x0a, - 0x15, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x48, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x54, 0x72, 0x61, - 0x69, 0x74, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x64, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x41, 0x6c, 0x6c, 0x12, 0x48, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x69, 0x74, - 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x29, - 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x09, 0x0a, 0x07, 0x42, 0x61, 0x74, - 0x6f, 0x6e, 0x49, 0x44, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, - 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_baton_id_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_baton_id_proto_rawDescData []byte -) +var File_c1_connector_v2_annotation_baton_id_proto protoreflect.FileDescriptor -func file_c1_connector_v2_annotation_baton_id_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_baton_id_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_baton_id_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_baton_id_proto_rawDesc), len(file_c1_connector_v2_annotation_baton_id_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_baton_id_proto_rawDescData -} +const file_c1_connector_v2_annotation_baton_id_proto_rawDesc = "" + + "\n" + + ")c1/connector/v2/annotation_baton_id.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\"\x89\x01\n" + + "\x15ExternalResourceMatch\x12H\n" + + "\rresource_type\x18\x01 \x01(\x0e2#.c1.connector.v2.ResourceType.TraitR\fresourceType\x12\x10\n" + + "\x03key\x18\x02 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x03 \x01(\tR\x05value\"d\n" + + "\x18ExternalResourceMatchAll\x12H\n" + + "\rresource_type\x18\x01 \x01(\x0e2#.c1.connector.v2.ResourceType.TraitR\fresourceType\")\n" + + "\x17ExternalResourceMatchID\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\"\t\n" + + "\aBatonIDB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" var file_c1_connector_v2_annotation_baton_id_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_c1_connector_v2_annotation_baton_id_proto_goTypes = []any{ diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_baton_id_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_baton_id_protoopaque.pb.go new file mode 100644 index 00000000..14b010c5 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_baton_id_protoopaque.pb.go @@ -0,0 +1,323 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_baton_id.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ExternalResourceMatch struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceType ResourceType_Trait `protobuf:"varint,1,opt,name=resource_type,json=resourceType,proto3,enum=c1.connector.v2.ResourceType_Trait"` + xxx_hidden_Key string `protobuf:"bytes,2,opt,name=key,proto3"` + xxx_hidden_Value string `protobuf:"bytes,3,opt,name=value,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalResourceMatch) Reset() { + *x = ExternalResourceMatch{} + mi := &file_c1_connector_v2_annotation_baton_id_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalResourceMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalResourceMatch) ProtoMessage() {} + +func (x *ExternalResourceMatch) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_baton_id_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalResourceMatch) GetResourceType() ResourceType_Trait { + if x != nil { + return x.xxx_hidden_ResourceType + } + return ResourceType_TRAIT_UNSPECIFIED +} + +func (x *ExternalResourceMatch) GetKey() string { + if x != nil { + return x.xxx_hidden_Key + } + return "" +} + +func (x *ExternalResourceMatch) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *ExternalResourceMatch) SetResourceType(v ResourceType_Trait) { + x.xxx_hidden_ResourceType = v +} + +func (x *ExternalResourceMatch) SetKey(v string) { + x.xxx_hidden_Key = v +} + +func (x *ExternalResourceMatch) SetValue(v string) { + x.xxx_hidden_Value = v +} + +type ExternalResourceMatch_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceType ResourceType_Trait + Key string + Value string +} + +func (b0 ExternalResourceMatch_builder) Build() *ExternalResourceMatch { + m0 := &ExternalResourceMatch{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceType = b.ResourceType + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Value = b.Value + return m0 +} + +type ExternalResourceMatchAll struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceType ResourceType_Trait `protobuf:"varint,1,opt,name=resource_type,json=resourceType,proto3,enum=c1.connector.v2.ResourceType_Trait"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalResourceMatchAll) Reset() { + *x = ExternalResourceMatchAll{} + mi := &file_c1_connector_v2_annotation_baton_id_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalResourceMatchAll) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalResourceMatchAll) ProtoMessage() {} + +func (x *ExternalResourceMatchAll) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_baton_id_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalResourceMatchAll) GetResourceType() ResourceType_Trait { + if x != nil { + return x.xxx_hidden_ResourceType + } + return ResourceType_TRAIT_UNSPECIFIED +} + +func (x *ExternalResourceMatchAll) SetResourceType(v ResourceType_Trait) { + x.xxx_hidden_ResourceType = v +} + +type ExternalResourceMatchAll_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceType ResourceType_Trait +} + +func (b0 ExternalResourceMatchAll_builder) Build() *ExternalResourceMatchAll { + m0 := &ExternalResourceMatchAll{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceType = b.ResourceType + return m0 +} + +type ExternalResourceMatchID struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalResourceMatchID) Reset() { + *x = ExternalResourceMatchID{} + mi := &file_c1_connector_v2_annotation_baton_id_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalResourceMatchID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalResourceMatchID) ProtoMessage() {} + +func (x *ExternalResourceMatchID) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_baton_id_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalResourceMatchID) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *ExternalResourceMatchID) SetId(v string) { + x.xxx_hidden_Id = v +} + +type ExternalResourceMatchID_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string +} + +func (b0 ExternalResourceMatchID_builder) Build() *ExternalResourceMatchID { + m0 := &ExternalResourceMatchID{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + return m0 +} + +type BatonID struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonID) Reset() { + *x = BatonID{} + mi := &file_c1_connector_v2_annotation_baton_id_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonID) ProtoMessage() {} + +func (x *BatonID) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_baton_id_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type BatonID_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 BatonID_builder) Build() *BatonID { + m0 := &BatonID{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connector_v2_annotation_baton_id_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_baton_id_proto_rawDesc = "" + + "\n" + + ")c1/connector/v2/annotation_baton_id.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\"\x89\x01\n" + + "\x15ExternalResourceMatch\x12H\n" + + "\rresource_type\x18\x01 \x01(\x0e2#.c1.connector.v2.ResourceType.TraitR\fresourceType\x12\x10\n" + + "\x03key\x18\x02 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x03 \x01(\tR\x05value\"d\n" + + "\x18ExternalResourceMatchAll\x12H\n" + + "\rresource_type\x18\x01 \x01(\x0e2#.c1.connector.v2.ResourceType.TraitR\fresourceType\")\n" + + "\x17ExternalResourceMatchID\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\"\t\n" + + "\aBatonIDB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_baton_id_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_c1_connector_v2_annotation_baton_id_proto_goTypes = []any{ + (*ExternalResourceMatch)(nil), // 0: c1.connector.v2.ExternalResourceMatch + (*ExternalResourceMatchAll)(nil), // 1: c1.connector.v2.ExternalResourceMatchAll + (*ExternalResourceMatchID)(nil), // 2: c1.connector.v2.ExternalResourceMatchID + (*BatonID)(nil), // 3: c1.connector.v2.BatonID + (ResourceType_Trait)(0), // 4: c1.connector.v2.ResourceType.Trait +} +var file_c1_connector_v2_annotation_baton_id_proto_depIdxs = []int32{ + 4, // 0: c1.connector.v2.ExternalResourceMatch.resource_type:type_name -> c1.connector.v2.ResourceType.Trait + 4, // 1: c1.connector.v2.ExternalResourceMatchAll.resource_type:type_name -> c1.connector.v2.ResourceType.Trait + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_baton_id_proto_init() } +func file_c1_connector_v2_annotation_baton_id_proto_init() { + if File_c1_connector_v2_annotation_baton_id_proto != nil { + return + } + file_c1_connector_v2_resource_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_baton_id_proto_rawDesc), len(file_c1_connector_v2_annotation_baton_id_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_baton_id_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_baton_id_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_baton_id_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_baton_id_proto = out.File + file_c1_connector_v2_annotation_baton_id_proto_goTypes = nil + file_c1_connector_v2_annotation_baton_id_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.go index 1abd77fb..3c218d56 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_entitlement.proto +//go:build !protoopaque + package v2 import ( @@ -11,7 +13,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( ) type EntitlementImmutable struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SourceId string `protobuf:"bytes,1,opt,name=source_id,json=sourceId,proto3" json:"source_id,omitempty"` Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` unknownFields protoimpl.UnknownFields @@ -55,11 +56,6 @@ func (x *EntitlementImmutable) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EntitlementImmutable.ProtoReflect.Descriptor instead. 
-func (*EntitlementImmutable) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_entitlement_proto_rawDescGZIP(), []int{0} -} - func (x *EntitlementImmutable) GetSourceId() string { if x != nil { return x.SourceId @@ -74,40 +70,50 @@ func (x *EntitlementImmutable) GetMetadata() *structpb.Struct { return nil } -var File_c1_connector_v2_annotation_entitlement_proto protoreflect.FileDescriptor +func (x *EntitlementImmutable) SetSourceId(v string) { + x.SourceId = v +} -var file_c1_connector_v2_annotation_entitlement_proto_rawDesc = string([]byte{ - 0x0a, 0x2c, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, - 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x68, 0x0a, - 0x14, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x6d, 0x6d, 0x75, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x49, 0x64, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, - 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, - 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_entitlement_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_entitlement_proto_rawDescData []byte -) +func (x *EntitlementImmutable) SetMetadata(v *structpb.Struct) { + x.Metadata = v +} + +func (x *EntitlementImmutable) HasMetadata() bool { + if x == nil { + return false + } + return x.Metadata != nil +} -func file_c1_connector_v2_annotation_entitlement_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_entitlement_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_entitlement_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_entitlement_proto_rawDesc), len(file_c1_connector_v2_annotation_entitlement_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_entitlement_proto_rawDescData +func (x *EntitlementImmutable) ClearMetadata() { + x.Metadata = nil } +type EntitlementImmutable_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SourceId string + Metadata *structpb.Struct +} + +func (b0 EntitlementImmutable_builder) Build() *EntitlementImmutable { + m0 := &EntitlementImmutable{} + b, x := &b0, m0 + _, _ = b, x + x.SourceId = b.SourceId + x.Metadata = b.Metadata + return m0 +} + +var File_c1_connector_v2_annotation_entitlement_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_entitlement_proto_rawDesc = "" + + "\n" + + ",c1/connector/v2/annotation_entitlement.proto\x12\x0fc1.connector.v2\x1a\x1cgoogle/protobuf/struct.proto\"h\n" + + "\x14EntitlementImmutable\x12\x1b\n" + + "\tsource_id\x18\x01 \x01(\tR\bsourceId\x123\n" + + "\bmetadata\x18\x02 \x01(\v2\x17.google.protobuf.StructR\bmetadataB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_entitlement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_c1_connector_v2_annotation_entitlement_proto_goTypes = []any{ (*EntitlementImmutable)(nil), // 0: c1.connector.v2.EntitlementImmutable diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement_protoopaque.pb.go new file mode 100644 index 00000000..9baa65cf --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_entitlement_protoopaque.pb.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_entitlement.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EntitlementImmutable struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SourceId string `protobuf:"bytes,1,opt,name=source_id,json=sourceId,proto3"` + xxx_hidden_Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementImmutable) Reset() { + *x = EntitlementImmutable{} + mi := &file_c1_connector_v2_annotation_entitlement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementImmutable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementImmutable) ProtoMessage() {} + +func (x *EntitlementImmutable) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_entitlement_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementImmutable) GetSourceId() string { + if x != nil { + return x.xxx_hidden_SourceId + } + return "" +} + +func (x *EntitlementImmutable) GetMetadata() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Metadata + } + return nil +} + +func (x *EntitlementImmutable) SetSourceId(v string) { + x.xxx_hidden_SourceId = v +} + +func (x *EntitlementImmutable) SetMetadata(v *structpb.Struct) { + x.xxx_hidden_Metadata = v +} + +func (x *EntitlementImmutable) HasMetadata() bool { + if x == nil { + return false + } + return x.xxx_hidden_Metadata != nil +} + +func (x *EntitlementImmutable) ClearMetadata() { + x.xxx_hidden_Metadata = nil +} + +type EntitlementImmutable_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SourceId string + Metadata *structpb.Struct +} + +func (b0 EntitlementImmutable_builder) Build() *EntitlementImmutable { + m0 := &EntitlementImmutable{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SourceId = b.SourceId + x.xxx_hidden_Metadata = b.Metadata + return m0 +} + +var File_c1_connector_v2_annotation_entitlement_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_entitlement_proto_rawDesc = "" + + "\n" + + ",c1/connector/v2/annotation_entitlement.proto\x12\x0fc1.connector.v2\x1a\x1cgoogle/protobuf/struct.proto\"h\n" + + "\x14EntitlementImmutable\x12\x1b\n" + + "\tsource_id\x18\x01 \x01(\tR\bsourceId\x123\n" + + "\bmetadata\x18\x02 \x01(\v2\x17.google.protobuf.StructR\bmetadataB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_entitlement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_entitlement_proto_goTypes = []any{ + (*EntitlementImmutable)(nil), // 0: c1.connector.v2.EntitlementImmutable + (*structpb.Struct)(nil), // 1: google.protobuf.Struct +} +var file_c1_connector_v2_annotation_entitlement_proto_depIdxs = []int32{ + 1, // 0: c1.connector.v2.EntitlementImmutable.metadata:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_entitlement_proto_init() } +func file_c1_connector_v2_annotation_entitlement_proto_init() { + if File_c1_connector_v2_annotation_entitlement_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_entitlement_proto_rawDesc), len(file_c1_connector_v2_annotation_entitlement_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_entitlement_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_entitlement_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_entitlement_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_entitlement_proto = out.File + file_c1_connector_v2_annotation_entitlement_proto_goTypes = nil + file_c1_connector_v2_annotation_entitlement_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_etag.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_etag.pb.go index 939bedd9..a106ae0a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_etag.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_etag.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_etag.proto +//go:build !protoopaque + package v2 import ( @@ -11,7 +13,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( ) type ETag struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` EntitlementId string `protobuf:"bytes,2,opt,name=entitlement_id,json=entitlementId,proto3" json:"entitlement_id,omitempty"` unknownFields protoimpl.UnknownFields @@ -55,11 +56,6 @@ func (x *ETag) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ETag.ProtoReflect.Descriptor instead. -func (*ETag) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_etag_proto_rawDescGZIP(), []int{0} -} - func (x *ETag) GetValue() string { if x != nil { return x.Value @@ -74,8 +70,32 @@ func (x *ETag) GetEntitlementId() string { return "" } +func (x *ETag) SetValue(v string) { + x.Value = v +} + +func (x *ETag) SetEntitlementId(v string) { + x.EntitlementId = v +} + +type ETag_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value string + EntitlementId string +} + +func (b0 ETag_builder) Build() *ETag { + m0 := &ETag{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + x.EntitlementId = b.EntitlementId + return m0 +} + type ETagMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Metadata *structpb.Struct `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -106,11 +126,6 @@ func (x *ETagMetadata) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ETagMetadata.ProtoReflect.Descriptor instead. -func (*ETagMetadata) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_etag_proto_rawDescGZIP(), []int{1} -} - func (x *ETagMetadata) GetMetadata() *structpb.Struct { if x != nil { return x.Metadata @@ -118,8 +133,37 @@ func (x *ETagMetadata) GetMetadata() *structpb.Struct { return nil } +func (x *ETagMetadata) SetMetadata(v *structpb.Struct) { + x.Metadata = v +} + +func (x *ETagMetadata) HasMetadata() bool { + if x == nil { + return false + } + return x.Metadata != nil +} + +func (x *ETagMetadata) ClearMetadata() { + x.Metadata = nil +} + +type ETagMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Metadata *structpb.Struct +} + +func (b0 ETagMetadata_builder) Build() *ETagMetadata { + m0 := &ETagMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.Metadata = b.Metadata + return m0 +} + type ETagMatch struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` EntitlementId string `protobuf:"bytes,1,opt,name=entitlement_id,json=entitlementId,proto3" json:"entitlement_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -150,11 +194,6 @@ func (x *ETagMatch) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ETagMatch.ProtoReflect.Descriptor instead. 
-func (*ETagMatch) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_etag_proto_rawDescGZIP(), []int{2} -} - func (x *ETagMatch) GetEntitlementId() string { if x != nil { return x.EntitlementId @@ -162,45 +201,37 @@ func (x *ETagMatch) GetEntitlementId() string { return "" } -var File_c1_connector_v2_annotation_etag_proto protoreflect.FileDescriptor +func (x *ETagMatch) SetEntitlementId(v string) { + x.EntitlementId = v +} -var file_c1_connector_v2_annotation_etag_proto_rawDesc = string([]byte{ - 0x0a, 0x25, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x74, 0x61, - 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x04, 0x45, 0x54, 0x61, 0x67, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x43, 0x0a, 0x0c, 0x45, - 0x54, 0x61, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x22, 0x32, 0x0a, 0x09, 0x45, 0x54, 0x61, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, - 0x0e, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x49, 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, - 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_etag_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_etag_proto_rawDescData []byte -) +type ETagMatch_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-func file_c1_connector_v2_annotation_etag_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_etag_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_etag_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_etag_proto_rawDesc), len(file_c1_connector_v2_annotation_etag_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_etag_proto_rawDescData + EntitlementId string } +func (b0 ETagMatch_builder) Build() *ETagMatch { + m0 := &ETagMatch{} + b, x := &b0, m0 + _, _ = b, x + x.EntitlementId = b.EntitlementId + return m0 +} + +var File_c1_connector_v2_annotation_etag_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_etag_proto_rawDesc = "" + + "\n" + + "%c1/connector/v2/annotation_etag.proto\x12\x0fc1.connector.v2\x1a\x1cgoogle/protobuf/struct.proto\"C\n" + + "\x04ETag\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\x12%\n" + + "\x0eentitlement_id\x18\x02 \x01(\tR\rentitlementId\"C\n" + + "\fETagMetadata\x123\n" + + "\bmetadata\x18\x01 \x01(\v2\x17.google.protobuf.StructR\bmetadata\"2\n" + + "\tETagMatch\x12%\n" + + "\x0eentitlement_id\x18\x01 \x01(\tR\rentitlementIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_etag_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_c1_connector_v2_annotation_etag_proto_goTypes = []any{ (*ETag)(nil), // 0: c1.connector.v2.ETag diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_etag_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_etag_protoopaque.pb.go new file mode 100644 index 00000000..09c9512b --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_etag_protoopaque.pb.go @@ -0,0 +1,273 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_etag.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ETag struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value string `protobuf:"bytes,1,opt,name=value,proto3"` + xxx_hidden_EntitlementId string `protobuf:"bytes,2,opt,name=entitlement_id,json=entitlementId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ETag) Reset() { + *x = ETag{} + mi := &file_c1_connector_v2_annotation_etag_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ETag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ETag) ProtoMessage() {} + +func (x *ETag) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_etag_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ETag) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *ETag) GetEntitlementId() string { + if x != nil { + return x.xxx_hidden_EntitlementId + } + return "" +} + +func (x *ETag) SetValue(v string) { + x.xxx_hidden_Value = v +} + +func (x *ETag) SetEntitlementId(v string) { + x.xxx_hidden_EntitlementId = v +} + +type ETag_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value string + EntitlementId string +} + +func (b0 ETag_builder) Build() *ETag { + m0 := &ETag{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + x.xxx_hidden_EntitlementId = b.EntitlementId + return m0 +} + +type ETagMetadata struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Metadata *structpb.Struct `protobuf:"bytes,1,opt,name=metadata,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ETagMetadata) Reset() { + *x = ETagMetadata{} + mi := &file_c1_connector_v2_annotation_etag_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ETagMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ETagMetadata) ProtoMessage() {} + +func (x *ETagMetadata) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_etag_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ETagMetadata) GetMetadata() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Metadata + } + return nil +} + +func (x *ETagMetadata) SetMetadata(v *structpb.Struct) { + x.xxx_hidden_Metadata = v +} + +func (x *ETagMetadata) HasMetadata() bool { + if x == nil { + return false + } + return x.xxx_hidden_Metadata != nil +} + +func (x *ETagMetadata) ClearMetadata() { + x.xxx_hidden_Metadata = nil +} + +type ETagMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Metadata *structpb.Struct +} + +func (b0 ETagMetadata_builder) Build() *ETagMetadata { + m0 := &ETagMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Metadata = b.Metadata + return m0 +} + +type ETagMatch struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_EntitlementId string `protobuf:"bytes,1,opt,name=entitlement_id,json=entitlementId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ETagMatch) Reset() { + *x = ETagMatch{} + mi := &file_c1_connector_v2_annotation_etag_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ETagMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ETagMatch) ProtoMessage() {} + +func (x *ETagMatch) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_etag_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ETagMatch) GetEntitlementId() string { + if x != nil { + return x.xxx_hidden_EntitlementId + } + return "" +} + +func (x *ETagMatch) SetEntitlementId(v string) { + x.xxx_hidden_EntitlementId = v +} + +type ETagMatch_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + EntitlementId string +} + +func (b0 ETagMatch_builder) Build() *ETagMatch { + m0 := &ETagMatch{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_EntitlementId = b.EntitlementId + return m0 +} + +var File_c1_connector_v2_annotation_etag_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_etag_proto_rawDesc = "" + + "\n" + + "%c1/connector/v2/annotation_etag.proto\x12\x0fc1.connector.v2\x1a\x1cgoogle/protobuf/struct.proto\"C\n" + + "\x04ETag\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\x12%\n" + + "\x0eentitlement_id\x18\x02 \x01(\tR\rentitlementId\"C\n" + + "\fETagMetadata\x123\n" + + "\bmetadata\x18\x01 \x01(\v2\x17.google.protobuf.StructR\bmetadata\"2\n" + + "\tETagMatch\x12%\n" + + "\x0eentitlement_id\x18\x01 \x01(\tR\rentitlementIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_etag_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_c1_connector_v2_annotation_etag_proto_goTypes = []any{ + (*ETag)(nil), // 0: c1.connector.v2.ETag + (*ETagMetadata)(nil), // 1: c1.connector.v2.ETagMetadata + (*ETagMatch)(nil), // 2: c1.connector.v2.ETagMatch + (*structpb.Struct)(nil), // 3: google.protobuf.Struct +} +var file_c1_connector_v2_annotation_etag_proto_depIdxs = []int32{ + 3, // 0: c1.connector.v2.ETagMetadata.metadata:type_name -> google.protobuf.Struct + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_etag_proto_init() } +func file_c1_connector_v2_annotation_etag_proto_init() { + if File_c1_connector_v2_annotation_etag_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_etag_proto_rawDesc), len(file_c1_connector_v2_annotation_etag_proto_rawDesc)), + NumEnums: 
0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_etag_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_etag_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_etag_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_etag_proto = out.File + file_c1_connector_v2_annotation_etag_proto_goTypes = nil + file_c1_connector_v2_annotation_etag_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_link.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_link.pb.go index 65c95579..92a69bb8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_link.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_link.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_external_link.proto +//go:build !protoopaque + package v2 import ( @@ -11,7 +13,6 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( ) type ExternalLink struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -54,11 +55,6 @@ func (x *ExternalLink) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalLink.ProtoReflect.Descriptor instead. 
-func (*ExternalLink) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_external_link_proto_rawDescGZIP(), []int{0} -} - func (x *ExternalLink) GetUrl() string { if x != nil { return x.Url @@ -66,37 +62,32 @@ func (x *ExternalLink) GetUrl() string { return "" } -var File_c1_connector_v2_annotation_external_link_proto protoreflect.FileDescriptor +func (x *ExternalLink) SetUrl(v string) { + x.Url = v +} -var file_c1_connector_v2_annotation_external_link_proto_rawDesc = string([]byte{ - 0x0a, 0x2e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3c, 0x0a, 0x0c, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x2c, 0x0a, 0x03, 0x75, 0x72, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1a, 0xfa, 0x42, 0x17, 0x72, 0x15, 0x20, 0x01, - 0x28, 0x80, 0x08, 0x3a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0xd0, 0x01, 0x01, - 0x88, 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, - 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, - 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_external_link_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_external_link_proto_rawDescData []byte -) +type ExternalLink_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-func file_c1_connector_v2_annotation_external_link_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_external_link_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_external_link_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_external_link_proto_rawDesc), len(file_c1_connector_v2_annotation_external_link_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_external_link_proto_rawDescData + Url string } +func (b0 ExternalLink_builder) Build() *ExternalLink { + m0 := &ExternalLink{} + b, x := &b0, m0 + _, _ = b, x + x.Url = b.Url + return m0 +} + +var File_c1_connector_v2_annotation_external_link_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_external_link_proto_rawDesc = "" + + "\n" + + ".c1/connector/v2/annotation_external_link.proto\x12\x0fc1.connector.v2\x1a\x17validate/validate.proto\"<\n" + + "\fExternalLink\x12,\n" + + "\x03url\x18\x01 \x01(\tB\x1a\xfaB\x17r\x15 \x01(\x80\b:\bhttps://\xd0\x01\x01\x88\x01\x01R\x03urlB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_external_link_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_c1_connector_v2_annotation_external_link_proto_goTypes = []any{ (*ExternalLink)(nil), // 0: c1.connector.v2.ExternalLink diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_link_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_link_protoopaque.pb.go new file mode 100644 index 00000000..f04710cc --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_link_protoopaque.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_external_link.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ExternalLink struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Url string `protobuf:"bytes,1,opt,name=url,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalLink) Reset() { + *x = ExternalLink{} + mi := &file_c1_connector_v2_annotation_external_link_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalLink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalLink) ProtoMessage() {} + +func (x *ExternalLink) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_external_link_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalLink) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *ExternalLink) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +type ExternalLink_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Url string +} + +func (b0 ExternalLink_builder) Build() *ExternalLink { + m0 := &ExternalLink{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Url = b.Url + return m0 +} + +var File_c1_connector_v2_annotation_external_link_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_external_link_proto_rawDesc = "" + + "\n" + + ".c1/connector/v2/annotation_external_link.proto\x12\x0fc1.connector.v2\x1a\x17validate/validate.proto\"<\n" + + "\fExternalLink\x12,\n" + + "\x03url\x18\x01 \x01(\tB\x1a\xfaB\x17r\x15 \x01(\x80\b:\bhttps://\xd0\x01\x01\x88\x01\x01R\x03urlB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_external_link_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_external_link_proto_goTypes = []any{ + (*ExternalLink)(nil), // 0: c1.connector.v2.ExternalLink +} +var file_c1_connector_v2_annotation_external_link_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_external_link_proto_init() } +func file_c1_connector_v2_annotation_external_link_proto_init() { + if File_c1_connector_v2_annotation_external_link_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_external_link_proto_rawDesc), len(file_c1_connector_v2_annotation_external_link_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_external_link_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_external_link_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_external_link_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_external_link_proto = out.File + file_c1_connector_v2_annotation_external_link_proto_goTypes = nil + file_c1_connector_v2_annotation_external_link_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.go index 2803d103..61f83574 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_external_ticket.proto +//go:build !protoopaque + package v2 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -22,7 +23,7 @@ const ( ) type ExternalTicketSettings struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -53,11 +54,6 @@ func (x *ExternalTicketSettings) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalTicketSettings.ProtoReflect.Descriptor instead. -func (*ExternalTicketSettings) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_external_ticket_proto_rawDescGZIP(), []int{0} -} - func (x *ExternalTicketSettings) GetEnabled() bool { if x != nil { return x.Enabled @@ -65,8 +61,26 @@ func (x *ExternalTicketSettings) GetEnabled() bool { return false } +func (x *ExternalTicketSettings) SetEnabled(v bool) { + x.Enabled = v +} + +type ExternalTicketSettings_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Enabled bool +} + +func (b0 ExternalTicketSettings_builder) Build() *ExternalTicketSettings { + m0 := &ExternalTicketSettings{} + b, x := &b0, m0 + _, _ = b, x + x.Enabled = b.Enabled + return m0 +} + type ExternalTicketRef struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` ExternalTicketProvisionerConfigId string `protobuf:"bytes,2,opt,name=external_ticket_provisioner_config_id,json=externalTicketProvisionerConfigId,proto3" json:"external_ticket_provisioner_config_id,omitempty"` unknownFields protoimpl.UnknownFields @@ -98,11 +112,6 @@ func (x *ExternalTicketRef) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalTicketRef.ProtoReflect.Descriptor instead. 
-func (*ExternalTicketRef) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_external_ticket_proto_rawDescGZIP(), []int{1} -} - func (x *ExternalTicketRef) GetId() string { if x != nil { return x.Id @@ -117,42 +126,41 @@ func (x *ExternalTicketRef) GetExternalTicketProvisionerConfigId() string { return "" } -var File_c1_connector_v2_annotation_external_ticket_proto protoreflect.FileDescriptor +func (x *ExternalTicketRef) SetId(v string) { + x.Id = v +} -var file_c1_connector_v2_annotation_external_ticket_proto_rawDesc = string([]byte{ - 0x0a, 0x30, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x22, 0x32, 0x0a, 0x16, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x75, 0x0a, 0x11, 0x45, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x66, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x50, 0x0a, 0x25, - 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x21, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x49, 0x64, 0x42, 0x36, - 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, - 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, - 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_external_ticket_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_external_ticket_proto_rawDescData []byte -) +func (x *ExternalTicketRef) SetExternalTicketProvisionerConfigId(v string) { + x.ExternalTicketProvisionerConfigId = v +} + +type ExternalTicketRef_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-func file_c1_connector_v2_annotation_external_ticket_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_external_ticket_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_external_ticket_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_external_ticket_proto_rawDesc), len(file_c1_connector_v2_annotation_external_ticket_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_external_ticket_proto_rawDescData + Id string + ExternalTicketProvisionerConfigId string } +func (b0 ExternalTicketRef_builder) Build() *ExternalTicketRef { + m0 := &ExternalTicketRef{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.ExternalTicketProvisionerConfigId = b.ExternalTicketProvisionerConfigId + return m0 +} + +var File_c1_connector_v2_annotation_external_ticket_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_external_ticket_proto_rawDesc = "" + + "\n" + + "0c1/connector/v2/annotation_external_ticket.proto\x12\x0fc1.connector.v2\"2\n" + + "\x16ExternalTicketSettings\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\"u\n" + + "\x11ExternalTicketRef\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12P\n" + + "%external_ticket_provisioner_config_id\x18\x02 \x01(\tR!externalTicketProvisionerConfigIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_external_ticket_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_c1_connector_v2_annotation_external_ticket_proto_goTypes = []any{ (*ExternalTicketSettings)(nil), // 0: c1.connector.v2.ExternalTicketSettings diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket_protoopaque.pb.go new file mode 100644 index 00000000..9a7fce75 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_external_ticket_protoopaque.pb.go @@ -0,0 +1,199 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_external_ticket.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ExternalTicketSettings struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalTicketSettings) Reset() { + *x = ExternalTicketSettings{} + mi := &file_c1_connector_v2_annotation_external_ticket_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalTicketSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalTicketSettings) ProtoMessage() {} + +func (x *ExternalTicketSettings) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_external_ticket_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalTicketSettings) GetEnabled() bool { + if x != nil { + return x.xxx_hidden_Enabled + } + return false +} + +func (x *ExternalTicketSettings) SetEnabled(v bool) { + x.xxx_hidden_Enabled = v +} + +type ExternalTicketSettings_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Enabled bool +} + +func (b0 ExternalTicketSettings_builder) Build() *ExternalTicketSettings { + m0 := &ExternalTicketSettings{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Enabled = b.Enabled + return m0 +} + +type ExternalTicketRef struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_ExternalTicketProvisionerConfigId string `protobuf:"bytes,2,opt,name=external_ticket_provisioner_config_id,json=externalTicketProvisionerConfigId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalTicketRef) Reset() { + *x = ExternalTicketRef{} + mi := &file_c1_connector_v2_annotation_external_ticket_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalTicketRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalTicketRef) ProtoMessage() {} + +func (x *ExternalTicketRef) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_external_ticket_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalTicketRef) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *ExternalTicketRef) GetExternalTicketProvisionerConfigId() string { + if x != nil { + return x.xxx_hidden_ExternalTicketProvisionerConfigId + } + return "" +} + +func (x *ExternalTicketRef) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *ExternalTicketRef) SetExternalTicketProvisionerConfigId(v string) { + x.xxx_hidden_ExternalTicketProvisionerConfigId = v +} + +type ExternalTicketRef_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + ExternalTicketProvisionerConfigId string +} + +func (b0 ExternalTicketRef_builder) Build() *ExternalTicketRef { + m0 := &ExternalTicketRef{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_ExternalTicketProvisionerConfigId = b.ExternalTicketProvisionerConfigId + return m0 +} + +var File_c1_connector_v2_annotation_external_ticket_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_external_ticket_proto_rawDesc = "" + + "\n" + + "0c1/connector/v2/annotation_external_ticket.proto\x12\x0fc1.connector.v2\"2\n" + + "\x16ExternalTicketSettings\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\"u\n" + + "\x11ExternalTicketRef\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12P\n" + + "%external_ticket_provisioner_config_id\x18\x02 \x01(\tR!externalTicketProvisionerConfigIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_external_ticket_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_c1_connector_v2_annotation_external_ticket_proto_goTypes = []any{ + (*ExternalTicketSettings)(nil), // 0: c1.connector.v2.ExternalTicketSettings + (*ExternalTicketRef)(nil), // 1: c1.connector.v2.ExternalTicketRef +} +var file_c1_connector_v2_annotation_external_ticket_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_external_ticket_proto_init() } +func file_c1_connector_v2_annotation_external_ticket_proto_init() { + if File_c1_connector_v2_annotation_external_ticket_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_external_ticket_proto_rawDesc), len(file_c1_connector_v2_annotation_external_ticket_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_external_ticket_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_external_ticket_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_external_ticket_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_external_ticket_proto = out.File + file_c1_connector_v2_annotation_external_ticket_proto_goTypes = nil + file_c1_connector_v2_annotation_external_ticket_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.go index b2a5e03e..65fa98c4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_grant.proto +//go:build !protoopaque + package v2 import ( @@ -11,7 +13,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( ) type GrantMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Metadata *structpb.Struct `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -54,11 +55,6 @@ func (x *GrantMetadata) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantMetadata.ProtoReflect.Descriptor instead. -func (*GrantMetadata) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_grant_proto_rawDescGZIP(), []int{0} -} - func (x *GrantMetadata) GetMetadata() *structpb.Struct { if x != nil { return x.Metadata @@ -66,8 +62,37 @@ func (x *GrantMetadata) GetMetadata() *structpb.Struct { return nil } +func (x *GrantMetadata) SetMetadata(v *structpb.Struct) { + x.Metadata = v +} + +func (x *GrantMetadata) HasMetadata() bool { + if x == nil { + return false + } + return x.Metadata != nil +} + +func (x *GrantMetadata) ClearMetadata() { + x.Metadata = nil +} + +type GrantMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Metadata *structpb.Struct +} + +func (b0 GrantMetadata_builder) Build() *GrantMetadata { + m0 := &GrantMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.Metadata = b.Metadata + return m0 +} + type GrantExpandable struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` EntitlementIds []string `protobuf:"bytes,1,rep,name=entitlement_ids,json=entitlementIds,proto3" json:"entitlement_ids,omitempty"` Shallow bool `protobuf:"varint,2,opt,name=shallow,proto3" json:"shallow,omitempty"` ResourceTypeIds []string `protobuf:"bytes,3,rep,name=resource_type_ids,json=resourceTypeIds,proto3" json:"resource_type_ids,omitempty"` @@ -100,11 +125,6 @@ func (x *GrantExpandable) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantExpandable.ProtoReflect.Descriptor instead. -func (*GrantExpandable) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_grant_proto_rawDescGZIP(), []int{1} -} - func (x *GrantExpandable) GetEntitlementIds() []string { if x != nil { return x.EntitlementIds @@ -126,9 +146,39 @@ func (x *GrantExpandable) GetResourceTypeIds() []string { return nil } +func (x *GrantExpandable) SetEntitlementIds(v []string) { + x.EntitlementIds = v +} + +func (x *GrantExpandable) SetShallow(v bool) { + x.Shallow = v +} + +func (x *GrantExpandable) SetResourceTypeIds(v []string) { + x.ResourceTypeIds = v +} + +type GrantExpandable_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + EntitlementIds []string + Shallow bool + ResourceTypeIds []string +} + +func (b0 GrantExpandable_builder) Build() *GrantExpandable { + m0 := &GrantExpandable{} + b, x := &b0, m0 + _, _ = b, x + x.EntitlementIds = b.EntitlementIds + x.Shallow = b.Shallow + x.ResourceTypeIds = b.ResourceTypeIds + return m0 +} + // Grant cannot be updated or revoked. For example, membership in an "all users" group. 
type GrantImmutable struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SourceId string `protobuf:"bytes,1,opt,name=source_id,json=sourceId,proto3" json:"source_id,omitempty"` Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` unknownFields protoimpl.UnknownFields @@ -160,11 +210,6 @@ func (x *GrantImmutable) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantImmutable.ProtoReflect.Descriptor instead. -func (*GrantImmutable) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_grant_proto_rawDescGZIP(), []int{2} -} - func (x *GrantImmutable) GetSourceId() string { if x != nil { return x.SourceId @@ -179,9 +224,44 @@ func (x *GrantImmutable) GetMetadata() *structpb.Struct { return nil } +func (x *GrantImmutable) SetSourceId(v string) { + x.SourceId = v +} + +func (x *GrantImmutable) SetMetadata(v *structpb.Struct) { + x.Metadata = v +} + +func (x *GrantImmutable) HasMetadata() bool { + if x == nil { + return false + } + return x.Metadata != nil +} + +func (x *GrantImmutable) ClearMetadata() { + x.Metadata = nil +} + +type GrantImmutable_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SourceId string + Metadata *structpb.Struct +} + +func (b0 GrantImmutable_builder) Build() *GrantImmutable { + m0 := &GrantImmutable{} + b, x := &b0, m0 + _, _ = b, x + x.SourceId = b.SourceId + x.Metadata = b.Metadata + return m0 +} + // Grant was not created because the entitlement already existed. type GrantAlreadyExists struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -211,14 +291,21 @@ func (x *GrantAlreadyExists) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantAlreadyExists.ProtoReflect.Descriptor instead. -func (*GrantAlreadyExists) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_grant_proto_rawDescGZIP(), []int{3} +type GrantAlreadyExists_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 GrantAlreadyExists_builder) Build() *GrantAlreadyExists { + m0 := &GrantAlreadyExists{} + b, x := &b0, m0 + _, _ = b, x + return m0 } // Grant was not revoked because the entitlement does not exist. type GrantAlreadyRevoked struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -248,72 +335,93 @@ func (x *GrantAlreadyRevoked) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantAlreadyRevoked.ProtoReflect.Descriptor instead. -func (*GrantAlreadyRevoked) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_grant_proto_rawDescGZIP(), []int{4} +type GrantAlreadyRevoked_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ } -var File_c1_connector_v2_annotation_grant_proto protoreflect.FileDescriptor +func (b0 GrantAlreadyRevoked_builder) Build() *GrantAlreadyRevoked { + m0 := &GrantAlreadyRevoked{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} -var file_c1_connector_v2_annotation_grant_proto_rawDesc = string([]byte{ - 0x0a, 0x26, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x67, 0x72, 0x61, - 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x44, 0x0a, 0x0d, 0x47, 0x72, 0x61, 0x6e, 0x74, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x80, 0x01, - 0x0a, 0x0f, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x61, 0x62, 0x6c, - 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x68, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x68, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x49, 0x64, 0x73, - 0x22, 0x62, 0x0a, 0x0e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x49, 0x6d, 0x6d, 0x75, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, - 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x14, 0x0a, 0x12, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x41, 0x6c, 0x72, - 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x72, - 0x61, 0x6e, 0x74, 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, - 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -}) - -var ( - file_c1_connector_v2_annotation_grant_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_grant_proto_rawDescData []byte -) +// If a resource for a grant doesn't exist, insert it. 
+type InsertResourceGrants struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InsertResourceGrants) Reset() { + *x = InsertResourceGrants{} + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} -func file_c1_connector_v2_annotation_grant_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_grant_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_grant_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_grant_proto_rawDesc), len(file_c1_connector_v2_annotation_grant_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_grant_proto_rawDescData +func (x *InsertResourceGrants) String() string { + return protoimpl.X.MessageStringOf(x) } -var file_c1_connector_v2_annotation_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +func (*InsertResourceGrants) ProtoMessage() {} + +func (x *InsertResourceGrants) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type InsertResourceGrants_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 InsertResourceGrants_builder) Build() *InsertResourceGrants { + m0 := &InsertResourceGrants{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connector_v2_annotation_grant_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_grant_proto_rawDesc = "" + + "\n" + + "&c1/connector/v2/annotation_grant.proto\x12\x0fc1.connector.v2\x1a\x1cgoogle/protobuf/struct.proto\"D\n" + + "\rGrantMetadata\x123\n" + + "\bmetadata\x18\x01 \x01(\v2\x17.google.protobuf.StructR\bmetadata\"\x80\x01\n" + + "\x0fGrantExpandable\x12'\n" + + "\x0fentitlement_ids\x18\x01 \x03(\tR\x0eentitlementIds\x12\x18\n" + + "\ashallow\x18\x02 \x01(\bR\ashallow\x12*\n" + + "\x11resource_type_ids\x18\x03 \x03(\tR\x0fresourceTypeIds\"b\n" + + "\x0eGrantImmutable\x12\x1b\n" + + "\tsource_id\x18\x01 \x01(\tR\bsourceId\x123\n" + + "\bmetadata\x18\x02 \x01(\v2\x17.google.protobuf.StructR\bmetadata\"\x14\n" + + "\x12GrantAlreadyExists\"\x15\n" + + "\x13GrantAlreadyRevoked\"\x16\n" + + "\x14InsertResourceGrantsB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_c1_connector_v2_annotation_grant_proto_goTypes = []any{ - (*GrantMetadata)(nil), // 0: c1.connector.v2.GrantMetadata - (*GrantExpandable)(nil), // 1: c1.connector.v2.GrantExpandable - (*GrantImmutable)(nil), // 2: c1.connector.v2.GrantImmutable - (*GrantAlreadyExists)(nil), // 3: c1.connector.v2.GrantAlreadyExists - (*GrantAlreadyRevoked)(nil), // 4: c1.connector.v2.GrantAlreadyRevoked - (*structpb.Struct)(nil), // 5: google.protobuf.Struct + (*GrantMetadata)(nil), // 0: c1.connector.v2.GrantMetadata + (*GrantExpandable)(nil), // 1: c1.connector.v2.GrantExpandable + (*GrantImmutable)(nil), // 2: c1.connector.v2.GrantImmutable + (*GrantAlreadyExists)(nil), // 3: c1.connector.v2.GrantAlreadyExists + (*GrantAlreadyRevoked)(nil), // 4: c1.connector.v2.GrantAlreadyRevoked + (*InsertResourceGrants)(nil), // 5: 
c1.connector.v2.InsertResourceGrants + (*structpb.Struct)(nil), // 6: google.protobuf.Struct } var file_c1_connector_v2_annotation_grant_proto_depIdxs = []int32{ - 5, // 0: c1.connector.v2.GrantMetadata.metadata:type_name -> google.protobuf.Struct - 5, // 1: c1.connector.v2.GrantImmutable.metadata:type_name -> google.protobuf.Struct + 6, // 0: c1.connector.v2.GrantMetadata.metadata:type_name -> google.protobuf.Struct + 6, // 1: c1.connector.v2.GrantImmutable.metadata:type_name -> google.protobuf.Struct 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name @@ -332,7 +440,7 @@ func file_c1_connector_v2_annotation_grant_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_grant_proto_rawDesc), len(file_c1_connector_v2_annotation_grant_proto_rawDesc)), NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.validate.go index 33017b39..399f62ae 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant.pb.validate.go @@ -600,3 +600,105 @@ var _ interface { Cause() error ErrorName() string } = GrantAlreadyRevokedValidationError{} + +// Validate checks the field values on InsertResourceGrants with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *InsertResourceGrants) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on InsertResourceGrants with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// InsertResourceGrantsMultiError, or nil if none found. +func (m *InsertResourceGrants) ValidateAll() error { + return m.validate(true) +} + +func (m *InsertResourceGrants) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return InsertResourceGrantsMultiError(errors) + } + + return nil +} + +// InsertResourceGrantsMultiError is an error wrapping multiple validation +// errors returned by InsertResourceGrants.ValidateAll() if the designated +// constraints aren't met. +type InsertResourceGrantsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m InsertResourceGrantsMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m InsertResourceGrantsMultiError) AllErrors() []error { return m } + +// InsertResourceGrantsValidationError is the validation error returned by +// InsertResourceGrants.Validate if the designated constraints aren't met. +type InsertResourceGrantsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e InsertResourceGrantsValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e InsertResourceGrantsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e InsertResourceGrantsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e InsertResourceGrantsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e InsertResourceGrantsValidationError) ErrorName() string { + return "InsertResourceGrantsValidationError" +} + +// Error satisfies the builtin error interface +func (e InsertResourceGrantsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sInsertResourceGrants.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = InsertResourceGrantsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = InsertResourceGrantsValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant_protoopaque.pb.go new file mode 100644 index 00000000..71bd67e3 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_grant_protoopaque.pb.go @@ -0,0 +1,454 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_grant.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GrantMetadata struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Metadata *structpb.Struct `protobuf:"bytes,1,opt,name=metadata,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantMetadata) Reset() { + *x = GrantMetadata{} + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantMetadata) ProtoMessage() {} + +func (x *GrantMetadata) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantMetadata) GetMetadata() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Metadata + } + return nil +} + +func (x *GrantMetadata) SetMetadata(v *structpb.Struct) { + x.xxx_hidden_Metadata = v +} + +func (x *GrantMetadata) HasMetadata() bool { + if x == nil { + return false + } + return x.xxx_hidden_Metadata != nil +} + +func (x *GrantMetadata) ClearMetadata() { + x.xxx_hidden_Metadata = nil +} + +type GrantMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Metadata *structpb.Struct +} + +func (b0 GrantMetadata_builder) Build() *GrantMetadata { + m0 := &GrantMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Metadata = b.Metadata + return m0 +} + +type GrantExpandable struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_EntitlementIds []string `protobuf:"bytes,1,rep,name=entitlement_ids,json=entitlementIds,proto3"` + xxx_hidden_Shallow bool `protobuf:"varint,2,opt,name=shallow,proto3"` + xxx_hidden_ResourceTypeIds []string `protobuf:"bytes,3,rep,name=resource_type_ids,json=resourceTypeIds,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantExpandable) Reset() { + *x = GrantExpandable{} + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantExpandable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantExpandable) ProtoMessage() {} + +func (x *GrantExpandable) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantExpandable) GetEntitlementIds() []string { + if x != nil { + return x.xxx_hidden_EntitlementIds + } + return nil +} + +func (x *GrantExpandable) GetShallow() bool { + if x != nil { + return x.xxx_hidden_Shallow + } + return false +} + +func (x *GrantExpandable) GetResourceTypeIds() []string { + if x != nil { + return x.xxx_hidden_ResourceTypeIds + } + return nil +} + +func (x *GrantExpandable) SetEntitlementIds(v []string) { + x.xxx_hidden_EntitlementIds = v +} + +func (x *GrantExpandable) SetShallow(v bool) { + x.xxx_hidden_Shallow = v +} + +func (x *GrantExpandable) SetResourceTypeIds(v []string) { + x.xxx_hidden_ResourceTypeIds = v +} + +type GrantExpandable_builder struct { + _ [0]func() // 
Prevents comparability and use of unkeyed literals for the builder. + + EntitlementIds []string + Shallow bool + ResourceTypeIds []string +} + +func (b0 GrantExpandable_builder) Build() *GrantExpandable { + m0 := &GrantExpandable{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_EntitlementIds = b.EntitlementIds + x.xxx_hidden_Shallow = b.Shallow + x.xxx_hidden_ResourceTypeIds = b.ResourceTypeIds + return m0 +} + +// Grant cannot be updated or revoked. For example, membership in an "all users" group. +type GrantImmutable struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SourceId string `protobuf:"bytes,1,opt,name=source_id,json=sourceId,proto3"` + xxx_hidden_Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantImmutable) Reset() { + *x = GrantImmutable{} + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantImmutable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantImmutable) ProtoMessage() {} + +func (x *GrantImmutable) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantImmutable) GetSourceId() string { + if x != nil { + return x.xxx_hidden_SourceId + } + return "" +} + +func (x *GrantImmutable) GetMetadata() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Metadata + } + return nil +} + +func (x *GrantImmutable) SetSourceId(v string) { + x.xxx_hidden_SourceId = v +} + +func (x *GrantImmutable) SetMetadata(v *structpb.Struct) { + x.xxx_hidden_Metadata = v +} + +func (x *GrantImmutable) HasMetadata() bool { + if x == nil { + return false + } + return x.xxx_hidden_Metadata != nil +} + +func (x *GrantImmutable) ClearMetadata() { + x.xxx_hidden_Metadata = nil +} + +type GrantImmutable_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SourceId string + Metadata *structpb.Struct +} + +func (b0 GrantImmutable_builder) Build() *GrantImmutable { + m0 := &GrantImmutable{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SourceId = b.SourceId + x.xxx_hidden_Metadata = b.Metadata + return m0 +} + +// Grant was not created because the entitlement already existed. +type GrantAlreadyExists struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantAlreadyExists) Reset() { + *x = GrantAlreadyExists{} + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantAlreadyExists) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantAlreadyExists) ProtoMessage() {} + +func (x *GrantAlreadyExists) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type GrantAlreadyExists_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 GrantAlreadyExists_builder) Build() *GrantAlreadyExists { + m0 := &GrantAlreadyExists{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +// Grant was not revoked because the entitlement does not exist. +type GrantAlreadyRevoked struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantAlreadyRevoked) Reset() { + *x = GrantAlreadyRevoked{} + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantAlreadyRevoked) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantAlreadyRevoked) ProtoMessage() {} + +func (x *GrantAlreadyRevoked) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type GrantAlreadyRevoked_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 GrantAlreadyRevoked_builder) Build() *GrantAlreadyRevoked { + m0 := &GrantAlreadyRevoked{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +// If a resource for a grant doesn't exist, insert it. +type InsertResourceGrants struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InsertResourceGrants) Reset() { + *x = InsertResourceGrants{} + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InsertResourceGrants) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InsertResourceGrants) ProtoMessage() {} + +func (x *InsertResourceGrants) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_grant_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type InsertResourceGrants_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 InsertResourceGrants_builder) Build() *InsertResourceGrants { + m0 := &InsertResourceGrants{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connector_v2_annotation_grant_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_grant_proto_rawDesc = "" + + "\n" + + "&c1/connector/v2/annotation_grant.proto\x12\x0fc1.connector.v2\x1a\x1cgoogle/protobuf/struct.proto\"D\n" + + "\rGrantMetadata\x123\n" + + "\bmetadata\x18\x01 \x01(\v2\x17.google.protobuf.StructR\bmetadata\"\x80\x01\n" + + "\x0fGrantExpandable\x12'\n" + + "\x0fentitlement_ids\x18\x01 \x03(\tR\x0eentitlementIds\x12\x18\n" + + "\ashallow\x18\x02 \x01(\bR\ashallow\x12*\n" + + "\x11resource_type_ids\x18\x03 \x03(\tR\x0fresourceTypeIds\"b\n" + + "\x0eGrantImmutable\x12\x1b\n" + + "\tsource_id\x18\x01 \x01(\tR\bsourceId\x123\n" + + "\bmetadata\x18\x02 \x01(\v2\x17.google.protobuf.StructR\bmetadata\"\x14\n" + + "\x12GrantAlreadyExists\"\x15\n" + + "\x13GrantAlreadyRevoked\"\x16\n" + + "\x14InsertResourceGrantsB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_c1_connector_v2_annotation_grant_proto_goTypes = []any{ + (*GrantMetadata)(nil), // 0: c1.connector.v2.GrantMetadata + (*GrantExpandable)(nil), // 1: c1.connector.v2.GrantExpandable + (*GrantImmutable)(nil), // 2: c1.connector.v2.GrantImmutable + (*GrantAlreadyExists)(nil), // 3: c1.connector.v2.GrantAlreadyExists + (*GrantAlreadyRevoked)(nil), // 4: c1.connector.v2.GrantAlreadyRevoked + (*InsertResourceGrants)(nil), // 5: c1.connector.v2.InsertResourceGrants + (*structpb.Struct)(nil), // 6: google.protobuf.Struct +} +var file_c1_connector_v2_annotation_grant_proto_depIdxs = []int32{ + 6, // 0: c1.connector.v2.GrantMetadata.metadata:type_name -> google.protobuf.Struct + 6, // 1: c1.connector.v2.GrantImmutable.metadata:type_name -> google.protobuf.Struct + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_grant_proto_init() } +func file_c1_connector_v2_annotation_grant_proto_init() { + if File_c1_connector_v2_annotation_grant_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_grant_proto_rawDesc), len(file_c1_connector_v2_annotation_grant_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_grant_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_grant_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_grant_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_grant_proto = out.File + file_c1_connector_v2_annotation_grant_proto_goTypes = nil + file_c1_connector_v2_annotation_grant_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_ratelimit.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_ratelimit.pb.go index 3f711ada..9522f6d9 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_ratelimit.pb.go +++ 
b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_ratelimit.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_ratelimit.proto +//go:build !protoopaque + package v2 import ( @@ -11,7 +13,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -69,13 +70,8 @@ func (x RateLimitDescription_Status) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use RateLimitDescription_Status.Descriptor instead. -func (RateLimitDescription_Status) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_ratelimit_proto_rawDescGZIP(), []int{0, 0} -} - type RateLimitDescription struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Status RateLimitDescription_Status `protobuf:"varint,1,opt,name=status,proto3,enum=c1.connector.v2.RateLimitDescription_Status" json:"status,omitempty"` Limit int64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` Remaining int64 `protobuf:"varint,3,opt,name=remaining,proto3" json:"remaining,omitempty"` @@ -109,11 +105,6 @@ func (x *RateLimitDescription) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimitDescription.ProtoReflect.Descriptor instead. -func (*RateLimitDescription) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_ratelimit_proto_rawDescGZIP(), []int{0} -} - func (x *RateLimitDescription) GetStatus() RateLimitDescription_Status { if x != nil { return x.Status @@ -142,52 +133,69 @@ func (x *RateLimitDescription) GetResetAt() *timestamppb.Timestamp { return nil } -var File_c1_connector_v2_annotation_ratelimit_proto protoreflect.FileDescriptor +func (x *RateLimitDescription) SetStatus(v RateLimitDescription_Status) { + x.Status = v +} -var file_c1_connector_v2_annotation_ratelimit_proto_rawDesc = string([]byte{ - 0x0a, 0x2a, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x74, - 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa0, - 0x02, 0x0a, 0x14, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x18, 0x03, 0x20, 0x01, 
0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x07, 0x72, 0x65, 0x73, 0x65, 0x74, 0x41, 0x74, 0x22, 0x57, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x12, - 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, - 0x03, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, - 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -}) +func (x *RateLimitDescription) SetLimit(v int64) { + x.Limit = v +} -var ( - file_c1_connector_v2_annotation_ratelimit_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_ratelimit_proto_rawDescData []byte -) +func (x *RateLimitDescription) SetRemaining(v int64) { + x.Remaining = v +} + +func (x *RateLimitDescription) SetResetAt(v *timestamppb.Timestamp) { + x.ResetAt = v +} + +func (x *RateLimitDescription) HasResetAt() bool { + if x == nil { + return false + } + return x.ResetAt != nil +} + +func (x *RateLimitDescription) ClearResetAt() { + x.ResetAt = nil +} -func file_c1_connector_v2_annotation_ratelimit_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_ratelimit_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_ratelimit_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_ratelimit_proto_rawDesc), len(file_c1_connector_v2_annotation_ratelimit_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_ratelimit_proto_rawDescData +type RateLimitDescription_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Status RateLimitDescription_Status + Limit int64 + Remaining int64 + ResetAt *timestamppb.Timestamp +} + +func (b0 RateLimitDescription_builder) Build() *RateLimitDescription { + m0 := &RateLimitDescription{} + b, x := &b0, m0 + _, _ = b, x + x.Status = b.Status + x.Limit = b.Limit + x.Remaining = b.Remaining + x.ResetAt = b.ResetAt + return m0 } +var File_c1_connector_v2_annotation_ratelimit_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_ratelimit_proto_rawDesc = "" + + "\n" + + "*c1/connector/v2/annotation_ratelimit.proto\x12\x0fc1.connector.v2\x1a\x1fgoogle/protobuf/timestamp.proto\"\xa0\x02\n" + + "\x14RateLimitDescription\x12D\n" + + "\x06status\x18\x01 \x01(\x0e2,.c1.connector.v2.RateLimitDescription.StatusR\x06status\x12\x14\n" + + "\x05limit\x18\x02 \x01(\x03R\x05limit\x12\x1c\n" + + "\tremaining\x18\x03 \x01(\x03R\tremaining\x125\n" + + "\breset_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\aresetAt\"W\n" + + "\x06Status\x12\x16\n" + + "\x12STATUS_UNSPECIFIED\x10\x00\x12\r\n" + + "\tSTATUS_OK\x10\x01\x12\x14\n" + + "\x10STATUS_OVERLIMIT\x10\x02\x12\x10\n" + + "\fSTATUS_ERROR\x10\x03B6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_ratelimit_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_c1_connector_v2_annotation_ratelimit_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_c1_connector_v2_annotation_ratelimit_proto_goTypes = []any{ diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_ratelimit_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_ratelimit_protoopaque.pb.go new file mode 100644 index 00000000..f96edadf --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_ratelimit_protoopaque.pb.go @@ -0,0 +1,239 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_ratelimit.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RateLimitDescription_Status int32 + +const ( + RateLimitDescription_STATUS_UNSPECIFIED RateLimitDescription_Status = 0 + RateLimitDescription_STATUS_OK RateLimitDescription_Status = 1 + RateLimitDescription_STATUS_OVERLIMIT RateLimitDescription_Status = 2 + RateLimitDescription_STATUS_ERROR RateLimitDescription_Status = 3 +) + +// Enum value maps for RateLimitDescription_Status. 
+var ( + RateLimitDescription_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "STATUS_OK", + 2: "STATUS_OVERLIMIT", + 3: "STATUS_ERROR", + } + RateLimitDescription_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "STATUS_OK": 1, + "STATUS_OVERLIMIT": 2, + "STATUS_ERROR": 3, + } +) + +func (x RateLimitDescription_Status) Enum() *RateLimitDescription_Status { + p := new(RateLimitDescription_Status) + *p = x + return p +} + +func (x RateLimitDescription_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimitDescription_Status) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_annotation_ratelimit_proto_enumTypes[0].Descriptor() +} + +func (RateLimitDescription_Status) Type() protoreflect.EnumType { + return &file_c1_connector_v2_annotation_ratelimit_proto_enumTypes[0] +} + +func (x RateLimitDescription_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type RateLimitDescription struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Status RateLimitDescription_Status `protobuf:"varint,1,opt,name=status,proto3,enum=c1.connector.v2.RateLimitDescription_Status"` + xxx_hidden_Limit int64 `protobuf:"varint,2,opt,name=limit,proto3"` + xxx_hidden_Remaining int64 `protobuf:"varint,3,opt,name=remaining,proto3"` + xxx_hidden_ResetAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=reset_at,json=resetAt,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RateLimitDescription) Reset() { + *x = RateLimitDescription{} + mi := &file_c1_connector_v2_annotation_ratelimit_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RateLimitDescription) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitDescription) ProtoMessage() {} + +func (x *RateLimitDescription) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_ratelimit_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RateLimitDescription) GetStatus() RateLimitDescription_Status { + if x != nil { + return x.xxx_hidden_Status + } + return RateLimitDescription_STATUS_UNSPECIFIED +} + +func (x *RateLimitDescription) GetLimit() int64 { + if x != nil { + return x.xxx_hidden_Limit + } + return 0 +} + +func (x *RateLimitDescription) GetRemaining() int64 { + if x != nil { + return x.xxx_hidden_Remaining + } + return 0 +} + +func (x *RateLimitDescription) GetResetAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_ResetAt + } + return nil +} + +func (x *RateLimitDescription) SetStatus(v RateLimitDescription_Status) { + x.xxx_hidden_Status = v +} + +func (x *RateLimitDescription) SetLimit(v int64) { + x.xxx_hidden_Limit = v +} + +func (x *RateLimitDescription) SetRemaining(v int64) { + x.xxx_hidden_Remaining = v +} + +func (x *RateLimitDescription) SetResetAt(v *timestamppb.Timestamp) { + x.xxx_hidden_ResetAt = v +} + +func (x *RateLimitDescription) HasResetAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResetAt != nil +} + +func (x *RateLimitDescription) ClearResetAt() { + x.xxx_hidden_ResetAt = nil +} + +type RateLimitDescription_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Status RateLimitDescription_Status + Limit int64 + Remaining int64 + ResetAt *timestamppb.Timestamp +} + +func (b0 RateLimitDescription_builder) Build() *RateLimitDescription { + m0 := &RateLimitDescription{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Limit = b.Limit + x.xxx_hidden_Remaining = b.Remaining + x.xxx_hidden_ResetAt = b.ResetAt + return m0 +} + +var File_c1_connector_v2_annotation_ratelimit_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_ratelimit_proto_rawDesc = "" + + "\n" + + "*c1/connector/v2/annotation_ratelimit.proto\x12\x0fc1.connector.v2\x1a\x1fgoogle/protobuf/timestamp.proto\"\xa0\x02\n" + + "\x14RateLimitDescription\x12D\n" + + "\x06status\x18\x01 \x01(\x0e2,.c1.connector.v2.RateLimitDescription.StatusR\x06status\x12\x14\n" + + "\x05limit\x18\x02 \x01(\x03R\x05limit\x12\x1c\n" + + "\tremaining\x18\x03 \x01(\x03R\tremaining\x125\n" + + "\breset_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\aresetAt\"W\n" + + "\x06Status\x12\x16\n" + + "\x12STATUS_UNSPECIFIED\x10\x00\x12\r\n" + + "\tSTATUS_OK\x10\x01\x12\x14\n" + + "\x10STATUS_OVERLIMIT\x10\x02\x12\x10\n" + + "\fSTATUS_ERROR\x10\x03B6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_ratelimit_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_c1_connector_v2_annotation_ratelimit_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_ratelimit_proto_goTypes = []any{ + (RateLimitDescription_Status)(0), // 0: c1.connector.v2.RateLimitDescription.Status + (*RateLimitDescription)(nil), // 1: c1.connector.v2.RateLimitDescription + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp +} +var file_c1_connector_v2_annotation_ratelimit_proto_depIdxs = []int32{ + 0, // 0: c1.connector.v2.RateLimitDescription.status:type_name -> c1.connector.v2.RateLimitDescription.Status + 2, // 1: c1.connector.v2.RateLimitDescription.reset_at:type_name -> google.protobuf.Timestamp + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_ratelimit_proto_init() } +func file_c1_connector_v2_annotation_ratelimit_proto_init() { + if File_c1_connector_v2_annotation_ratelimit_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_ratelimit_proto_rawDesc), len(file_c1_connector_v2_annotation_ratelimit_proto_rawDesc)), + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_ratelimit_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_ratelimit_proto_depIdxs, + EnumInfos: file_c1_connector_v2_annotation_ratelimit_proto_enumTypes, + MessageInfos: file_c1_connector_v2_annotation_ratelimit_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_ratelimit_proto = out.File + file_c1_connector_v2_annotation_ratelimit_proto_goTypes = nil + file_c1_connector_v2_annotation_ratelimit_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.go 
b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.go index 72063239..b80d787e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_raw_id.proto +//go:build !protoopaque + package v2 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( // Raw ID from whatever API the resource/entitlement/grant came from. type RawId struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -54,11 +55,6 @@ func (x *RawId) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RawId.ProtoReflect.Descriptor instead. -func (*RawId) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_raw_id_proto_rawDescGZIP(), []int{0} -} - func (x *RawId) GetId() string { if x != nil { return x.Id @@ -66,33 +62,32 @@ func (x *RawId) GetId() string { return "" } -var File_c1_connector_v2_annotation_raw_id_proto protoreflect.FileDescriptor +func (x *RawId) SetId(v string) { + x.Id = v +} -var file_c1_connector_v2_annotation_raw_id_proto_rawDesc = string([]byte{ - 0x0a, 0x27, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x77, - 0x5f, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x22, 0x17, 0x0a, 0x05, 0x52, 0x61, - 0x77, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, - 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_raw_id_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_raw_id_proto_rawDescData []byte -) +type RawId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-func file_c1_connector_v2_annotation_raw_id_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_raw_id_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_raw_id_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_raw_id_proto_rawDesc), len(file_c1_connector_v2_annotation_raw_id_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_raw_id_proto_rawDescData + Id string } +func (b0 RawId_builder) Build() *RawId { + m0 := &RawId{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + return m0 +} + +var File_c1_connector_v2_annotation_raw_id_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_raw_id_proto_rawDesc = "" + + "\n" + + "'c1/connector/v2/annotation_raw_id.proto\x12\x0fc1.connector.v2\"\x17\n" + + "\x05RawId\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02idB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_raw_id_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_c1_connector_v2_annotation_raw_id_proto_goTypes = []any{ (*RawId)(nil), // 0: c1.connector.v2.RawId diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id_protoopaque.pb.go new file mode 100644 index 00000000..48b8713d --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id_protoopaque.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_raw_id.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Raw ID from whatever API the resource/entitlement/grant came from. +type RawId struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawId) Reset() { + *x = RawId{} + mi := &file_c1_connector_v2_annotation_raw_id_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawId) ProtoMessage() {} + +func (x *RawId) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_raw_id_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RawId) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *RawId) SetId(v string) { + x.xxx_hidden_Id = v +} + +type RawId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string +} + +func (b0 RawId_builder) Build() *RawId { + m0 := &RawId{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + return m0 +} + +var File_c1_connector_v2_annotation_raw_id_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_raw_id_proto_rawDesc = "" + + "\n" + + "'c1/connector/v2/annotation_raw_id.proto\x12\x0fc1.connector.v2\"\x17\n" + + "\x05RawId\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02idB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_raw_id_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_raw_id_proto_goTypes = []any{ + (*RawId)(nil), // 0: c1.connector.v2.RawId +} +var file_c1_connector_v2_annotation_raw_id_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_raw_id_proto_init() } +func file_c1_connector_v2_annotation_raw_id_proto_init() { + if File_c1_connector_v2_annotation_raw_id_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_raw_id_proto_rawDesc), len(file_c1_connector_v2_annotation_raw_id_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_raw_id_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_raw_id_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_raw_id_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_raw_id_proto = out.File + file_c1_connector_v2_annotation_raw_id_proto_goTypes = nil + file_c1_connector_v2_annotation_raw_id_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_request.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_request.pb.go index 50d405e7..73e1f9cd 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_request.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_request.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_request.proto +//go:build !protoopaque + package v2 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -22,7 +23,7 @@ const ( ) type RequestId struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -53,11 +54,6 @@ func (x *RequestId) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RequestId.ProtoReflect.Descriptor instead. 
-func (*RequestId) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_request_proto_rawDescGZIP(), []int{0} -} - func (x *RequestId) GetRequestId() string { if x != nil { return x.RequestId @@ -65,34 +61,33 @@ func (x *RequestId) GetRequestId() string { return "" } -var File_c1_connector_v2_annotation_request_proto protoreflect.FileDescriptor +func (x *RequestId) SetRequestId(v string) { + x.RequestId = v +} -var file_c1_connector_v2_annotation_request_proto_rawDesc = string([]byte{ - 0x0a, 0x28, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x22, 0x2a, 0x0a, 0x09, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, - 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, - 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_request_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_request_proto_rawDescData []byte -) +type RequestId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. -func file_c1_connector_v2_annotation_request_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_request_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_request_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_request_proto_rawDesc), len(file_c1_connector_v2_annotation_request_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_request_proto_rawDescData + RequestId string } +func (b0 RequestId_builder) Build() *RequestId { + m0 := &RequestId{} + b, x := &b0, m0 + _, _ = b, x + x.RequestId = b.RequestId + return m0 +} + +var File_c1_connector_v2_annotation_request_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_request_proto_rawDesc = "" + + "\n" + + "(c1/connector/v2/annotation_request.proto\x12\x0fc1.connector.v2\"*\n" + + "\tRequestId\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_request_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_c1_connector_v2_annotation_request_proto_goTypes = []any{ (*RequestId)(nil), // 0: c1.connector.v2.RequestId diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_request_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_request_protoopaque.pb.go new file mode 100644 index 00000000..574527ec --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_request_protoopaque.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_request.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RequestId struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestId) Reset() { + *x = RequestId{} + mi := &file_c1_connector_v2_annotation_request_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestId) ProtoMessage() {} + +func (x *RequestId) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_request_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RequestId) GetRequestId() string { + if x != nil { + return x.xxx_hidden_RequestId + } + return "" +} + +func (x *RequestId) SetRequestId(v string) { + x.xxx_hidden_RequestId = v +} + +type RequestId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + RequestId string +} + +func (b0 RequestId_builder) Build() *RequestId { + m0 := &RequestId{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_RequestId = b.RequestId + return m0 +} + +var File_c1_connector_v2_annotation_request_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_request_proto_rawDesc = "" + + "\n" + + "(c1/connector/v2/annotation_request.proto\x12\x0fc1.connector.v2\"*\n" + + "\tRequestId\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_request_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_request_proto_goTypes = []any{ + (*RequestId)(nil), // 0: c1.connector.v2.RequestId +} +var file_c1_connector_v2_annotation_request_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_request_proto_init() } +func file_c1_connector_v2_annotation_request_proto_init() { + if File_c1_connector_v2_annotation_request_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_request_proto_rawDesc), len(file_c1_connector_v2_annotation_request_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_request_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_request_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_request_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_request_proto = out.File + file_c1_connector_v2_annotation_request_proto_goTypes = nil + file_c1_connector_v2_annotation_request_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree.pb.go index 61612ece..407721f2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_resource_tree.proto +//go:build !protoopaque + package v2 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -22,7 +23,7 @@ const ( ) type ChildResourceType struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -53,11 +54,6 @@ func (x *ChildResourceType) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ChildResourceType.ProtoReflect.Descriptor instead. 
-func (*ChildResourceType) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_resource_tree_proto_rawDescGZIP(), []int{0} -} - func (x *ChildResourceType) GetResourceTypeId() string { if x != nil { return x.ResourceTypeId @@ -65,8 +61,26 @@ func (x *ChildResourceType) GetResourceTypeId() string { return "" } +func (x *ChildResourceType) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +type ChildResourceType_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceTypeId string +} + +func (b0 ChildResourceType_builder) Build() *ChildResourceType { + m0 := &ChildResourceType{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceTypeId = b.ResourceTypeId + return m0 +} + type SkipEntitlementsAndGrants struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -96,13 +110,20 @@ func (x *SkipEntitlementsAndGrants) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SkipEntitlementsAndGrants.ProtoReflect.Descriptor instead. -func (*SkipEntitlementsAndGrants) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_resource_tree_proto_rawDescGZIP(), []int{1} +type SkipEntitlementsAndGrants_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 SkipEntitlementsAndGrants_builder) Build() *SkipEntitlementsAndGrants { + m0 := &SkipEntitlementsAndGrants{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type SkipGrants struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -132,48 +153,79 @@ func (x *SkipGrants) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SkipGrants.ProtoReflect.Descriptor instead. -func (*SkipGrants) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_resource_tree_proto_rawDescGZIP(), []int{2} +type SkipGrants_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ } -var File_c1_connector_v2_annotation_resource_tree_proto protoreflect.FileDescriptor +func (b0 SkipGrants_builder) Build() *SkipGrants { + m0 := &SkipGrants{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} -var file_c1_connector_v2_annotation_resource_tree_proto_rawDesc = string([]byte{ - 0x0a, 0x2e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x22, 0x3d, 0x0a, 0x11, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x49, 0x64, - 0x22, 0x1b, 0x0a, 0x19, 0x53, 0x6b, 0x69, 0x70, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x41, 0x6e, 0x64, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x0c, 0x0a, - 0x0a, 0x53, 0x6b, 0x69, 0x70, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x42, 0x36, 0x5a, 0x34, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, - 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, - 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_resource_tree_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_resource_tree_proto_rawDescData []byte -) +type SkipEntitlements struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SkipEntitlements) Reset() { + *x = SkipEntitlements{} + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SkipEntitlements) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SkipEntitlements) ProtoMessage() {} + +func (x *SkipEntitlements) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type SkipEntitlements_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-func file_c1_connector_v2_annotation_resource_tree_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_resource_tree_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_resource_tree_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_resource_tree_proto_rawDesc), len(file_c1_connector_v2_annotation_resource_tree_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_resource_tree_proto_rawDescData } -var file_c1_connector_v2_annotation_resource_tree_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +func (b0 SkipEntitlements_builder) Build() *SkipEntitlements { + m0 := &SkipEntitlements{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connector_v2_annotation_resource_tree_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_resource_tree_proto_rawDesc = "" + + "\n" + + ".c1/connector/v2/annotation_resource_tree.proto\x12\x0fc1.connector.v2\"=\n" + + "\x11ChildResourceType\x12(\n" + + "\x10resource_type_id\x18\x01 \x01(\tR\x0eresourceTypeId\"\x1b\n" + + "\x19SkipEntitlementsAndGrants\"\f\n" + + "\n" + + "SkipGrants\"\x12\n" + + "\x10SkipEntitlementsB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_resource_tree_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_c1_connector_v2_annotation_resource_tree_proto_goTypes = []any{ (*ChildResourceType)(nil), // 0: c1.connector.v2.ChildResourceType (*SkipEntitlementsAndGrants)(nil), // 1: c1.connector.v2.SkipEntitlementsAndGrants (*SkipGrants)(nil), // 2: c1.connector.v2.SkipGrants + (*SkipEntitlements)(nil), // 3: c1.connector.v2.SkipEntitlements } var file_c1_connector_v2_annotation_resource_tree_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -194,7 +246,7 @@ func file_c1_connector_v2_annotation_resource_tree_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_resource_tree_proto_rawDesc), len(file_c1_connector_v2_annotation_resource_tree_proto_rawDesc)), NumEnums: 0, - NumMessages: 3, + NumMessages: 4, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree.pb.validate.go index 36e768bd..8cff9db3 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree.pb.validate.go @@ -339,3 +339,103 @@ var _ interface { Cause() error ErrorName() string } = SkipGrantsValidationError{} + +// Validate checks the field values on SkipEntitlements with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *SkipEntitlements) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SkipEntitlements with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SkipEntitlementsMultiError, or nil if none found. 
+func (m *SkipEntitlements) ValidateAll() error { + return m.validate(true) +} + +func (m *SkipEntitlements) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return SkipEntitlementsMultiError(errors) + } + + return nil +} + +// SkipEntitlementsMultiError is an error wrapping multiple validation errors +// returned by SkipEntitlements.ValidateAll() if the designated constraints +// aren't met. +type SkipEntitlementsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SkipEntitlementsMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SkipEntitlementsMultiError) AllErrors() []error { return m } + +// SkipEntitlementsValidationError is the validation error returned by +// SkipEntitlements.Validate if the designated constraints aren't met. +type SkipEntitlementsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SkipEntitlementsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SkipEntitlementsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SkipEntitlementsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SkipEntitlementsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SkipEntitlementsValidationError) ErrorName() string { return "SkipEntitlementsValidationError" } + +// Error satisfies the builtin error interface +func (e SkipEntitlementsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSkipEntitlements.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SkipEntitlementsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SkipEntitlementsValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree_protoopaque.pb.go new file mode 100644 index 00000000..187165f9 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_tree_protoopaque.pb.go @@ -0,0 +1,260 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_resource_tree.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ChildResourceType struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChildResourceType) Reset() { + *x = ChildResourceType{} + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChildResourceType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChildResourceType) ProtoMessage() {} + +func (x *ChildResourceType) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ChildResourceType) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *ChildResourceType) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +type ChildResourceType_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceTypeId string +} + +func (b0 ChildResourceType_builder) Build() *ChildResourceType { + m0 := &ChildResourceType{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + return m0 +} + +type SkipEntitlementsAndGrants struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SkipEntitlementsAndGrants) Reset() { + *x = SkipEntitlementsAndGrants{} + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SkipEntitlementsAndGrants) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SkipEntitlementsAndGrants) ProtoMessage() {} + +func (x *SkipEntitlementsAndGrants) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type SkipEntitlementsAndGrants_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 SkipEntitlementsAndGrants_builder) Build() *SkipEntitlementsAndGrants { + m0 := &SkipEntitlementsAndGrants{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type SkipGrants struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SkipGrants) Reset() { + *x = SkipGrants{} + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SkipGrants) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SkipGrants) ProtoMessage() {} + +func (x *SkipGrants) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type SkipGrants_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 SkipGrants_builder) Build() *SkipGrants { + m0 := &SkipGrants{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type SkipEntitlements struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SkipEntitlements) Reset() { + *x = SkipEntitlements{} + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SkipEntitlements) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SkipEntitlements) ProtoMessage() {} + +func (x *SkipEntitlements) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_resource_tree_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type SkipEntitlements_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 SkipEntitlements_builder) Build() *SkipEntitlements { + m0 := &SkipEntitlements{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connector_v2_annotation_resource_tree_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_resource_tree_proto_rawDesc = "" + + "\n" + + ".c1/connector/v2/annotation_resource_tree.proto\x12\x0fc1.connector.v2\"=\n" + + "\x11ChildResourceType\x12(\n" + + "\x10resource_type_id\x18\x01 \x01(\tR\x0eresourceTypeId\"\x1b\n" + + "\x19SkipEntitlementsAndGrants\"\f\n" + + "\n" + + "SkipGrants\"\x12\n" + + "\x10SkipEntitlementsB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_resource_tree_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_c1_connector_v2_annotation_resource_tree_proto_goTypes = []any{ + (*ChildResourceType)(nil), // 0: c1.connector.v2.ChildResourceType + (*SkipEntitlementsAndGrants)(nil), // 1: c1.connector.v2.SkipEntitlementsAndGrants + (*SkipGrants)(nil), // 2: c1.connector.v2.SkipGrants + (*SkipEntitlements)(nil), // 3: c1.connector.v2.SkipEntitlements +} +var file_c1_connector_v2_annotation_resource_tree_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_resource_tree_proto_init() } +func file_c1_connector_v2_annotation_resource_tree_proto_init() { + if File_c1_connector_v2_annotation_resource_tree_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_resource_tree_proto_rawDesc), len(file_c1_connector_v2_annotation_resource_tree_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_resource_tree_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_resource_tree_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_resource_tree_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_resource_tree_proto = out.File + file_c1_connector_v2_annotation_resource_tree_proto_goTypes = nil + file_c1_connector_v2_annotation_resource_tree_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.go new file mode 100644 index 00000000..9da76c41 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.go @@ -0,0 +1,535 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_security_insight.proto + +//go:build !protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SecurityInsightTrait is the trait annotation for resources with TRAIT_SECURITY_INSIGHT. +// It contains the metadata for the security insight including type, value, observation time, +// and the target entity (user or resource) that this insight should be bound to. +type SecurityInsightTrait struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The type of insight (e.g., "crowdstrike_zta_score", "wiz_critical_vulnerability") + InsightType string `protobuf:"bytes,1,opt,name=insight_type,json=insightType,proto3" json:"insight_type,omitempty"` + // The value of the insight (e.g., "85", "High", "Critical") + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // When this insight was observed/captured from the source system + ObservedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=observed_at,json=observedAt,proto3" json:"observed_at,omitempty"` + // The target entity this insight should be bound to + // + // Types that are valid to be assigned to Target: + // + // *SecurityInsightTrait_User + // *SecurityInsightTrait_ResourceId + // *SecurityInsightTrait_ExternalResource + Target isSecurityInsightTrait_Target `protobuf_oneof:"target"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityInsightTrait) Reset() { + *x = SecurityInsightTrait{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityInsightTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityInsightTrait) ProtoMessage() {} + +func (x *SecurityInsightTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityInsightTrait) GetInsightType() string { + if x != nil { + return x.InsightType + } + return "" +} + +func (x *SecurityInsightTrait) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *SecurityInsightTrait) GetObservedAt() *timestamppb.Timestamp { + if x != nil { + return x.ObservedAt + } + return nil +} + +func (x *SecurityInsightTrait) GetTarget() isSecurityInsightTrait_Target { + if x != nil { + return x.Target + } + return nil +} + +func (x *SecurityInsightTrait) GetUser() *SecurityInsightTrait_UserTarget { + if x != nil { + if x, ok := x.Target.(*SecurityInsightTrait_User); ok { + return x.User + } + } + return nil +} + +func (x *SecurityInsightTrait) GetResourceId() *ResourceId { + if x != nil { + if x, ok := x.Target.(*SecurityInsightTrait_ResourceId); ok { + return x.ResourceId + } + } + return nil +} + +func (x *SecurityInsightTrait) GetExternalResource() *SecurityInsightTrait_ExternalResourceTarget { + if x != nil { + if x, ok := x.Target.(*SecurityInsightTrait_ExternalResource); ok { + return x.ExternalResource + } + } + return nil +} + +func (x *SecurityInsightTrait) SetInsightType(v string) { + x.InsightType = v +} + +func (x *SecurityInsightTrait) SetValue(v string) { + x.Value = v +} + +func (x *SecurityInsightTrait) SetObservedAt(v *timestamppb.Timestamp) { + x.ObservedAt = v +} + +func (x 
*SecurityInsightTrait) SetUser(v *SecurityInsightTrait_UserTarget) { + if v == nil { + x.Target = nil + return + } + x.Target = &SecurityInsightTrait_User{v} +} + +func (x *SecurityInsightTrait) SetResourceId(v *ResourceId) { + if v == nil { + x.Target = nil + return + } + x.Target = &SecurityInsightTrait_ResourceId{v} +} + +func (x *SecurityInsightTrait) SetExternalResource(v *SecurityInsightTrait_ExternalResourceTarget) { + if v == nil { + x.Target = nil + return + } + x.Target = &SecurityInsightTrait_ExternalResource{v} +} + +func (x *SecurityInsightTrait) HasObservedAt() bool { + if x == nil { + return false + } + return x.ObservedAt != nil +} + +func (x *SecurityInsightTrait) HasTarget() bool { + if x == nil { + return false + } + return x.Target != nil +} + +func (x *SecurityInsightTrait) HasUser() bool { + if x == nil { + return false + } + _, ok := x.Target.(*SecurityInsightTrait_User) + return ok +} + +func (x *SecurityInsightTrait) HasResourceId() bool { + if x == nil { + return false + } + _, ok := x.Target.(*SecurityInsightTrait_ResourceId) + return ok +} + +func (x *SecurityInsightTrait) HasExternalResource() bool { + if x == nil { + return false + } + _, ok := x.Target.(*SecurityInsightTrait_ExternalResource) + return ok +} + +func (x *SecurityInsightTrait) ClearObservedAt() { + x.ObservedAt = nil +} + +func (x *SecurityInsightTrait) ClearTarget() { + x.Target = nil +} + +func (x *SecurityInsightTrait) ClearUser() { + if _, ok := x.Target.(*SecurityInsightTrait_User); ok { + x.Target = nil + } +} + +func (x *SecurityInsightTrait) ClearResourceId() { + if _, ok := x.Target.(*SecurityInsightTrait_ResourceId); ok { + x.Target = nil + } +} + +func (x *SecurityInsightTrait) ClearExternalResource() { + if _, ok := x.Target.(*SecurityInsightTrait_ExternalResource); ok { + x.Target = nil + } +} + +const SecurityInsightTrait_Target_not_set_case case_SecurityInsightTrait_Target = 0 +const SecurityInsightTrait_User_case case_SecurityInsightTrait_Target = 4 +const SecurityInsightTrait_ResourceId_case case_SecurityInsightTrait_Target = 5 +const SecurityInsightTrait_ExternalResource_case case_SecurityInsightTrait_Target = 6 + +func (x *SecurityInsightTrait) WhichTarget() case_SecurityInsightTrait_Target { + if x == nil { + return SecurityInsightTrait_Target_not_set_case + } + switch x.Target.(type) { + case *SecurityInsightTrait_User: + return SecurityInsightTrait_User_case + case *SecurityInsightTrait_ResourceId: + return SecurityInsightTrait_ResourceId_case + case *SecurityInsightTrait_ExternalResource: + return SecurityInsightTrait_ExternalResource_case + default: + return SecurityInsightTrait_Target_not_set_case + } +} + +type SecurityInsightTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // The type of insight (e.g., "crowdstrike_zta_score", "wiz_critical_vulnerability") + InsightType string + // The value of the insight (e.g., "85", "High", "Critical") + Value string + // When this insight was observed/captured from the source system + ObservedAt *timestamppb.Timestamp + // The target entity this insight should be bound to + + // Fields of oneof Target: + // For binding to a C1 User by email address + User *SecurityInsightTrait_UserTarget + // For direct reference to a resource the connector knows about + ResourceId *ResourceId + // For binding to an AppResource by external ID + ExternalResource *SecurityInsightTrait_ExternalResourceTarget + // -- end of Target +} + +func (b0 SecurityInsightTrait_builder) Build() *SecurityInsightTrait { + m0 := &SecurityInsightTrait{} + b, x := &b0, m0 + _, _ = b, x + x.InsightType = b.InsightType + x.Value = b.Value + x.ObservedAt = b.ObservedAt + if b.User != nil { + x.Target = &SecurityInsightTrait_User{b.User} + } + if b.ResourceId != nil { + x.Target = &SecurityInsightTrait_ResourceId{b.ResourceId} + } + if b.ExternalResource != nil { + x.Target = &SecurityInsightTrait_ExternalResource{b.ExternalResource} + } + return m0 +} + +type case_SecurityInsightTrait_Target protoreflect.FieldNumber + +func (x case_SecurityInsightTrait_Target) String() string { + md := file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isSecurityInsightTrait_Target interface { + isSecurityInsightTrait_Target() +} + +type SecurityInsightTrait_User struct { + // For binding to a C1 User by email address + User *SecurityInsightTrait_UserTarget `protobuf:"bytes,4,opt,name=user,proto3,oneof"` +} + +type SecurityInsightTrait_ResourceId struct { + // For direct reference to a resource the connector knows about + ResourceId *ResourceId `protobuf:"bytes,5,opt,name=resource_id,json=resourceId,proto3,oneof"` +} + +type SecurityInsightTrait_ExternalResource struct { + // For binding to an AppResource by external ID + ExternalResource *SecurityInsightTrait_ExternalResourceTarget `protobuf:"bytes,6,opt,name=external_resource,json=externalResource,proto3,oneof"` +} + +func (*SecurityInsightTrait_User) isSecurityInsightTrait_Target() {} + +func (*SecurityInsightTrait_ResourceId) isSecurityInsightTrait_Target() {} + +func (*SecurityInsightTrait_ExternalResource) isSecurityInsightTrait_Target() {} + +// UserTarget identifies a user by email for resolution to a C1 User +type SecurityInsightTrait_UserTarget struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityInsightTrait_UserTarget) Reset() { + *x = SecurityInsightTrait_UserTarget{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityInsightTrait_UserTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityInsightTrait_UserTarget) ProtoMessage() {} + +func (x *SecurityInsightTrait_UserTarget) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + 
} + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityInsightTrait_UserTarget) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *SecurityInsightTrait_UserTarget) SetEmail(v string) { + x.Email = v +} + +type SecurityInsightTrait_UserTarget_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Email string +} + +func (b0 SecurityInsightTrait_UserTarget_builder) Build() *SecurityInsightTrait_UserTarget { + m0 := &SecurityInsightTrait_UserTarget{} + b, x := &b0, m0 + _, _ = b, x + x.Email = b.Email + return m0 +} + +// ExternalResourceTarget identifies a resource by external ID for resolution to an AppResource. +// Use this when the connector doesn't sync the target resource itself. +type SecurityInsightTrait_ExternalResourceTarget struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The external identifier of the resource (e.g., ARN, GUID, etc.) + ExternalId string `protobuf:"bytes,1,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"` + // Optional hint to help find the owning app (e.g., "aws", "github") + AppHint string `protobuf:"bytes,2,opt,name=app_hint,json=appHint,proto3" json:"app_hint,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) Reset() { + *x = SecurityInsightTrait_ExternalResourceTarget{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityInsightTrait_ExternalResourceTarget) ProtoMessage() {} + +func (x *SecurityInsightTrait_ExternalResourceTarget) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) GetExternalId() string { + if x != nil { + return x.ExternalId + } + return "" +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) GetAppHint() string { + if x != nil { + return x.AppHint + } + return "" +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) SetExternalId(v string) { + x.ExternalId = v +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) SetAppHint(v string) { + x.AppHint = v +} + +type SecurityInsightTrait_ExternalResourceTarget_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The external identifier of the resource (e.g., ARN, GUID, etc.) 
+ ExternalId string + // Optional hint to help find the owning app (e.g., "aws", "github") + AppHint string +} + +func (b0 SecurityInsightTrait_ExternalResourceTarget_builder) Build() *SecurityInsightTrait_ExternalResourceTarget { + m0 := &SecurityInsightTrait_ExternalResourceTarget{} + b, x := &b0, m0 + _, _ = b, x + x.ExternalId = b.ExternalId + x.AppHint = b.AppHint + return m0 +} + +var File_c1_connector_v2_annotation_security_insight_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_security_insight_proto_rawDesc = "" + + "\n" + + "1c1/connector/v2/annotation_security_insight.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xc9\x04\n" + + "\x14SecurityInsightTrait\x12-\n" + + "\finsight_type\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\vinsightType\x12 \n" + + "\x05value\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x05value\x12;\n" + + "\vobserved_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "observedAt\x12F\n" + + "\x04user\x18\x04 \x01(\v20.c1.connector.v2.SecurityInsightTrait.UserTargetH\x00R\x04user\x12>\n" + + "\vresource_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdH\x00R\n" + + "resourceId\x12k\n" + + "\x11external_resource\x18\x06 \x01(\v2<.c1.connector.v2.SecurityInsightTrait.ExternalResourceTargetH\x00R\x10externalResource\x1a0\n" + + "\n" + + "UserTarget\x12\"\n" + + "\x05email\x18\x01 \x01(\tB\f\xfaB\tr\a \x01(\x80\b`\x01R\x05email\x1am\n" + + "\x16ExternalResourceTarget\x12+\n" + + "\vexternal_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80 R\n" + + "externalId\x12&\n" + + "\bapp_hint\x18\x02 \x01(\tB\v\xfaB\br\x06(\x80\b\xd0\x01\x01R\aappHintB\r\n" + + "\x06target\x12\x03\xf8B\x01B6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_security_insight_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_c1_connector_v2_annotation_security_insight_proto_goTypes = []any{ + (*SecurityInsightTrait)(nil), // 0: c1.connector.v2.SecurityInsightTrait + (*SecurityInsightTrait_UserTarget)(nil), // 1: c1.connector.v2.SecurityInsightTrait.UserTarget + (*SecurityInsightTrait_ExternalResourceTarget)(nil), // 2: c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget + (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp + (*ResourceId)(nil), // 4: c1.connector.v2.ResourceId +} +var file_c1_connector_v2_annotation_security_insight_proto_depIdxs = []int32{ + 3, // 0: c1.connector.v2.SecurityInsightTrait.observed_at:type_name -> google.protobuf.Timestamp + 1, // 1: c1.connector.v2.SecurityInsightTrait.user:type_name -> c1.connector.v2.SecurityInsightTrait.UserTarget + 4, // 2: c1.connector.v2.SecurityInsightTrait.resource_id:type_name -> c1.connector.v2.ResourceId + 2, // 3: c1.connector.v2.SecurityInsightTrait.external_resource:type_name -> c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_security_insight_proto_init() } +func file_c1_connector_v2_annotation_security_insight_proto_init() { + if File_c1_connector_v2_annotation_security_insight_proto != nil { + return + } + file_c1_connector_v2_resource_proto_init() + 
file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0].OneofWrappers = []any{ + (*SecurityInsightTrait_User)(nil), + (*SecurityInsightTrait_ResourceId)(nil), + (*SecurityInsightTrait_ExternalResource)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_security_insight_proto_rawDesc), len(file_c1_connector_v2_annotation_security_insight_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_security_insight_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_security_insight_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_security_insight_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_security_insight_proto = out.File + file_c1_connector_v2_annotation_security_insight_proto_goTypes = nil + file_c1_connector_v2_annotation_security_insight_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.validate.go new file mode 100644 index 00000000..b550d79e --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.validate.go @@ -0,0 +1,639 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: c1/connector/v2/annotation_security_insight.proto + +package v2 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on SecurityInsightTrait with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SecurityInsightTrait) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecurityInsightTrait with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SecurityInsightTraitMultiError, or nil if none found. 
+func (m *SecurityInsightTrait) ValidateAll() error { + return m.validate(true) +} + +func (m *SecurityInsightTrait) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if l := len(m.GetInsightType()); l < 1 || l > 1024 { + err := SecurityInsightTraitValidationError{ + field: "InsightType", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if l := len(m.GetValue()); l < 1 || l > 1024 { + err := SecurityInsightTraitValidationError{ + field: "Value", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetObservedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "ObservedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "ObservedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetObservedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecurityInsightTraitValidationError{ + field: "ObservedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + oneofTargetPresent := false + switch v := m.Target.(type) { + case *SecurityInsightTrait_User: + if v == nil { + err := SecurityInsightTraitValidationError{ + field: "Target", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetPresent = true + + if all { + switch v := interface{}(m.GetUser()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "User", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "User", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetUser()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecurityInsightTraitValidationError{ + field: "User", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *SecurityInsightTrait_ResourceId: + if v == nil { + err := SecurityInsightTraitValidationError{ + field: "Target", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetPresent = true + + if all { + switch v := interface{}(m.GetResourceId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "ResourceId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "ResourceId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResourceId()).(interface{ Validate() error }); ok { + if 
err := v.Validate(); err != nil { + return SecurityInsightTraitValidationError{ + field: "ResourceId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *SecurityInsightTrait_ExternalResource: + if v == nil { + err := SecurityInsightTraitValidationError{ + field: "Target", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetPresent = true + + if all { + switch v := interface{}(m.GetExternalResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "ExternalResource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "ExternalResource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExternalResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecurityInsightTraitValidationError{ + field: "ExternalResource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofTargetPresent { + err := SecurityInsightTraitValidationError{ + field: "Target", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SecurityInsightTraitMultiError(errors) + } + + return nil +} + +// SecurityInsightTraitMultiError is an error wrapping multiple validation +// errors returned by SecurityInsightTrait.ValidateAll() if the designated +// constraints aren't met. +type SecurityInsightTraitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecurityInsightTraitMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecurityInsightTraitMultiError) AllErrors() []error { return m } + +// SecurityInsightTraitValidationError is the validation error returned by +// SecurityInsightTrait.Validate if the designated constraints aren't met. +type SecurityInsightTraitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecurityInsightTraitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecurityInsightTraitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecurityInsightTraitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecurityInsightTraitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SecurityInsightTraitValidationError) ErrorName() string { + return "SecurityInsightTraitValidationError" +} + +// Error satisfies the builtin error interface +func (e SecurityInsightTraitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecurityInsightTrait.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecurityInsightTraitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecurityInsightTraitValidationError{} + +// Validate checks the field values on SecurityInsightTrait_UserTarget with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SecurityInsightTrait_UserTarget) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecurityInsightTrait_UserTarget with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// SecurityInsightTrait_UserTargetMultiError, or nil if none found. +func (m *SecurityInsightTrait_UserTarget) ValidateAll() error { + return m.validate(true) +} + +func (m *SecurityInsightTrait_UserTarget) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if l := len(m.GetEmail()); l < 1 || l > 1024 { + err := SecurityInsightTrait_UserTargetValidationError{ + field: "Email", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if err := m._validateEmail(m.GetEmail()); err != nil { + err = SecurityInsightTrait_UserTargetValidationError{ + field: "Email", + reason: "value must be a valid email address", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SecurityInsightTrait_UserTargetMultiError(errors) + } + + return nil +} + +func (m *SecurityInsightTrait_UserTarget) _validateHostname(host string) error { + s := strings.ToLower(strings.TrimSuffix(host, ".")) + + if len(host) > 253 { + return errors.New("hostname cannot exceed 253 characters") + } + + for _, part := range strings.Split(s, ".") { + if l := len(part); l == 0 || l > 63 { + return errors.New("hostname part must be non-empty and cannot exceed 63 characters") + } + + if part[0] == '-' { + return errors.New("hostname parts cannot begin with hyphens") + } + + if part[len(part)-1] == '-' { + return errors.New("hostname parts cannot end with hyphens") + } + + for _, r := range part { + if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { + return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) + } + } + } + + return nil +} + +func (m *SecurityInsightTrait_UserTarget) _validateEmail(addr string) error { + a, err := mail.ParseAddress(addr) + if err != nil { + return err + } + addr = a.Address + + if len(addr) > 254 { + return errors.New("email addresses cannot exceed 254 characters") + } + + parts := strings.SplitN(addr, "@", 2) + + if len(parts[0]) > 64 { + return errors.New("email address local phrase cannot exceed 64 characters") + } + + return m._validateHostname(parts[1]) +} + +// SecurityInsightTrait_UserTargetMultiError is an error wrapping 
multiple +// validation errors returned by SecurityInsightTrait_UserTarget.ValidateAll() +// if the designated constraints aren't met. +type SecurityInsightTrait_UserTargetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecurityInsightTrait_UserTargetMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecurityInsightTrait_UserTargetMultiError) AllErrors() []error { return m } + +// SecurityInsightTrait_UserTargetValidationError is the validation error +// returned by SecurityInsightTrait_UserTarget.Validate if the designated +// constraints aren't met. +type SecurityInsightTrait_UserTargetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecurityInsightTrait_UserTargetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecurityInsightTrait_UserTargetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecurityInsightTrait_UserTargetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecurityInsightTrait_UserTargetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecurityInsightTrait_UserTargetValidationError) ErrorName() string { + return "SecurityInsightTrait_UserTargetValidationError" +} + +// Error satisfies the builtin error interface +func (e SecurityInsightTrait_UserTargetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecurityInsightTrait_UserTarget.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecurityInsightTrait_UserTargetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecurityInsightTrait_UserTargetValidationError{} + +// Validate checks the field values on +// SecurityInsightTrait_ExternalResourceTarget with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *SecurityInsightTrait_ExternalResourceTarget) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// SecurityInsightTrait_ExternalResourceTarget with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// SecurityInsightTrait_ExternalResourceTargetMultiError, or nil if none found. 
+func (m *SecurityInsightTrait_ExternalResourceTarget) ValidateAll() error { + return m.validate(true) +} + +func (m *SecurityInsightTrait_ExternalResourceTarget) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if l := len(m.GetExternalId()); l < 1 || l > 4096 { + err := SecurityInsightTrait_ExternalResourceTargetValidationError{ + field: "ExternalId", + reason: "value length must be between 1 and 4096 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetAppHint() != "" { + + if len(m.GetAppHint()) > 1024 { + err := SecurityInsightTrait_ExternalResourceTargetValidationError{ + field: "AppHint", + reason: "value length must be at most 1024 bytes", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return SecurityInsightTrait_ExternalResourceTargetMultiError(errors) + } + + return nil +} + +// SecurityInsightTrait_ExternalResourceTargetMultiError is an error wrapping +// multiple validation errors returned by +// SecurityInsightTrait_ExternalResourceTarget.ValidateAll() if the designated +// constraints aren't met. +type SecurityInsightTrait_ExternalResourceTargetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecurityInsightTrait_ExternalResourceTargetMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecurityInsightTrait_ExternalResourceTargetMultiError) AllErrors() []error { return m } + +// SecurityInsightTrait_ExternalResourceTargetValidationError is the validation +// error returned by SecurityInsightTrait_ExternalResourceTarget.Validate if +// the designated constraints aren't met. +type SecurityInsightTrait_ExternalResourceTargetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SecurityInsightTrait_ExternalResourceTargetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecurityInsightTrait_ExternalResourceTargetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecurityInsightTrait_ExternalResourceTargetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecurityInsightTrait_ExternalResourceTargetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e SecurityInsightTrait_ExternalResourceTargetValidationError) ErrorName() string { + return "SecurityInsightTrait_ExternalResourceTargetValidationError" +} + +// Error satisfies the builtin error interface +func (e SecurityInsightTrait_ExternalResourceTargetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecurityInsightTrait_ExternalResourceTarget.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecurityInsightTrait_ExternalResourceTargetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecurityInsightTrait_ExternalResourceTargetValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight_protoopaque.pb.go new file mode 100644 index 00000000..184346c1 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight_protoopaque.pb.go @@ -0,0 +1,516 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_security_insight.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SecurityInsightTrait is the trait annotation for resources with TRAIT_SECURITY_INSIGHT. +// It contains the metadata for the security insight including type, value, observation time, +// and the target entity (user or resource) that this insight should be bound to. 
+type SecurityInsightTrait struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_InsightType string `protobuf:"bytes,1,opt,name=insight_type,json=insightType,proto3"` + xxx_hidden_Value string `protobuf:"bytes,2,opt,name=value,proto3"` + xxx_hidden_ObservedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=observed_at,json=observedAt,proto3"` + xxx_hidden_Target isSecurityInsightTrait_Target `protobuf_oneof:"target"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityInsightTrait) Reset() { + *x = SecurityInsightTrait{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityInsightTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityInsightTrait) ProtoMessage() {} + +func (x *SecurityInsightTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityInsightTrait) GetInsightType() string { + if x != nil { + return x.xxx_hidden_InsightType + } + return "" +} + +func (x *SecurityInsightTrait) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *SecurityInsightTrait) GetObservedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_ObservedAt + } + return nil +} + +func (x *SecurityInsightTrait) GetUser() *SecurityInsightTrait_UserTarget { + if x != nil { + if x, ok := x.xxx_hidden_Target.(*securityInsightTrait_User); ok { + return x.User + } + } + return nil +} + +func (x *SecurityInsightTrait) GetResourceId() *ResourceId { + if x != nil { + if x, ok := x.xxx_hidden_Target.(*securityInsightTrait_ResourceId); ok { + return x.ResourceId + } + } + return nil +} + +func (x *SecurityInsightTrait) GetExternalResource() *SecurityInsightTrait_ExternalResourceTarget { + if x != nil { + if x, ok := x.xxx_hidden_Target.(*securityInsightTrait_ExternalResource); ok { + return x.ExternalResource + } + } + return nil +} + +func (x *SecurityInsightTrait) SetInsightType(v string) { + x.xxx_hidden_InsightType = v +} + +func (x *SecurityInsightTrait) SetValue(v string) { + x.xxx_hidden_Value = v +} + +func (x *SecurityInsightTrait) SetObservedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_ObservedAt = v +} + +func (x *SecurityInsightTrait) SetUser(v *SecurityInsightTrait_UserTarget) { + if v == nil { + x.xxx_hidden_Target = nil + return + } + x.xxx_hidden_Target = &securityInsightTrait_User{v} +} + +func (x *SecurityInsightTrait) SetResourceId(v *ResourceId) { + if v == nil { + x.xxx_hidden_Target = nil + return + } + x.xxx_hidden_Target = &securityInsightTrait_ResourceId{v} +} + +func (x *SecurityInsightTrait) SetExternalResource(v *SecurityInsightTrait_ExternalResourceTarget) { + if v == nil { + x.xxx_hidden_Target = nil + return + } + x.xxx_hidden_Target = &securityInsightTrait_ExternalResource{v} +} + +func (x *SecurityInsightTrait) HasObservedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_ObservedAt != nil +} + +func (x *SecurityInsightTrait) HasTarget() bool { + if x == nil { + return false + } + return x.xxx_hidden_Target != nil +} + +func (x *SecurityInsightTrait) HasUser() bool { + if x == nil { + return false + } + _, ok := 
x.xxx_hidden_Target.(*securityInsightTrait_User) + return ok +} + +func (x *SecurityInsightTrait) HasResourceId() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Target.(*securityInsightTrait_ResourceId) + return ok +} + +func (x *SecurityInsightTrait) HasExternalResource() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Target.(*securityInsightTrait_ExternalResource) + return ok +} + +func (x *SecurityInsightTrait) ClearObservedAt() { + x.xxx_hidden_ObservedAt = nil +} + +func (x *SecurityInsightTrait) ClearTarget() { + x.xxx_hidden_Target = nil +} + +func (x *SecurityInsightTrait) ClearUser() { + if _, ok := x.xxx_hidden_Target.(*securityInsightTrait_User); ok { + x.xxx_hidden_Target = nil + } +} + +func (x *SecurityInsightTrait) ClearResourceId() { + if _, ok := x.xxx_hidden_Target.(*securityInsightTrait_ResourceId); ok { + x.xxx_hidden_Target = nil + } +} + +func (x *SecurityInsightTrait) ClearExternalResource() { + if _, ok := x.xxx_hidden_Target.(*securityInsightTrait_ExternalResource); ok { + x.xxx_hidden_Target = nil + } +} + +const SecurityInsightTrait_Target_not_set_case case_SecurityInsightTrait_Target = 0 +const SecurityInsightTrait_User_case case_SecurityInsightTrait_Target = 4 +const SecurityInsightTrait_ResourceId_case case_SecurityInsightTrait_Target = 5 +const SecurityInsightTrait_ExternalResource_case case_SecurityInsightTrait_Target = 6 + +func (x *SecurityInsightTrait) WhichTarget() case_SecurityInsightTrait_Target { + if x == nil { + return SecurityInsightTrait_Target_not_set_case + } + switch x.xxx_hidden_Target.(type) { + case *securityInsightTrait_User: + return SecurityInsightTrait_User_case + case *securityInsightTrait_ResourceId: + return SecurityInsightTrait_ResourceId_case + case *securityInsightTrait_ExternalResource: + return SecurityInsightTrait_ExternalResource_case + default: + return SecurityInsightTrait_Target_not_set_case + } +} + +type SecurityInsightTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // The type of insight (e.g., "crowdstrike_zta_score", "wiz_critical_vulnerability") + InsightType string + // The value of the insight (e.g., "85", "High", "Critical") + Value string + // When this insight was observed/captured from the source system + ObservedAt *timestamppb.Timestamp + // The target entity this insight should be bound to + + // Fields of oneof xxx_hidden_Target: + // For binding to a C1 User by email address + User *SecurityInsightTrait_UserTarget + // For direct reference to a resource the connector knows about + ResourceId *ResourceId + // For binding to an AppResource by external ID + ExternalResource *SecurityInsightTrait_ExternalResourceTarget + // -- end of xxx_hidden_Target +} + +func (b0 SecurityInsightTrait_builder) Build() *SecurityInsightTrait { + m0 := &SecurityInsightTrait{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_InsightType = b.InsightType + x.xxx_hidden_Value = b.Value + x.xxx_hidden_ObservedAt = b.ObservedAt + if b.User != nil { + x.xxx_hidden_Target = &securityInsightTrait_User{b.User} + } + if b.ResourceId != nil { + x.xxx_hidden_Target = &securityInsightTrait_ResourceId{b.ResourceId} + } + if b.ExternalResource != nil { + x.xxx_hidden_Target = &securityInsightTrait_ExternalResource{b.ExternalResource} + } + return m0 +} + +type case_SecurityInsightTrait_Target protoreflect.FieldNumber + +func (x case_SecurityInsightTrait_Target) String() string { + md := file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isSecurityInsightTrait_Target interface { + isSecurityInsightTrait_Target() +} + +type securityInsightTrait_User struct { + // For binding to a C1 User by email address + User *SecurityInsightTrait_UserTarget `protobuf:"bytes,4,opt,name=user,proto3,oneof"` +} + +type securityInsightTrait_ResourceId struct { + // For direct reference to a resource the connector knows about + ResourceId *ResourceId `protobuf:"bytes,5,opt,name=resource_id,json=resourceId,proto3,oneof"` +} + +type securityInsightTrait_ExternalResource struct { + // For binding to an AppResource by external ID + ExternalResource *SecurityInsightTrait_ExternalResourceTarget `protobuf:"bytes,6,opt,name=external_resource,json=externalResource,proto3,oneof"` +} + +func (*securityInsightTrait_User) isSecurityInsightTrait_Target() {} + +func (*securityInsightTrait_ResourceId) isSecurityInsightTrait_Target() {} + +func (*securityInsightTrait_ExternalResource) isSecurityInsightTrait_Target() {} + +// UserTarget identifies a user by email for resolution to a C1 User +type SecurityInsightTrait_UserTarget struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Email string `protobuf:"bytes,1,opt,name=email,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityInsightTrait_UserTarget) Reset() { + *x = SecurityInsightTrait_UserTarget{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityInsightTrait_UserTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityInsightTrait_UserTarget) ProtoMessage() {} + +func (x *SecurityInsightTrait_UserTarget) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityInsightTrait_UserTarget) GetEmail() string { + if x != nil { + return x.xxx_hidden_Email + } + return "" +} + +func (x *SecurityInsightTrait_UserTarget) SetEmail(v string) { + x.xxx_hidden_Email = v +} + +type SecurityInsightTrait_UserTarget_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Email string +} + +func (b0 SecurityInsightTrait_UserTarget_builder) Build() *SecurityInsightTrait_UserTarget { + m0 := &SecurityInsightTrait_UserTarget{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Email = b.Email + return m0 +} + +// ExternalResourceTarget identifies a resource by external ID for resolution to an AppResource. +// Use this when the connector doesn't sync the target resource itself. +type SecurityInsightTrait_ExternalResourceTarget struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ExternalId string `protobuf:"bytes,1,opt,name=external_id,json=externalId,proto3"` + xxx_hidden_AppHint string `protobuf:"bytes,2,opt,name=app_hint,json=appHint,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) Reset() { + *x = SecurityInsightTrait_ExternalResourceTarget{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityInsightTrait_ExternalResourceTarget) ProtoMessage() {} + +func (x *SecurityInsightTrait_ExternalResourceTarget) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) GetExternalId() string { + if x != nil { + return x.xxx_hidden_ExternalId + } + return "" +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) GetAppHint() string { + if x != nil { + return x.xxx_hidden_AppHint + } + return "" +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) SetExternalId(v string) { + x.xxx_hidden_ExternalId = v +} + +func (x *SecurityInsightTrait_ExternalResourceTarget) SetAppHint(v string) { + x.xxx_hidden_AppHint = v +} + +type SecurityInsightTrait_ExternalResourceTarget_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The external identifier of the resource (e.g., ARN, GUID, etc.) 
+ ExternalId string + // Optional hint to help find the owning app (e.g., "aws", "github") + AppHint string +} + +func (b0 SecurityInsightTrait_ExternalResourceTarget_builder) Build() *SecurityInsightTrait_ExternalResourceTarget { + m0 := &SecurityInsightTrait_ExternalResourceTarget{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ExternalId = b.ExternalId + x.xxx_hidden_AppHint = b.AppHint + return m0 +} + +var File_c1_connector_v2_annotation_security_insight_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_security_insight_proto_rawDesc = "" + + "\n" + + "1c1/connector/v2/annotation_security_insight.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xc9\x04\n" + + "\x14SecurityInsightTrait\x12-\n" + + "\finsight_type\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\vinsightType\x12 \n" + + "\x05value\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x05value\x12;\n" + + "\vobserved_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "observedAt\x12F\n" + + "\x04user\x18\x04 \x01(\v20.c1.connector.v2.SecurityInsightTrait.UserTargetH\x00R\x04user\x12>\n" + + "\vresource_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdH\x00R\n" + + "resourceId\x12k\n" + + "\x11external_resource\x18\x06 \x01(\v2<.c1.connector.v2.SecurityInsightTrait.ExternalResourceTargetH\x00R\x10externalResource\x1a0\n" + + "\n" + + "UserTarget\x12\"\n" + + "\x05email\x18\x01 \x01(\tB\f\xfaB\tr\a \x01(\x80\b`\x01R\x05email\x1am\n" + + "\x16ExternalResourceTarget\x12+\n" + + "\vexternal_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80 R\n" + + "externalId\x12&\n" + + "\bapp_hint\x18\x02 \x01(\tB\v\xfaB\br\x06(\x80\b\xd0\x01\x01R\aappHintB\r\n" + + "\x06target\x12\x03\xf8B\x01B6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_security_insight_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_c1_connector_v2_annotation_security_insight_proto_goTypes = []any{ + (*SecurityInsightTrait)(nil), // 0: c1.connector.v2.SecurityInsightTrait + (*SecurityInsightTrait_UserTarget)(nil), // 1: c1.connector.v2.SecurityInsightTrait.UserTarget + (*SecurityInsightTrait_ExternalResourceTarget)(nil), // 2: c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget + (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp + (*ResourceId)(nil), // 4: c1.connector.v2.ResourceId +} +var file_c1_connector_v2_annotation_security_insight_proto_depIdxs = []int32{ + 3, // 0: c1.connector.v2.SecurityInsightTrait.observed_at:type_name -> google.protobuf.Timestamp + 1, // 1: c1.connector.v2.SecurityInsightTrait.user:type_name -> c1.connector.v2.SecurityInsightTrait.UserTarget + 4, // 2: c1.connector.v2.SecurityInsightTrait.resource_id:type_name -> c1.connector.v2.ResourceId + 2, // 3: c1.connector.v2.SecurityInsightTrait.external_resource:type_name -> c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_security_insight_proto_init() } +func file_c1_connector_v2_annotation_security_insight_proto_init() { + if File_c1_connector_v2_annotation_security_insight_proto != nil { + return + } + file_c1_connector_v2_resource_proto_init() + 
file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0].OneofWrappers = []any{ + (*securityInsightTrait_User)(nil), + (*securityInsightTrait_ResourceId)(nil), + (*securityInsightTrait_ExternalResource)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_security_insight_proto_rawDesc), len(file_c1_connector_v2_annotation_security_insight_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_security_insight_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_security_insight_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_security_insight_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_security_insight_proto = out.File + file_c1_connector_v2_annotation_security_insight_proto_goTypes = nil + file_c1_connector_v2_annotation_security_insight_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_sync_id.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_sync_id.pb.go deleted file mode 100644 index 05159caa..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_sync_id.pb.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.4 -// protoc (unknown) -// source: c1/connector/v2/annotation_sync_id.proto - -package v2 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// ActiveSync is an annotation is used by the session cache. -type ActiveSync struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ActiveSync) Reset() { - *x = ActiveSync{} - mi := &file_c1_connector_v2_annotation_sync_id_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ActiveSync) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ActiveSync) ProtoMessage() {} - -func (x *ActiveSync) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_sync_id_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ActiveSync.ProtoReflect.Descriptor instead. 
-func (*ActiveSync) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_sync_id_proto_rawDescGZIP(), []int{0} -} - -func (x *ActiveSync) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -var File_c1_connector_v2_annotation_sync_id_proto protoreflect.FileDescriptor - -var file_c1_connector_v2_annotation_sync_id_proto_rawDesc = string([]byte{ - 0x0a, 0x28, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x79, 0x6e, - 0x63, 0x5f, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x22, 0x1c, 0x0a, 0x0a, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, - 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, - 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_annotation_sync_id_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_sync_id_proto_rawDescData []byte -) - -func file_c1_connector_v2_annotation_sync_id_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_sync_id_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_sync_id_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_sync_id_proto_rawDesc), len(file_c1_connector_v2_annotation_sync_id_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_sync_id_proto_rawDescData -} - -var file_c1_connector_v2_annotation_sync_id_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_c1_connector_v2_annotation_sync_id_proto_goTypes = []any{ - (*ActiveSync)(nil), // 0: c1.connector.v2.ActiveSync -} -var file_c1_connector_v2_annotation_sync_id_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_c1_connector_v2_annotation_sync_id_proto_init() } -func file_c1_connector_v2_annotation_sync_id_proto_init() { - if File_c1_connector_v2_annotation_sync_id_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_sync_id_proto_rawDesc), len(file_c1_connector_v2_annotation_sync_id_proto_rawDesc)), - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_c1_connector_v2_annotation_sync_id_proto_goTypes, - DependencyIndexes: file_c1_connector_v2_annotation_sync_id_proto_depIdxs, - MessageInfos: file_c1_connector_v2_annotation_sync_id_proto_msgTypes, - }.Build() - File_c1_connector_v2_annotation_sync_id_proto = out.File - file_c1_connector_v2_annotation_sync_id_proto_goTypes = nil - file_c1_connector_v2_annotation_sync_id_proto_depIdxs = nil -} diff --git 
a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_sync_id.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_sync_id.pb.validate.go deleted file mode 100644 index 66613f9e..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_sync_id.pb.validate.go +++ /dev/null @@ -1,137 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: c1/connector/v2/annotation_sync_id.proto - -package v2 - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on ActiveSync with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *ActiveSync) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ActiveSync with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in ActiveSyncMultiError, or -// nil if none found. -func (m *ActiveSync) ValidateAll() error { - return m.validate(true) -} - -func (m *ActiveSync) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Id - - if len(errors) > 0 { - return ActiveSyncMultiError(errors) - } - - return nil -} - -// ActiveSyncMultiError is an error wrapping multiple validation errors -// returned by ActiveSync.ValidateAll() if the designated constraints aren't met. -type ActiveSyncMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ActiveSyncMultiError) Error() string { - msgs := make([]string, 0, len(m)) - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m ActiveSyncMultiError) AllErrors() []error { return m } - -// ActiveSyncValidationError is the validation error returned by -// ActiveSync.Validate if the designated constraints aren't met. -type ActiveSyncValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ActiveSyncValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ActiveSyncValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ActiveSyncValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ActiveSyncValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ActiveSyncValidationError) ErrorName() string { return "ActiveSyncValidationError" } - -// Error satisfies the builtin error interface -func (e ActiveSyncValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sActiveSync.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ActiveSyncValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ActiveSyncValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.go index 628193c9..7b79389a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_trait.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -71,11 +72,6 @@ func (x UserTrait_AccountType) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use UserTrait_AccountType.Descriptor instead. -func (UserTrait_AccountType) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{0, 0} -} - type UserTrait_Status_Status int32 const ( @@ -123,11 +119,6 @@ func (x UserTrait_Status_Status) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use UserTrait_Status_Status.Descriptor instead. -func (UserTrait_Status_Status) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{0, 1, 0} -} - type AppTrait_AppFlag int32 const ( @@ -181,13 +172,8 @@ func (x AppTrait_AppFlag) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use AppTrait_AppFlag.Descriptor instead. -func (AppTrait_AppFlag) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{3, 0} -} - type UserTrait struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Emails []*UserTrait_Email `protobuf:"bytes,1,rep,name=emails,proto3" json:"emails,omitempty"` Status *UserTrait_Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` Profile *structpb.Struct `protobuf:"bytes,3,opt,name=profile,proto3" json:"profile,omitempty"` @@ -232,11 +218,6 @@ func (x *UserTrait) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UserTrait.ProtoReflect.Descriptor instead. 
-func (*UserTrait) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{0} -} - func (x *UserTrait) GetEmails() []*UserTrait_Email { if x != nil { return x.Emails @@ -328,8 +309,188 @@ func (x *UserTrait) GetStructuredName() *UserTrait_StructuredName { return nil } +func (x *UserTrait) SetEmails(v []*UserTrait_Email) { + x.Emails = v +} + +func (x *UserTrait) SetStatus(v *UserTrait_Status) { + x.Status = v +} + +func (x *UserTrait) SetProfile(v *structpb.Struct) { + x.Profile = v +} + +func (x *UserTrait) SetIcon(v *AssetRef) { + x.Icon = v +} + +func (x *UserTrait) SetAccountType(v UserTrait_AccountType) { + x.AccountType = v +} + +func (x *UserTrait) SetLogin(v string) { + x.Login = v +} + +func (x *UserTrait) SetLoginAliases(v []string) { + x.LoginAliases = v +} + +func (x *UserTrait) SetEmployeeIds(v []string) { + x.EmployeeIds = v +} + +func (x *UserTrait) SetCreatedAt(v *timestamppb.Timestamp) { + x.CreatedAt = v +} + +func (x *UserTrait) SetLastLogin(v *timestamppb.Timestamp) { + x.LastLogin = v +} + +func (x *UserTrait) SetMfaStatus(v *UserTrait_MFAStatus) { + x.MfaStatus = v +} + +func (x *UserTrait) SetSsoStatus(v *UserTrait_SSOStatus) { + x.SsoStatus = v +} + +func (x *UserTrait) SetStructuredName(v *UserTrait_StructuredName) { + x.StructuredName = v +} + +func (x *UserTrait) HasStatus() bool { + if x == nil { + return false + } + return x.Status != nil +} + +func (x *UserTrait) HasProfile() bool { + if x == nil { + return false + } + return x.Profile != nil +} + +func (x *UserTrait) HasIcon() bool { + if x == nil { + return false + } + return x.Icon != nil +} + +func (x *UserTrait) HasCreatedAt() bool { + if x == nil { + return false + } + return x.CreatedAt != nil +} + +func (x *UserTrait) HasLastLogin() bool { + if x == nil { + return false + } + return x.LastLogin != nil +} + +func (x *UserTrait) HasMfaStatus() bool { + if x == nil { + return false + } + return x.MfaStatus != nil +} + +func (x *UserTrait) HasSsoStatus() bool { + if x == nil { + return false + } + return x.SsoStatus != nil +} + +func (x *UserTrait) HasStructuredName() bool { + if x == nil { + return false + } + return x.StructuredName != nil +} + +func (x *UserTrait) ClearStatus() { + x.Status = nil +} + +func (x *UserTrait) ClearProfile() { + x.Profile = nil +} + +func (x *UserTrait) ClearIcon() { + x.Icon = nil +} + +func (x *UserTrait) ClearCreatedAt() { + x.CreatedAt = nil +} + +func (x *UserTrait) ClearLastLogin() { + x.LastLogin = nil +} + +func (x *UserTrait) ClearMfaStatus() { + x.MfaStatus = nil +} + +func (x *UserTrait) ClearSsoStatus() { + x.SsoStatus = nil +} + +func (x *UserTrait) ClearStructuredName() { + x.StructuredName = nil +} + +type UserTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Emails []*UserTrait_Email + Status *UserTrait_Status + Profile *structpb.Struct + Icon *AssetRef + AccountType UserTrait_AccountType + // The user's login + Login string + // Any additional login aliases for the user + LoginAliases []string + EmployeeIds []string + CreatedAt *timestamppb.Timestamp + LastLogin *timestamppb.Timestamp + MfaStatus *UserTrait_MFAStatus + SsoStatus *UserTrait_SSOStatus + StructuredName *UserTrait_StructuredName +} + +func (b0 UserTrait_builder) Build() *UserTrait { + m0 := &UserTrait{} + b, x := &b0, m0 + _, _ = b, x + x.Emails = b.Emails + x.Status = b.Status + x.Profile = b.Profile + x.Icon = b.Icon + x.AccountType = b.AccountType + x.Login = b.Login + x.LoginAliases = b.LoginAliases + x.EmployeeIds = b.EmployeeIds + x.CreatedAt = b.CreatedAt + x.LastLogin = b.LastLogin + x.MfaStatus = b.MfaStatus + x.SsoStatus = b.SsoStatus + x.StructuredName = b.StructuredName + return m0 +} + type GroupTrait struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Icon *AssetRef `protobuf:"bytes,1,opt,name=icon,proto3" json:"icon,omitempty"` Profile *structpb.Struct `protobuf:"bytes,2,opt,name=profile,proto3" json:"profile,omitempty"` unknownFields protoimpl.UnknownFields @@ -361,11 +522,6 @@ func (x *GroupTrait) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GroupTrait.ProtoReflect.Descriptor instead. -func (*GroupTrait) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{1} -} - func (x *GroupTrait) GetIcon() *AssetRef { if x != nil { return x.Icon @@ -380,8 +536,54 @@ func (x *GroupTrait) GetProfile() *structpb.Struct { return nil } +func (x *GroupTrait) SetIcon(v *AssetRef) { + x.Icon = v +} + +func (x *GroupTrait) SetProfile(v *structpb.Struct) { + x.Profile = v +} + +func (x *GroupTrait) HasIcon() bool { + if x == nil { + return false + } + return x.Icon != nil +} + +func (x *GroupTrait) HasProfile() bool { + if x == nil { + return false + } + return x.Profile != nil +} + +func (x *GroupTrait) ClearIcon() { + x.Icon = nil +} + +func (x *GroupTrait) ClearProfile() { + x.Profile = nil +} + +type GroupTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Icon *AssetRef + Profile *structpb.Struct +} + +func (b0 GroupTrait_builder) Build() *GroupTrait { + m0 := &GroupTrait{} + b, x := &b0, m0 + _, _ = b, x + x.Icon = b.Icon + x.Profile = b.Profile + return m0 +} + type RoleTrait struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Profile *structpb.Struct `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -412,11 +614,6 @@ func (x *RoleTrait) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RoleTrait.ProtoReflect.Descriptor instead. 
-func (*RoleTrait) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{2} -} - func (x *RoleTrait) GetProfile() *structpb.Struct { if x != nil { return x.Profile @@ -424,8 +621,37 @@ func (x *RoleTrait) GetProfile() *structpb.Struct { return nil } +func (x *RoleTrait) SetProfile(v *structpb.Struct) { + x.Profile = v +} + +func (x *RoleTrait) HasProfile() bool { + if x == nil { + return false + } + return x.Profile != nil +} + +func (x *RoleTrait) ClearProfile() { + x.Profile = nil +} + +type RoleTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Profile *structpb.Struct +} + +func (b0 RoleTrait_builder) Build() *RoleTrait { + m0 := &RoleTrait{} + b, x := &b0, m0 + _, _ = b, x + x.Profile = b.Profile + return m0 +} + type AppTrait struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` HelpUrl string `protobuf:"bytes,1,opt,name=help_url,json=helpUrl,proto3" json:"help_url,omitempty"` Icon *AssetRef `protobuf:"bytes,2,opt,name=icon,proto3" json:"icon,omitempty"` Logo *AssetRef `protobuf:"bytes,3,opt,name=logo,proto3" json:"logo,omitempty"` @@ -460,11 +686,6 @@ func (x *AppTrait) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AppTrait.ProtoReflect.Descriptor instead. -func (*AppTrait) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{3} -} - func (x *AppTrait) GetHelpUrl() string { if x != nil { return x.HelpUrl @@ -500,8 +721,83 @@ func (x *AppTrait) GetFlags() []AppTrait_AppFlag { return nil } +func (x *AppTrait) SetHelpUrl(v string) { + x.HelpUrl = v +} + +func (x *AppTrait) SetIcon(v *AssetRef) { + x.Icon = v +} + +func (x *AppTrait) SetLogo(v *AssetRef) { + x.Logo = v +} + +func (x *AppTrait) SetProfile(v *structpb.Struct) { + x.Profile = v +} + +func (x *AppTrait) SetFlags(v []AppTrait_AppFlag) { + x.Flags = v +} + +func (x *AppTrait) HasIcon() bool { + if x == nil { + return false + } + return x.Icon != nil +} + +func (x *AppTrait) HasLogo() bool { + if x == nil { + return false + } + return x.Logo != nil +} + +func (x *AppTrait) HasProfile() bool { + if x == nil { + return false + } + return x.Profile != nil +} + +func (x *AppTrait) ClearIcon() { + x.Icon = nil +} + +func (x *AppTrait) ClearLogo() { + x.Logo = nil +} + +func (x *AppTrait) ClearProfile() { + x.Profile = nil +} + +type AppTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + HelpUrl string + Icon *AssetRef + Logo *AssetRef + Profile *structpb.Struct + Flags []AppTrait_AppFlag +} + +func (b0 AppTrait_builder) Build() *AppTrait { + m0 := &AppTrait{} + b, x := &b0, m0 + _, _ = b, x + x.HelpUrl = b.HelpUrl + x.Icon = b.Icon + x.Logo = b.Logo + x.Profile = b.Profile + x.Flags = b.Flags + return m0 +} + type SecretTrait struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Profile *structpb.Struct `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` CreatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` @@ -537,11 +833,6 @@ func (x *SecretTrait) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SecretTrait.ProtoReflect.Descriptor instead. -func (*SecretTrait) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{4} -} - func (x *SecretTrait) GetProfile() *structpb.Struct { if x != nil { return x.Profile @@ -584,8 +875,122 @@ func (x *SecretTrait) GetIdentityId() *ResourceId { return nil } +func (x *SecretTrait) SetProfile(v *structpb.Struct) { + x.Profile = v +} + +func (x *SecretTrait) SetCreatedAt(v *timestamppb.Timestamp) { + x.CreatedAt = v +} + +func (x *SecretTrait) SetExpiresAt(v *timestamppb.Timestamp) { + x.ExpiresAt = v +} + +func (x *SecretTrait) SetLastUsedAt(v *timestamppb.Timestamp) { + x.LastUsedAt = v +} + +func (x *SecretTrait) SetCreatedById(v *ResourceId) { + x.CreatedById = v +} + +func (x *SecretTrait) SetIdentityId(v *ResourceId) { + x.IdentityId = v +} + +func (x *SecretTrait) HasProfile() bool { + if x == nil { + return false + } + return x.Profile != nil +} + +func (x *SecretTrait) HasCreatedAt() bool { + if x == nil { + return false + } + return x.CreatedAt != nil +} + +func (x *SecretTrait) HasExpiresAt() bool { + if x == nil { + return false + } + return x.ExpiresAt != nil +} + +func (x *SecretTrait) HasLastUsedAt() bool { + if x == nil { + return false + } + return x.LastUsedAt != nil +} + +func (x *SecretTrait) HasCreatedById() bool { + if x == nil { + return false + } + return x.CreatedById != nil +} + +func (x *SecretTrait) HasIdentityId() bool { + if x == nil { + return false + } + return x.IdentityId != nil +} + +func (x *SecretTrait) ClearProfile() { + x.Profile = nil +} + +func (x *SecretTrait) ClearCreatedAt() { + x.CreatedAt = nil +} + +func (x *SecretTrait) ClearExpiresAt() { + x.ExpiresAt = nil +} + +func (x *SecretTrait) ClearLastUsedAt() { + x.LastUsedAt = nil +} + +func (x *SecretTrait) ClearCreatedById() { + x.CreatedById = nil +} + +func (x *SecretTrait) ClearIdentityId() { + x.IdentityId = nil +} + +type SecretTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Profile *structpb.Struct + CreatedAt *timestamppb.Timestamp + ExpiresAt *timestamppb.Timestamp + LastUsedAt *timestamppb.Timestamp + CreatedById *ResourceId + IdentityId *ResourceId +} + +func (b0 SecretTrait_builder) Build() *SecretTrait { + m0 := &SecretTrait{} + b, x := &b0, m0 + _, _ = b, x + x.Profile = b.Profile + x.CreatedAt = b.CreatedAt + x.ExpiresAt = b.ExpiresAt + x.LastUsedAt = b.LastUsedAt + x.CreatedById = b.CreatedById + x.IdentityId = b.IdentityId + return m0 +} + type UserTrait_Email struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Indicates if this is the user's primary email. Only one entry can be marked as primary. IsPrimary bool `protobuf:"varint,2,opt,name=is_primary,json=isPrimary,proto3" json:"is_primary,omitempty"` @@ -618,11 +1023,6 @@ func (x *UserTrait_Email) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UserTrait_Email.ProtoReflect.Descriptor instead. -func (*UserTrait_Email) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{0, 0} -} - func (x *UserTrait_Email) GetAddress() string { if x != nil { return x.Address @@ -637,8 +1037,33 @@ func (x *UserTrait_Email) GetIsPrimary() bool { return false } +func (x *UserTrait_Email) SetAddress(v string) { + x.Address = v +} + +func (x *UserTrait_Email) SetIsPrimary(v bool) { + x.IsPrimary = v +} + +type UserTrait_Email_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Address string + // Indicates if this is the user's primary email. Only one entry can be marked as primary. + IsPrimary bool +} + +func (b0 UserTrait_Email_builder) Build() *UserTrait_Email { + m0 := &UserTrait_Email{} + b, x := &b0, m0 + _, _ = b, x + x.Address = b.Address + x.IsPrimary = b.IsPrimary + return m0 +} + type UserTrait_Status struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Status UserTrait_Status_Status `protobuf:"varint,1,opt,name=status,proto3,enum=c1.connector.v2.UserTrait_Status_Status" json:"status,omitempty"` Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` unknownFields protoimpl.UnknownFields @@ -670,11 +1095,6 @@ func (x *UserTrait_Status) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UserTrait_Status.ProtoReflect.Descriptor instead. -func (*UserTrait_Status) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{0, 1} -} - func (x *UserTrait_Status) GetStatus() UserTrait_Status_Status { if x != nil { return x.Status @@ -689,8 +1109,32 @@ func (x *UserTrait_Status) GetDetails() string { return "" } +func (x *UserTrait_Status) SetStatus(v UserTrait_Status_Status) { + x.Status = v +} + +func (x *UserTrait_Status) SetDetails(v string) { + x.Details = v +} + +type UserTrait_Status_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Status UserTrait_Status_Status + Details string +} + +func (b0 UserTrait_Status_builder) Build() *UserTrait_Status { + m0 := &UserTrait_Status{} + b, x := &b0, m0 + _, _ = b, x + x.Status = b.Status + x.Details = b.Details + return m0 +} + type UserTrait_MFAStatus struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` MfaEnabled bool `protobuf:"varint,1,opt,name=mfa_enabled,json=mfaEnabled,proto3" json:"mfa_enabled,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -721,11 +1165,6 @@ func (x *UserTrait_MFAStatus) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UserTrait_MFAStatus.ProtoReflect.Descriptor instead. -func (*UserTrait_MFAStatus) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{0, 2} -} - func (x *UserTrait_MFAStatus) GetMfaEnabled() bool { if x != nil { return x.MfaEnabled @@ -733,8 +1172,26 @@ func (x *UserTrait_MFAStatus) GetMfaEnabled() bool { return false } +func (x *UserTrait_MFAStatus) SetMfaEnabled(v bool) { + x.MfaEnabled = v +} + +type UserTrait_MFAStatus_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + MfaEnabled bool +} + +func (b0 UserTrait_MFAStatus_builder) Build() *UserTrait_MFAStatus { + m0 := &UserTrait_MFAStatus{} + b, x := &b0, m0 + _, _ = b, x + x.MfaEnabled = b.MfaEnabled + return m0 +} + type UserTrait_SSOStatus struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SsoEnabled bool `protobuf:"varint,1,opt,name=sso_enabled,json=ssoEnabled,proto3" json:"sso_enabled,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -765,11 +1222,6 @@ func (x *UserTrait_SSOStatus) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UserTrait_SSOStatus.ProtoReflect.Descriptor instead. -func (*UserTrait_SSOStatus) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{0, 3} -} - func (x *UserTrait_SSOStatus) GetSsoEnabled() bool { if x != nil { return x.SsoEnabled @@ -777,8 +1229,26 @@ func (x *UserTrait_SSOStatus) GetSsoEnabled() bool { return false } +func (x *UserTrait_SSOStatus) SetSsoEnabled(v bool) { + x.SsoEnabled = v +} + +type UserTrait_SSOStatus_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SsoEnabled bool +} + +func (b0 UserTrait_SSOStatus_builder) Build() *UserTrait_SSOStatus { + m0 := &UserTrait_SSOStatus{} + b, x := &b0, m0 + _, _ = b, x + x.SsoEnabled = b.SsoEnabled + return m0 +} + type UserTrait_StructuredName struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` GivenName string `protobuf:"bytes,1,opt,name=given_name,json=givenName,proto3" json:"given_name,omitempty"` FamilyName string `protobuf:"bytes,2,opt,name=family_name,json=familyName,proto3" json:"family_name,omitempty"` MiddleNames []string `protobuf:"bytes,3,rep,name=middle_names,json=middleNames,proto3" json:"middle_names,omitempty"` @@ -813,11 +1283,6 @@ func (x *UserTrait_StructuredName) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UserTrait_StructuredName.ProtoReflect.Descriptor instead. 
-func (*UserTrait_StructuredName) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_trait_proto_rawDescGZIP(), []int{0, 4} -} - func (x *UserTrait_StructuredName) GetGivenName() string { if x != nil { return x.GivenName @@ -853,190 +1318,135 @@ func (x *UserTrait_StructuredName) GetSuffix() string { return "" } -var File_c1_connector_v2_annotation_trait_proto protoreflect.FileDescriptor +func (x *UserTrait_StructuredName) SetGivenName(v string) { + x.GivenName = v +} -var file_c1_connector_v2_annotation_trait_proto_rawDesc = string([]byte{ - 0x0a, 0x26, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x61, - 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1b, 0x63, 0x31, 0x2f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x97, - 0x0b, 0x0a, 0x09, 0x55, 0x73, 0x65, 0x72, 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, 0x38, 0x0a, 0x06, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, - 0x73, 0x65, 0x72, 0x54, 0x72, 0x61, 0x69, 0x74, 0x2e, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x52, 0x06, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x54, 0x72, 0x61, - 0x69, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2d, - 0x0a, 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, - 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x66, 0x52, 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x12, 0x53, 0x0a, - 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, 
0x73, 0x65, 0x72, 0x54, 0x72, 0x61, 0x69, 0x74, 0x2e, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x6f, 0x67, 0x69, - 0x6e, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0c, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x21, 0x0a, - 0x0c, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6d, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x65, 0x49, 0x64, 0x73, - 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x6c, 0x61, 0x73, - 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0a, 0x6d, 0x66, 0x61, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x73, 0x65, - 0x72, 0x54, 0x72, 0x61, 0x69, 0x74, 0x2e, 0x4d, 0x46, 0x41, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x09, 0x6d, 0x66, 0x61, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x43, 0x0a, 0x0a, 0x73, - 0x73, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x54, 0x72, 0x61, 0x69, 0x74, 0x2e, 0x53, 0x53, 0x4f, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x09, 0x73, 0x73, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x52, 0x0a, 0x0f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x73, 0x65, 0x72, - 0x54, 0x72, 0x61, 0x69, 0x74, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x49, 0x0a, 0x05, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x21, 0x0a, - 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x60, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x1a, - 0xdc, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 
0x28, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x73, 0x65, - 0x72, 0x54, 0x72, 0x61, 0x69, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, - 0x28, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, - 0x5d, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x4e, 0x41, 0x42, - 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x03, 0x1a, 0x2c, - 0x0a, 0x09, 0x4d, 0x46, 0x41, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, - 0x66, 0x61, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x6d, 0x66, 0x61, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x2c, 0x0a, 0x09, - 0x53, 0x53, 0x4f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x73, 0x6f, - 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x73, 0x73, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0xa3, 0x01, 0x0a, 0x0e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x6d, 0x69, 0x64, 0x64, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x69, 0x64, 0x64, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, - 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x22, 0x76, 0x0a, 0x0b, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1c, 0x0a, 0x18, 0x41, 0x43, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, - 0x12, 0x41, 0x43, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x48, 0x55, - 0x4d, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x43, 0x43, 0x4f, 0x55, 0x4e, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x02, 0x12, - 0x17, 0x0a, 0x13, 0x41, 0x43, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x10, 0x03, 0x22, 0x6e, 0x0a, 0x0a, 0x47, 0x72, 0x6f, 
0x75, - 0x70, 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, 0x2d, 0x0a, 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x66, 0x52, - 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x3e, 0x0a, 0x09, 0x52, 0x6f, 0x6c, 0x65, - 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x9a, 0x03, 0x0a, 0x08, 0x41, 0x70, 0x70, - 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, 0x35, 0x0a, 0x08, 0x68, 0x65, 0x6c, 0x70, 0x5f, 0x75, 0x72, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1a, 0xfa, 0x42, 0x17, 0x72, 0x15, 0x20, 0x01, - 0x28, 0x80, 0x08, 0x3a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0xd0, 0x01, 0x01, - 0x88, 0x01, 0x01, 0x52, 0x07, 0x68, 0x65, 0x6c, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x04, - 0x69, 0x63, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x73, 0x73, - 0x65, 0x74, 0x52, 0x65, 0x66, 0x52, 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x04, 0x6c, - 0x6f, 0x67, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x73, 0x73, 0x65, - 0x74, 0x52, 0x65, 0x66, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x6f, 0x12, 0x31, 0x0a, 0x07, 0x70, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x37, 0x0a, - 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, - 0x70, 0x70, 0x54, 0x72, 0x61, 0x69, 0x74, 0x2e, 0x41, 0x70, 0x70, 0x46, 0x6c, 0x61, 0x67, 0x52, - 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x07, 0x41, 0x70, 0x70, 0x46, 0x6c, - 0x61, 0x67, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x50, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x41, 0x50, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x5f, 0x48, 0x49, 0x44, 0x44, 0x45, 0x4e, 0x10, - 0x01, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x50, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x5f, 0x49, 0x4e, - 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x50, 0x50, 0x5f, - 0x46, 0x4c, 0x41, 0x47, 0x5f, 0x53, 0x41, 0x4d, 0x4c, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x41, - 0x50, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x5f, 0x4f, 0x49, 0x44, 0x43, 0x10, 0x04, 0x12, 0x15, - 0x0a, 0x11, 0x41, 0x50, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x5f, 0x42, 0x4f, 0x4f, 0x4b, 0x4d, - 0x41, 0x52, 0x4b, 
0x10, 0x05, 0x22, 0xf3, 0x02, 0x0a, 0x0b, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x3c, - 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3f, 0x0a, 0x0d, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, - 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x49, 0x64, 0x12, 0x3c, 0x0a, - 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, - 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, - 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, - 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +func (x *UserTrait_StructuredName) SetFamilyName(v string) { + x.FamilyName = v +} -var ( - file_c1_connector_v2_annotation_trait_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_trait_proto_rawDescData []byte -) +func (x *UserTrait_StructuredName) SetMiddleNames(v []string) { + x.MiddleNames = v +} + +func (x *UserTrait_StructuredName) SetPrefix(v string) { + x.Prefix = v +} -func file_c1_connector_v2_annotation_trait_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_trait_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_trait_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_trait_proto_rawDesc), len(file_c1_connector_v2_annotation_trait_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_trait_proto_rawDescData +func (x 
*UserTrait_StructuredName) SetSuffix(v string) { + x.Suffix = v } +type UserTrait_StructuredName_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + GivenName string + FamilyName string + MiddleNames []string + Prefix string + Suffix string +} + +func (b0 UserTrait_StructuredName_builder) Build() *UserTrait_StructuredName { + m0 := &UserTrait_StructuredName{} + b, x := &b0, m0 + _, _ = b, x + x.GivenName = b.GivenName + x.FamilyName = b.FamilyName + x.MiddleNames = b.MiddleNames + x.Prefix = b.Prefix + x.Suffix = b.Suffix + return m0 +} + +var File_c1_connector_v2_annotation_trait_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_trait_proto_rawDesc = "" + + "\n" + + "&c1/connector/v2/annotation_trait.proto\x12\x0fc1.connector.v2\x1a\x1bc1/connector/v2/asset.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\x97\v\n" + + "\tUserTrait\x128\n" + + "\x06emails\x18\x01 \x03(\v2 .c1.connector.v2.UserTrait.EmailR\x06emails\x12C\n" + + "\x06status\x18\x02 \x01(\v2!.c1.connector.v2.UserTrait.StatusB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x06status\x121\n" + + "\aprofile\x18\x03 \x01(\v2\x17.google.protobuf.StructR\aprofile\x12-\n" + + "\x04icon\x18\x04 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12S\n" + + "\faccount_type\x18\x05 \x01(\x0e2&.c1.connector.v2.UserTrait.AccountTypeB\b\xfaB\x05\x82\x01\x02\x10\x01R\vaccountType\x12\x14\n" + + "\x05login\x18\x06 \x01(\tR\x05login\x12#\n" + + "\rlogin_aliases\x18\a \x03(\tR\floginAliases\x12!\n" + + "\femployee_ids\x18\r \x03(\tR\vemployeeIds\x129\n" + + "\n" + + "created_at\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x129\n" + + "\n" + + "last_login\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\tlastLogin\x12C\n" + + "\n" + + "mfa_status\x18\n" + + " \x01(\v2$.c1.connector.v2.UserTrait.MFAStatusR\tmfaStatus\x12C\n" + + "\n" + + "sso_status\x18\v \x01(\v2$.c1.connector.v2.UserTrait.SSOStatusR\tssoStatus\x12R\n" + + "\x0fstructured_name\x18\f \x01(\v2).c1.connector.v2.UserTrait.StructuredNameR\x0estructuredName\x1aI\n" + + "\x05Email\x12!\n" + + "\aaddress\x18\x01 \x01(\tB\a\xfaB\x04r\x02`\x01R\aaddress\x12\x1d\n" + + "\n" + + "is_primary\x18\x02 \x01(\bR\tisPrimary\x1a\xdc\x01\n" + + "\x06Status\x12J\n" + + "\x06status\x18\x01 \x01(\x0e2(.c1.connector.v2.UserTrait.Status.StatusB\b\xfaB\x05\x82\x01\x02\x10\x01R\x06status\x12'\n" + + "\adetails\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\adetails\"]\n" + + "\x06Status\x12\x16\n" + + "\x12STATUS_UNSPECIFIED\x10\x00\x12\x12\n" + + "\x0eSTATUS_ENABLED\x10\x01\x12\x13\n" + + "\x0fSTATUS_DISABLED\x10\x02\x12\x12\n" + + "\x0eSTATUS_DELETED\x10\x03\x1a,\n" + + "\tMFAStatus\x12\x1f\n" + + "\vmfa_enabled\x18\x01 \x01(\bR\n" + + "mfaEnabled\x1a,\n" + + "\tSSOStatus\x12\x1f\n" + + "\vsso_enabled\x18\x01 \x01(\bR\n" + + "ssoEnabled\x1a\xa3\x01\n" + + "\x0eStructuredName\x12\x1d\n" + + "\n" + + "given_name\x18\x01 \x01(\tR\tgivenName\x12\x1f\n" + + "\vfamily_name\x18\x02 \x01(\tR\n" + + "familyName\x12!\n" + + "\fmiddle_names\x18\x03 \x03(\tR\vmiddleNames\x12\x16\n" + + "\x06prefix\x18\x04 \x01(\tR\x06prefix\x12\x16\n" + + "\x06suffix\x18\x05 \x01(\tR\x06suffix\"v\n" + + "\vAccountType\x12\x1c\n" + + "\x18ACCOUNT_TYPE_UNSPECIFIED\x10\x00\x12\x16\n" + + "\x12ACCOUNT_TYPE_HUMAN\x10\x01\x12\x18\n" + + "\x14ACCOUNT_TYPE_SERVICE\x10\x02\x12\x17\n" + + "\x13ACCOUNT_TYPE_SYSTEM\x10\x03\"n\n" + + "\n" + + 
"GroupTrait\x12-\n" + + "\x04icon\x18\x01 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x121\n" + + "\aprofile\x18\x02 \x01(\v2\x17.google.protobuf.StructR\aprofile\">\n" + + "\tRoleTrait\x121\n" + + "\aprofile\x18\x01 \x01(\v2\x17.google.protobuf.StructR\aprofile\"\x9a\x03\n" + + "\bAppTrait\x125\n" + + "\bhelp_url\x18\x01 \x01(\tB\x1a\xfaB\x17r\x15 \x01(\x80\b:\bhttps://\xd0\x01\x01\x88\x01\x01R\ahelpUrl\x12-\n" + + "\x04icon\x18\x02 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12-\n" + + "\x04logo\x18\x03 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04logo\x121\n" + + "\aprofile\x18\x04 \x01(\v2\x17.google.protobuf.StructR\aprofile\x127\n" + + "\x05flags\x18\x05 \x03(\x0e2!.c1.connector.v2.AppTrait.AppFlagR\x05flags\"\x8c\x01\n" + + "\aAppFlag\x12\x18\n" + + "\x14APP_FLAG_UNSPECIFIED\x10\x00\x12\x13\n" + + "\x0fAPP_FLAG_HIDDEN\x10\x01\x12\x15\n" + + "\x11APP_FLAG_INACTIVE\x10\x02\x12\x11\n" + + "\rAPP_FLAG_SAML\x10\x03\x12\x11\n" + + "\rAPP_FLAG_OIDC\x10\x04\x12\x15\n" + + "\x11APP_FLAG_BOOKMARK\x10\x05\"\xf3\x02\n" + + "\vSecretTrait\x121\n" + + "\aprofile\x18\x01 \x01(\v2\x17.google.protobuf.StructR\aprofile\x129\n" + + "\n" + + "created_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x129\n" + + "\n" + + "expires_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\x12<\n" + + "\flast_used_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "lastUsedAt\x12?\n" + + "\rcreated_by_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\vcreatedById\x12<\n" + + "\videntity_id\x18\x06 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "identityIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_trait_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_c1_connector_v2_annotation_trait_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_c1_connector_v2_annotation_trait_proto_goTypes = []any{ diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait_protoopaque.pb.go new file mode 100644 index 00000000..c7a4531b --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait_protoopaque.pb.go @@ -0,0 +1,1527 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_trait.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type UserTrait_AccountType int32 + +const ( + UserTrait_ACCOUNT_TYPE_UNSPECIFIED UserTrait_AccountType = 0 + UserTrait_ACCOUNT_TYPE_HUMAN UserTrait_AccountType = 1 + UserTrait_ACCOUNT_TYPE_SERVICE UserTrait_AccountType = 2 + UserTrait_ACCOUNT_TYPE_SYSTEM UserTrait_AccountType = 3 +) + +// Enum value maps for UserTrait_AccountType. 
+var ( + UserTrait_AccountType_name = map[int32]string{ + 0: "ACCOUNT_TYPE_UNSPECIFIED", + 1: "ACCOUNT_TYPE_HUMAN", + 2: "ACCOUNT_TYPE_SERVICE", + 3: "ACCOUNT_TYPE_SYSTEM", + } + UserTrait_AccountType_value = map[string]int32{ + "ACCOUNT_TYPE_UNSPECIFIED": 0, + "ACCOUNT_TYPE_HUMAN": 1, + "ACCOUNT_TYPE_SERVICE": 2, + "ACCOUNT_TYPE_SYSTEM": 3, + } +) + +func (x UserTrait_AccountType) Enum() *UserTrait_AccountType { + p := new(UserTrait_AccountType) + *p = x + return p +} + +func (x UserTrait_AccountType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UserTrait_AccountType) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_annotation_trait_proto_enumTypes[0].Descriptor() +} + +func (UserTrait_AccountType) Type() protoreflect.EnumType { + return &file_c1_connector_v2_annotation_trait_proto_enumTypes[0] +} + +func (x UserTrait_AccountType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type UserTrait_Status_Status int32 + +const ( + UserTrait_Status_STATUS_UNSPECIFIED UserTrait_Status_Status = 0 + UserTrait_Status_STATUS_ENABLED UserTrait_Status_Status = 1 + UserTrait_Status_STATUS_DISABLED UserTrait_Status_Status = 2 + UserTrait_Status_STATUS_DELETED UserTrait_Status_Status = 3 +) + +// Enum value maps for UserTrait_Status_Status. +var ( + UserTrait_Status_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "STATUS_ENABLED", + 2: "STATUS_DISABLED", + 3: "STATUS_DELETED", + } + UserTrait_Status_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "STATUS_ENABLED": 1, + "STATUS_DISABLED": 2, + "STATUS_DELETED": 3, + } +) + +func (x UserTrait_Status_Status) Enum() *UserTrait_Status_Status { + p := new(UserTrait_Status_Status) + *p = x + return p +} + +func (x UserTrait_Status_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UserTrait_Status_Status) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_annotation_trait_proto_enumTypes[1].Descriptor() +} + +func (UserTrait_Status_Status) Type() protoreflect.EnumType { + return &file_c1_connector_v2_annotation_trait_proto_enumTypes[1] +} + +func (x UserTrait_Status_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type AppTrait_AppFlag int32 + +const ( + AppTrait_APP_FLAG_UNSPECIFIED AppTrait_AppFlag = 0 + AppTrait_APP_FLAG_HIDDEN AppTrait_AppFlag = 1 + AppTrait_APP_FLAG_INACTIVE AppTrait_AppFlag = 2 + AppTrait_APP_FLAG_SAML AppTrait_AppFlag = 3 + AppTrait_APP_FLAG_OIDC AppTrait_AppFlag = 4 + AppTrait_APP_FLAG_BOOKMARK AppTrait_AppFlag = 5 +) + +// Enum value maps for AppTrait_AppFlag. 
+var ( + AppTrait_AppFlag_name = map[int32]string{ + 0: "APP_FLAG_UNSPECIFIED", + 1: "APP_FLAG_HIDDEN", + 2: "APP_FLAG_INACTIVE", + 3: "APP_FLAG_SAML", + 4: "APP_FLAG_OIDC", + 5: "APP_FLAG_BOOKMARK", + } + AppTrait_AppFlag_value = map[string]int32{ + "APP_FLAG_UNSPECIFIED": 0, + "APP_FLAG_HIDDEN": 1, + "APP_FLAG_INACTIVE": 2, + "APP_FLAG_SAML": 3, + "APP_FLAG_OIDC": 4, + "APP_FLAG_BOOKMARK": 5, + } +) + +func (x AppTrait_AppFlag) Enum() *AppTrait_AppFlag { + p := new(AppTrait_AppFlag) + *p = x + return p +} + +func (x AppTrait_AppFlag) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AppTrait_AppFlag) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_annotation_trait_proto_enumTypes[2].Descriptor() +} + +func (AppTrait_AppFlag) Type() protoreflect.EnumType { + return &file_c1_connector_v2_annotation_trait_proto_enumTypes[2] +} + +func (x AppTrait_AppFlag) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type UserTrait struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Emails *[]*UserTrait_Email `protobuf:"bytes,1,rep,name=emails,proto3"` + xxx_hidden_Status *UserTrait_Status `protobuf:"bytes,2,opt,name=status,proto3"` + xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,3,opt,name=profile,proto3"` + xxx_hidden_Icon *AssetRef `protobuf:"bytes,4,opt,name=icon,proto3"` + xxx_hidden_AccountType UserTrait_AccountType `protobuf:"varint,5,opt,name=account_type,json=accountType,proto3,enum=c1.connector.v2.UserTrait_AccountType"` + xxx_hidden_Login string `protobuf:"bytes,6,opt,name=login,proto3"` + xxx_hidden_LoginAliases []string `protobuf:"bytes,7,rep,name=login_aliases,json=loginAliases,proto3"` + xxx_hidden_EmployeeIds []string `protobuf:"bytes,13,rep,name=employee_ids,json=employeeIds,proto3"` + xxx_hidden_CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3"` + xxx_hidden_LastLogin *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=last_login,json=lastLogin,proto3"` + xxx_hidden_MfaStatus *UserTrait_MFAStatus `protobuf:"bytes,10,opt,name=mfa_status,json=mfaStatus,proto3"` + xxx_hidden_SsoStatus *UserTrait_SSOStatus `protobuf:"bytes,11,opt,name=sso_status,json=ssoStatus,proto3"` + xxx_hidden_StructuredName *UserTrait_StructuredName `protobuf:"bytes,12,opt,name=structured_name,json=structuredName,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UserTrait) Reset() { + *x = UserTrait{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UserTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UserTrait) ProtoMessage() {} + +func (x *UserTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UserTrait) GetEmails() []*UserTrait_Email { + if x != nil { + if x.xxx_hidden_Emails != nil { + return *x.xxx_hidden_Emails + } + } + return nil +} + +func (x *UserTrait) GetStatus() *UserTrait_Status { + if x != nil { + return x.xxx_hidden_Status + } + return nil +} + +func (x *UserTrait) GetProfile() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Profile + } + return nil +} + +func (x *UserTrait) 
GetIcon() *AssetRef { + if x != nil { + return x.xxx_hidden_Icon + } + return nil +} + +func (x *UserTrait) GetAccountType() UserTrait_AccountType { + if x != nil { + return x.xxx_hidden_AccountType + } + return UserTrait_ACCOUNT_TYPE_UNSPECIFIED +} + +func (x *UserTrait) GetLogin() string { + if x != nil { + return x.xxx_hidden_Login + } + return "" +} + +func (x *UserTrait) GetLoginAliases() []string { + if x != nil { + return x.xxx_hidden_LoginAliases + } + return nil +} + +func (x *UserTrait) GetEmployeeIds() []string { + if x != nil { + return x.xxx_hidden_EmployeeIds + } + return nil +} + +func (x *UserTrait) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_CreatedAt + } + return nil +} + +func (x *UserTrait) GetLastLogin() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_LastLogin + } + return nil +} + +func (x *UserTrait) GetMfaStatus() *UserTrait_MFAStatus { + if x != nil { + return x.xxx_hidden_MfaStatus + } + return nil +} + +func (x *UserTrait) GetSsoStatus() *UserTrait_SSOStatus { + if x != nil { + return x.xxx_hidden_SsoStatus + } + return nil +} + +func (x *UserTrait) GetStructuredName() *UserTrait_StructuredName { + if x != nil { + return x.xxx_hidden_StructuredName + } + return nil +} + +func (x *UserTrait) SetEmails(v []*UserTrait_Email) { + x.xxx_hidden_Emails = &v +} + +func (x *UserTrait) SetStatus(v *UserTrait_Status) { + x.xxx_hidden_Status = v +} + +func (x *UserTrait) SetProfile(v *structpb.Struct) { + x.xxx_hidden_Profile = v +} + +func (x *UserTrait) SetIcon(v *AssetRef) { + x.xxx_hidden_Icon = v +} + +func (x *UserTrait) SetAccountType(v UserTrait_AccountType) { + x.xxx_hidden_AccountType = v +} + +func (x *UserTrait) SetLogin(v string) { + x.xxx_hidden_Login = v +} + +func (x *UserTrait) SetLoginAliases(v []string) { + x.xxx_hidden_LoginAliases = v +} + +func (x *UserTrait) SetEmployeeIds(v []string) { + x.xxx_hidden_EmployeeIds = v +} + +func (x *UserTrait) SetCreatedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_CreatedAt = v +} + +func (x *UserTrait) SetLastLogin(v *timestamppb.Timestamp) { + x.xxx_hidden_LastLogin = v +} + +func (x *UserTrait) SetMfaStatus(v *UserTrait_MFAStatus) { + x.xxx_hidden_MfaStatus = v +} + +func (x *UserTrait) SetSsoStatus(v *UserTrait_SSOStatus) { + x.xxx_hidden_SsoStatus = v +} + +func (x *UserTrait) SetStructuredName(v *UserTrait_StructuredName) { + x.xxx_hidden_StructuredName = v +} + +func (x *UserTrait) HasStatus() bool { + if x == nil { + return false + } + return x.xxx_hidden_Status != nil +} + +func (x *UserTrait) HasProfile() bool { + if x == nil { + return false + } + return x.xxx_hidden_Profile != nil +} + +func (x *UserTrait) HasIcon() bool { + if x == nil { + return false + } + return x.xxx_hidden_Icon != nil +} + +func (x *UserTrait) HasCreatedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_CreatedAt != nil +} + +func (x *UserTrait) HasLastLogin() bool { + if x == nil { + return false + } + return x.xxx_hidden_LastLogin != nil +} + +func (x *UserTrait) HasMfaStatus() bool { + if x == nil { + return false + } + return x.xxx_hidden_MfaStatus != nil +} + +func (x *UserTrait) HasSsoStatus() bool { + if x == nil { + return false + } + return x.xxx_hidden_SsoStatus != nil +} + +func (x *UserTrait) HasStructuredName() bool { + if x == nil { + return false + } + return x.xxx_hidden_StructuredName != nil +} + +func (x *UserTrait) ClearStatus() { + x.xxx_hidden_Status = nil +} + +func (x *UserTrait) ClearProfile() { + x.xxx_hidden_Profile = nil +} + +func (x 
*UserTrait) ClearIcon() { + x.xxx_hidden_Icon = nil +} + +func (x *UserTrait) ClearCreatedAt() { + x.xxx_hidden_CreatedAt = nil +} + +func (x *UserTrait) ClearLastLogin() { + x.xxx_hidden_LastLogin = nil +} + +func (x *UserTrait) ClearMfaStatus() { + x.xxx_hidden_MfaStatus = nil +} + +func (x *UserTrait) ClearSsoStatus() { + x.xxx_hidden_SsoStatus = nil +} + +func (x *UserTrait) ClearStructuredName() { + x.xxx_hidden_StructuredName = nil +} + +type UserTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Emails []*UserTrait_Email + Status *UserTrait_Status + Profile *structpb.Struct + Icon *AssetRef + AccountType UserTrait_AccountType + // The user's login + Login string + // Any additional login aliases for the user + LoginAliases []string + EmployeeIds []string + CreatedAt *timestamppb.Timestamp + LastLogin *timestamppb.Timestamp + MfaStatus *UserTrait_MFAStatus + SsoStatus *UserTrait_SSOStatus + StructuredName *UserTrait_StructuredName +} + +func (b0 UserTrait_builder) Build() *UserTrait { + m0 := &UserTrait{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Emails = &b.Emails + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Profile = b.Profile + x.xxx_hidden_Icon = b.Icon + x.xxx_hidden_AccountType = b.AccountType + x.xxx_hidden_Login = b.Login + x.xxx_hidden_LoginAliases = b.LoginAliases + x.xxx_hidden_EmployeeIds = b.EmployeeIds + x.xxx_hidden_CreatedAt = b.CreatedAt + x.xxx_hidden_LastLogin = b.LastLogin + x.xxx_hidden_MfaStatus = b.MfaStatus + x.xxx_hidden_SsoStatus = b.SsoStatus + x.xxx_hidden_StructuredName = b.StructuredName + return m0 +} + +type GroupTrait struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Icon *AssetRef `protobuf:"bytes,1,opt,name=icon,proto3"` + xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,2,opt,name=profile,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GroupTrait) Reset() { + *x = GroupTrait{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GroupTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GroupTrait) ProtoMessage() {} + +func (x *GroupTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GroupTrait) GetIcon() *AssetRef { + if x != nil { + return x.xxx_hidden_Icon + } + return nil +} + +func (x *GroupTrait) GetProfile() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Profile + } + return nil +} + +func (x *GroupTrait) SetIcon(v *AssetRef) { + x.xxx_hidden_Icon = v +} + +func (x *GroupTrait) SetProfile(v *structpb.Struct) { + x.xxx_hidden_Profile = v +} + +func (x *GroupTrait) HasIcon() bool { + if x == nil { + return false + } + return x.xxx_hidden_Icon != nil +} + +func (x *GroupTrait) HasProfile() bool { + if x == nil { + return false + } + return x.xxx_hidden_Profile != nil +} + +func (x *GroupTrait) ClearIcon() { + x.xxx_hidden_Icon = nil +} + +func (x *GroupTrait) ClearProfile() { + x.xxx_hidden_Profile = nil +} + +type GroupTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Icon *AssetRef + Profile *structpb.Struct +} + +func (b0 GroupTrait_builder) Build() *GroupTrait { + m0 := &GroupTrait{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Icon = b.Icon + x.xxx_hidden_Profile = b.Profile + return m0 +} + +type RoleTrait struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,1,opt,name=profile,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RoleTrait) Reset() { + *x = RoleTrait{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RoleTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoleTrait) ProtoMessage() {} + +func (x *RoleTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RoleTrait) GetProfile() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Profile + } + return nil +} + +func (x *RoleTrait) SetProfile(v *structpb.Struct) { + x.xxx_hidden_Profile = v +} + +func (x *RoleTrait) HasProfile() bool { + if x == nil { + return false + } + return x.xxx_hidden_Profile != nil +} + +func (x *RoleTrait) ClearProfile() { + x.xxx_hidden_Profile = nil +} + +type RoleTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Profile *structpb.Struct +} + +func (b0 RoleTrait_builder) Build() *RoleTrait { + m0 := &RoleTrait{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Profile = b.Profile + return m0 +} + +type AppTrait struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_HelpUrl string `protobuf:"bytes,1,opt,name=help_url,json=helpUrl,proto3"` + xxx_hidden_Icon *AssetRef `protobuf:"bytes,2,opt,name=icon,proto3"` + xxx_hidden_Logo *AssetRef `protobuf:"bytes,3,opt,name=logo,proto3"` + xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,4,opt,name=profile,proto3"` + xxx_hidden_Flags []AppTrait_AppFlag `protobuf:"varint,5,rep,packed,name=flags,proto3,enum=c1.connector.v2.AppTrait_AppFlag"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AppTrait) Reset() { + *x = AppTrait{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AppTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppTrait) ProtoMessage() {} + +func (x *AppTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AppTrait) GetHelpUrl() string { + if x != nil { + return x.xxx_hidden_HelpUrl + } + return "" +} + +func (x *AppTrait) GetIcon() *AssetRef { + if x != nil { + return x.xxx_hidden_Icon + } + return nil +} + +func (x *AppTrait) GetLogo() *AssetRef { + if x != nil { + return x.xxx_hidden_Logo + } + return nil +} + +func (x *AppTrait) GetProfile() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Profile + } + return nil +} + +func (x *AppTrait) GetFlags() []AppTrait_AppFlag { + if x != nil { + 
return x.xxx_hidden_Flags + } + return nil +} + +func (x *AppTrait) SetHelpUrl(v string) { + x.xxx_hidden_HelpUrl = v +} + +func (x *AppTrait) SetIcon(v *AssetRef) { + x.xxx_hidden_Icon = v +} + +func (x *AppTrait) SetLogo(v *AssetRef) { + x.xxx_hidden_Logo = v +} + +func (x *AppTrait) SetProfile(v *structpb.Struct) { + x.xxx_hidden_Profile = v +} + +func (x *AppTrait) SetFlags(v []AppTrait_AppFlag) { + x.xxx_hidden_Flags = v +} + +func (x *AppTrait) HasIcon() bool { + if x == nil { + return false + } + return x.xxx_hidden_Icon != nil +} + +func (x *AppTrait) HasLogo() bool { + if x == nil { + return false + } + return x.xxx_hidden_Logo != nil +} + +func (x *AppTrait) HasProfile() bool { + if x == nil { + return false + } + return x.xxx_hidden_Profile != nil +} + +func (x *AppTrait) ClearIcon() { + x.xxx_hidden_Icon = nil +} + +func (x *AppTrait) ClearLogo() { + x.xxx_hidden_Logo = nil +} + +func (x *AppTrait) ClearProfile() { + x.xxx_hidden_Profile = nil +} + +type AppTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + HelpUrl string + Icon *AssetRef + Logo *AssetRef + Profile *structpb.Struct + Flags []AppTrait_AppFlag +} + +func (b0 AppTrait_builder) Build() *AppTrait { + m0 := &AppTrait{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_HelpUrl = b.HelpUrl + x.xxx_hidden_Icon = b.Icon + x.xxx_hidden_Logo = b.Logo + x.xxx_hidden_Profile = b.Profile + x.xxx_hidden_Flags = b.Flags + return m0 +} + +type SecretTrait struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,1,opt,name=profile,proto3"` + xxx_hidden_CreatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3"` + xxx_hidden_ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expires_at,json=expiresAt,proto3"` + xxx_hidden_LastUsedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_used_at,json=lastUsedAt,proto3"` + xxx_hidden_CreatedById *ResourceId `protobuf:"bytes,5,opt,name=created_by_id,json=createdById,proto3"` + xxx_hidden_IdentityId *ResourceId `protobuf:"bytes,6,opt,name=identity_id,json=identityId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecretTrait) Reset() { + *x = SecretTrait{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecretTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecretTrait) ProtoMessage() {} + +func (x *SecretTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecretTrait) GetProfile() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Profile + } + return nil +} + +func (x *SecretTrait) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_CreatedAt + } + return nil +} + +func (x *SecretTrait) GetExpiresAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_ExpiresAt + } + return nil +} + +func (x *SecretTrait) GetLastUsedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_LastUsedAt + } + return nil +} + +func (x *SecretTrait) GetCreatedById() *ResourceId { + if x != nil { + return x.xxx_hidden_CreatedById + } + return nil +} + 
+func (x *SecretTrait) GetIdentityId() *ResourceId { + if x != nil { + return x.xxx_hidden_IdentityId + } + return nil +} + +func (x *SecretTrait) SetProfile(v *structpb.Struct) { + x.xxx_hidden_Profile = v +} + +func (x *SecretTrait) SetCreatedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_CreatedAt = v +} + +func (x *SecretTrait) SetExpiresAt(v *timestamppb.Timestamp) { + x.xxx_hidden_ExpiresAt = v +} + +func (x *SecretTrait) SetLastUsedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_LastUsedAt = v +} + +func (x *SecretTrait) SetCreatedById(v *ResourceId) { + x.xxx_hidden_CreatedById = v +} + +func (x *SecretTrait) SetIdentityId(v *ResourceId) { + x.xxx_hidden_IdentityId = v +} + +func (x *SecretTrait) HasProfile() bool { + if x == nil { + return false + } + return x.xxx_hidden_Profile != nil +} + +func (x *SecretTrait) HasCreatedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_CreatedAt != nil +} + +func (x *SecretTrait) HasExpiresAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExpiresAt != nil +} + +func (x *SecretTrait) HasLastUsedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_LastUsedAt != nil +} + +func (x *SecretTrait) HasCreatedById() bool { + if x == nil { + return false + } + return x.xxx_hidden_CreatedById != nil +} + +func (x *SecretTrait) HasIdentityId() bool { + if x == nil { + return false + } + return x.xxx_hidden_IdentityId != nil +} + +func (x *SecretTrait) ClearProfile() { + x.xxx_hidden_Profile = nil +} + +func (x *SecretTrait) ClearCreatedAt() { + x.xxx_hidden_CreatedAt = nil +} + +func (x *SecretTrait) ClearExpiresAt() { + x.xxx_hidden_ExpiresAt = nil +} + +func (x *SecretTrait) ClearLastUsedAt() { + x.xxx_hidden_LastUsedAt = nil +} + +func (x *SecretTrait) ClearCreatedById() { + x.xxx_hidden_CreatedById = nil +} + +func (x *SecretTrait) ClearIdentityId() { + x.xxx_hidden_IdentityId = nil +} + +type SecretTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Profile *structpb.Struct + CreatedAt *timestamppb.Timestamp + ExpiresAt *timestamppb.Timestamp + LastUsedAt *timestamppb.Timestamp + CreatedById *ResourceId + IdentityId *ResourceId +} + +func (b0 SecretTrait_builder) Build() *SecretTrait { + m0 := &SecretTrait{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Profile = b.Profile + x.xxx_hidden_CreatedAt = b.CreatedAt + x.xxx_hidden_ExpiresAt = b.ExpiresAt + x.xxx_hidden_LastUsedAt = b.LastUsedAt + x.xxx_hidden_CreatedById = b.CreatedById + x.xxx_hidden_IdentityId = b.IdentityId + return m0 +} + +type UserTrait_Email struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Address string `protobuf:"bytes,1,opt,name=address,proto3"` + xxx_hidden_IsPrimary bool `protobuf:"varint,2,opt,name=is_primary,json=isPrimary,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UserTrait_Email) Reset() { + *x = UserTrait_Email{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UserTrait_Email) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UserTrait_Email) ProtoMessage() {} + +func (x *UserTrait_Email) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UserTrait_Email) GetAddress() string { + if x != nil { + return x.xxx_hidden_Address + } + return "" +} + +func (x *UserTrait_Email) GetIsPrimary() bool { + if x != nil { + return x.xxx_hidden_IsPrimary + } + return false +} + +func (x *UserTrait_Email) SetAddress(v string) { + x.xxx_hidden_Address = v +} + +func (x *UserTrait_Email) SetIsPrimary(v bool) { + x.xxx_hidden_IsPrimary = v +} + +type UserTrait_Email_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Address string + // Indicates if this is the user's primary email. Only one entry can be marked as primary. 
+ IsPrimary bool +} + +func (b0 UserTrait_Email_builder) Build() *UserTrait_Email { + m0 := &UserTrait_Email{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Address = b.Address + x.xxx_hidden_IsPrimary = b.IsPrimary + return m0 +} + +type UserTrait_Status struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Status UserTrait_Status_Status `protobuf:"varint,1,opt,name=status,proto3,enum=c1.connector.v2.UserTrait_Status_Status"` + xxx_hidden_Details string `protobuf:"bytes,2,opt,name=details,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UserTrait_Status) Reset() { + *x = UserTrait_Status{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UserTrait_Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UserTrait_Status) ProtoMessage() {} + +func (x *UserTrait_Status) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UserTrait_Status) GetStatus() UserTrait_Status_Status { + if x != nil { + return x.xxx_hidden_Status + } + return UserTrait_Status_STATUS_UNSPECIFIED +} + +func (x *UserTrait_Status) GetDetails() string { + if x != nil { + return x.xxx_hidden_Details + } + return "" +} + +func (x *UserTrait_Status) SetStatus(v UserTrait_Status_Status) { + x.xxx_hidden_Status = v +} + +func (x *UserTrait_Status) SetDetails(v string) { + x.xxx_hidden_Details = v +} + +type UserTrait_Status_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Status UserTrait_Status_Status + Details string +} + +func (b0 UserTrait_Status_builder) Build() *UserTrait_Status { + m0 := &UserTrait_Status{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Details = b.Details + return m0 +} + +type UserTrait_MFAStatus struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_MfaEnabled bool `protobuf:"varint,1,opt,name=mfa_enabled,json=mfaEnabled,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UserTrait_MFAStatus) Reset() { + *x = UserTrait_MFAStatus{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UserTrait_MFAStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UserTrait_MFAStatus) ProtoMessage() {} + +func (x *UserTrait_MFAStatus) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UserTrait_MFAStatus) GetMfaEnabled() bool { + if x != nil { + return x.xxx_hidden_MfaEnabled + } + return false +} + +func (x *UserTrait_MFAStatus) SetMfaEnabled(v bool) { + x.xxx_hidden_MfaEnabled = v +} + +type UserTrait_MFAStatus_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + MfaEnabled bool +} + +func (b0 UserTrait_MFAStatus_builder) Build() *UserTrait_MFAStatus { + m0 := &UserTrait_MFAStatus{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_MfaEnabled = b.MfaEnabled + return m0 +} + +type UserTrait_SSOStatus struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SsoEnabled bool `protobuf:"varint,1,opt,name=sso_enabled,json=ssoEnabled,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UserTrait_SSOStatus) Reset() { + *x = UserTrait_SSOStatus{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UserTrait_SSOStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UserTrait_SSOStatus) ProtoMessage() {} + +func (x *UserTrait_SSOStatus) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UserTrait_SSOStatus) GetSsoEnabled() bool { + if x != nil { + return x.xxx_hidden_SsoEnabled + } + return false +} + +func (x *UserTrait_SSOStatus) SetSsoEnabled(v bool) { + x.xxx_hidden_SsoEnabled = v +} + +type UserTrait_SSOStatus_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SsoEnabled bool +} + +func (b0 UserTrait_SSOStatus_builder) Build() *UserTrait_SSOStatus { + m0 := &UserTrait_SSOStatus{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SsoEnabled = b.SsoEnabled + return m0 +} + +type UserTrait_StructuredName struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_GivenName string `protobuf:"bytes,1,opt,name=given_name,json=givenName,proto3"` + xxx_hidden_FamilyName string `protobuf:"bytes,2,opt,name=family_name,json=familyName,proto3"` + xxx_hidden_MiddleNames []string `protobuf:"bytes,3,rep,name=middle_names,json=middleNames,proto3"` + xxx_hidden_Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3"` + xxx_hidden_Suffix string `protobuf:"bytes,5,opt,name=suffix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UserTrait_StructuredName) Reset() { + *x = UserTrait_StructuredName{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UserTrait_StructuredName) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UserTrait_StructuredName) ProtoMessage() {} + +func (x *UserTrait_StructuredName) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UserTrait_StructuredName) GetGivenName() string { + if x != nil { + return x.xxx_hidden_GivenName + } + return "" +} + +func (x *UserTrait_StructuredName) GetFamilyName() string { + if x != nil { + return x.xxx_hidden_FamilyName + } + return "" +} + +func (x *UserTrait_StructuredName) GetMiddleNames() []string { + if x != nil { + return x.xxx_hidden_MiddleNames + } + return nil +} + +func (x *UserTrait_StructuredName) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + 
+func (x *UserTrait_StructuredName) GetSuffix() string { + if x != nil { + return x.xxx_hidden_Suffix + } + return "" +} + +func (x *UserTrait_StructuredName) SetGivenName(v string) { + x.xxx_hidden_GivenName = v +} + +func (x *UserTrait_StructuredName) SetFamilyName(v string) { + x.xxx_hidden_FamilyName = v +} + +func (x *UserTrait_StructuredName) SetMiddleNames(v []string) { + x.xxx_hidden_MiddleNames = v +} + +func (x *UserTrait_StructuredName) SetPrefix(v string) { + x.xxx_hidden_Prefix = v +} + +func (x *UserTrait_StructuredName) SetSuffix(v string) { + x.xxx_hidden_Suffix = v +} + +type UserTrait_StructuredName_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + GivenName string + FamilyName string + MiddleNames []string + Prefix string + Suffix string +} + +func (b0 UserTrait_StructuredName_builder) Build() *UserTrait_StructuredName { + m0 := &UserTrait_StructuredName{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_GivenName = b.GivenName + x.xxx_hidden_FamilyName = b.FamilyName + x.xxx_hidden_MiddleNames = b.MiddleNames + x.xxx_hidden_Prefix = b.Prefix + x.xxx_hidden_Suffix = b.Suffix + return m0 +} + +var File_c1_connector_v2_annotation_trait_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_trait_proto_rawDesc = "" + + "\n" + + "&c1/connector/v2/annotation_trait.proto\x12\x0fc1.connector.v2\x1a\x1bc1/connector/v2/asset.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\x97\v\n" + + "\tUserTrait\x128\n" + + "\x06emails\x18\x01 \x03(\v2 .c1.connector.v2.UserTrait.EmailR\x06emails\x12C\n" + + "\x06status\x18\x02 \x01(\v2!.c1.connector.v2.UserTrait.StatusB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x06status\x121\n" + + "\aprofile\x18\x03 \x01(\v2\x17.google.protobuf.StructR\aprofile\x12-\n" + + "\x04icon\x18\x04 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12S\n" + + "\faccount_type\x18\x05 \x01(\x0e2&.c1.connector.v2.UserTrait.AccountTypeB\b\xfaB\x05\x82\x01\x02\x10\x01R\vaccountType\x12\x14\n" + + "\x05login\x18\x06 \x01(\tR\x05login\x12#\n" + + "\rlogin_aliases\x18\a \x03(\tR\floginAliases\x12!\n" + + "\femployee_ids\x18\r \x03(\tR\vemployeeIds\x129\n" + + "\n" + + "created_at\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x129\n" + + "\n" + + "last_login\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\tlastLogin\x12C\n" + + "\n" + + "mfa_status\x18\n" + + " \x01(\v2$.c1.connector.v2.UserTrait.MFAStatusR\tmfaStatus\x12C\n" + + "\n" + + "sso_status\x18\v \x01(\v2$.c1.connector.v2.UserTrait.SSOStatusR\tssoStatus\x12R\n" + + "\x0fstructured_name\x18\f \x01(\v2).c1.connector.v2.UserTrait.StructuredNameR\x0estructuredName\x1aI\n" + + "\x05Email\x12!\n" + + "\aaddress\x18\x01 \x01(\tB\a\xfaB\x04r\x02`\x01R\aaddress\x12\x1d\n" + + "\n" + + "is_primary\x18\x02 \x01(\bR\tisPrimary\x1a\xdc\x01\n" + + "\x06Status\x12J\n" + + "\x06status\x18\x01 \x01(\x0e2(.c1.connector.v2.UserTrait.Status.StatusB\b\xfaB\x05\x82\x01\x02\x10\x01R\x06status\x12'\n" + + "\adetails\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\adetails\"]\n" + + "\x06Status\x12\x16\n" + + "\x12STATUS_UNSPECIFIED\x10\x00\x12\x12\n" + + "\x0eSTATUS_ENABLED\x10\x01\x12\x13\n" + + "\x0fSTATUS_DISABLED\x10\x02\x12\x12\n" + + "\x0eSTATUS_DELETED\x10\x03\x1a,\n" + + "\tMFAStatus\x12\x1f\n" + + "\vmfa_enabled\x18\x01 \x01(\bR\n" + + "mfaEnabled\x1a,\n" + + "\tSSOStatus\x12\x1f\n" + + "\vsso_enabled\x18\x01 \x01(\bR\n" + + 
"ssoEnabled\x1a\xa3\x01\n" + + "\x0eStructuredName\x12\x1d\n" + + "\n" + + "given_name\x18\x01 \x01(\tR\tgivenName\x12\x1f\n" + + "\vfamily_name\x18\x02 \x01(\tR\n" + + "familyName\x12!\n" + + "\fmiddle_names\x18\x03 \x03(\tR\vmiddleNames\x12\x16\n" + + "\x06prefix\x18\x04 \x01(\tR\x06prefix\x12\x16\n" + + "\x06suffix\x18\x05 \x01(\tR\x06suffix\"v\n" + + "\vAccountType\x12\x1c\n" + + "\x18ACCOUNT_TYPE_UNSPECIFIED\x10\x00\x12\x16\n" + + "\x12ACCOUNT_TYPE_HUMAN\x10\x01\x12\x18\n" + + "\x14ACCOUNT_TYPE_SERVICE\x10\x02\x12\x17\n" + + "\x13ACCOUNT_TYPE_SYSTEM\x10\x03\"n\n" + + "\n" + + "GroupTrait\x12-\n" + + "\x04icon\x18\x01 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x121\n" + + "\aprofile\x18\x02 \x01(\v2\x17.google.protobuf.StructR\aprofile\">\n" + + "\tRoleTrait\x121\n" + + "\aprofile\x18\x01 \x01(\v2\x17.google.protobuf.StructR\aprofile\"\x9a\x03\n" + + "\bAppTrait\x125\n" + + "\bhelp_url\x18\x01 \x01(\tB\x1a\xfaB\x17r\x15 \x01(\x80\b:\bhttps://\xd0\x01\x01\x88\x01\x01R\ahelpUrl\x12-\n" + + "\x04icon\x18\x02 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12-\n" + + "\x04logo\x18\x03 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04logo\x121\n" + + "\aprofile\x18\x04 \x01(\v2\x17.google.protobuf.StructR\aprofile\x127\n" + + "\x05flags\x18\x05 \x03(\x0e2!.c1.connector.v2.AppTrait.AppFlagR\x05flags\"\x8c\x01\n" + + "\aAppFlag\x12\x18\n" + + "\x14APP_FLAG_UNSPECIFIED\x10\x00\x12\x13\n" + + "\x0fAPP_FLAG_HIDDEN\x10\x01\x12\x15\n" + + "\x11APP_FLAG_INACTIVE\x10\x02\x12\x11\n" + + "\rAPP_FLAG_SAML\x10\x03\x12\x11\n" + + "\rAPP_FLAG_OIDC\x10\x04\x12\x15\n" + + "\x11APP_FLAG_BOOKMARK\x10\x05\"\xf3\x02\n" + + "\vSecretTrait\x121\n" + + "\aprofile\x18\x01 \x01(\v2\x17.google.protobuf.StructR\aprofile\x129\n" + + "\n" + + "created_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x129\n" + + "\n" + + "expires_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\x12<\n" + + "\flast_used_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "lastUsedAt\x12?\n" + + "\rcreated_by_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\vcreatedById\x12<\n" + + "\videntity_id\x18\x06 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "identityIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_trait_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_c1_connector_v2_annotation_trait_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_c1_connector_v2_annotation_trait_proto_goTypes = []any{ + (UserTrait_AccountType)(0), // 0: c1.connector.v2.UserTrait.AccountType + (UserTrait_Status_Status)(0), // 1: c1.connector.v2.UserTrait.Status.Status + (AppTrait_AppFlag)(0), // 2: c1.connector.v2.AppTrait.AppFlag + (*UserTrait)(nil), // 3: c1.connector.v2.UserTrait + (*GroupTrait)(nil), // 4: c1.connector.v2.GroupTrait + (*RoleTrait)(nil), // 5: c1.connector.v2.RoleTrait + (*AppTrait)(nil), // 6: c1.connector.v2.AppTrait + (*SecretTrait)(nil), // 7: c1.connector.v2.SecretTrait + (*UserTrait_Email)(nil), // 8: c1.connector.v2.UserTrait.Email + (*UserTrait_Status)(nil), // 9: c1.connector.v2.UserTrait.Status + (*UserTrait_MFAStatus)(nil), // 10: c1.connector.v2.UserTrait.MFAStatus + (*UserTrait_SSOStatus)(nil), // 11: c1.connector.v2.UserTrait.SSOStatus + (*UserTrait_StructuredName)(nil), // 12: c1.connector.v2.UserTrait.StructuredName + (*structpb.Struct)(nil), // 13: google.protobuf.Struct + (*AssetRef)(nil), // 14: c1.connector.v2.AssetRef + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp + (*ResourceId)(nil), // 
16: c1.connector.v2.ResourceId +} +var file_c1_connector_v2_annotation_trait_proto_depIdxs = []int32{ + 8, // 0: c1.connector.v2.UserTrait.emails:type_name -> c1.connector.v2.UserTrait.Email + 9, // 1: c1.connector.v2.UserTrait.status:type_name -> c1.connector.v2.UserTrait.Status + 13, // 2: c1.connector.v2.UserTrait.profile:type_name -> google.protobuf.Struct + 14, // 3: c1.connector.v2.UserTrait.icon:type_name -> c1.connector.v2.AssetRef + 0, // 4: c1.connector.v2.UserTrait.account_type:type_name -> c1.connector.v2.UserTrait.AccountType + 15, // 5: c1.connector.v2.UserTrait.created_at:type_name -> google.protobuf.Timestamp + 15, // 6: c1.connector.v2.UserTrait.last_login:type_name -> google.protobuf.Timestamp + 10, // 7: c1.connector.v2.UserTrait.mfa_status:type_name -> c1.connector.v2.UserTrait.MFAStatus + 11, // 8: c1.connector.v2.UserTrait.sso_status:type_name -> c1.connector.v2.UserTrait.SSOStatus + 12, // 9: c1.connector.v2.UserTrait.structured_name:type_name -> c1.connector.v2.UserTrait.StructuredName + 14, // 10: c1.connector.v2.GroupTrait.icon:type_name -> c1.connector.v2.AssetRef + 13, // 11: c1.connector.v2.GroupTrait.profile:type_name -> google.protobuf.Struct + 13, // 12: c1.connector.v2.RoleTrait.profile:type_name -> google.protobuf.Struct + 14, // 13: c1.connector.v2.AppTrait.icon:type_name -> c1.connector.v2.AssetRef + 14, // 14: c1.connector.v2.AppTrait.logo:type_name -> c1.connector.v2.AssetRef + 13, // 15: c1.connector.v2.AppTrait.profile:type_name -> google.protobuf.Struct + 2, // 16: c1.connector.v2.AppTrait.flags:type_name -> c1.connector.v2.AppTrait.AppFlag + 13, // 17: c1.connector.v2.SecretTrait.profile:type_name -> google.protobuf.Struct + 15, // 18: c1.connector.v2.SecretTrait.created_at:type_name -> google.protobuf.Timestamp + 15, // 19: c1.connector.v2.SecretTrait.expires_at:type_name -> google.protobuf.Timestamp + 15, // 20: c1.connector.v2.SecretTrait.last_used_at:type_name -> google.protobuf.Timestamp + 16, // 21: c1.connector.v2.SecretTrait.created_by_id:type_name -> c1.connector.v2.ResourceId + 16, // 22: c1.connector.v2.SecretTrait.identity_id:type_name -> c1.connector.v2.ResourceId + 1, // 23: c1.connector.v2.UserTrait.Status.status:type_name -> c1.connector.v2.UserTrait.Status.Status + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_trait_proto_init() } +func file_c1_connector_v2_annotation_trait_proto_init() { + if File_c1_connector_v2_annotation_trait_proto != nil { + return + } + file_c1_connector_v2_asset_proto_init() + file_c1_connector_v2_resource_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_trait_proto_rawDesc), len(file_c1_connector_v2_annotation_trait_proto_rawDesc)), + NumEnums: 3, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_trait_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_trait_proto_depIdxs, + EnumInfos: file_c1_connector_v2_annotation_trait_proto_enumTypes, + MessageInfos: file_c1_connector_v2_annotation_trait_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_trait_proto = out.File + 
file_c1_connector_v2_annotation_trait_proto_goTypes = nil + file_c1_connector_v2_annotation_trait_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_v1_identifier.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_v1_identifier.pb.go index 94049c61..bb38e446 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_v1_identifier.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_v1_identifier.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/annotation_v1_identifier.proto +//go:build !protoopaque + package v2 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -22,7 +23,7 @@ const ( ) type V1Identifier struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -53,11 +54,6 @@ func (x *V1Identifier) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use V1Identifier.ProtoReflect.Descriptor instead. -func (*V1Identifier) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_annotation_v1_identifier_proto_rawDescGZIP(), []int{0} -} - func (x *V1Identifier) GetId() string { if x != nil { return x.Id @@ -65,34 +61,32 @@ func (x *V1Identifier) GetId() string { return "" } -var File_c1_connector_v2_annotation_v1_identifier_proto protoreflect.FileDescriptor +func (x *V1Identifier) SetId(v string) { + x.Id = v +} -var file_c1_connector_v2_annotation_v1_identifier_proto_rawDesc = string([]byte{ - 0x0a, 0x2e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x5f, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x22, 0x1e, 0x0a, 0x0c, 0x56, 0x31, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, - 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -}) - -var ( - file_c1_connector_v2_annotation_v1_identifier_proto_rawDescOnce sync.Once - file_c1_connector_v2_annotation_v1_identifier_proto_rawDescData []byte -) +type V1Identifier_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-func file_c1_connector_v2_annotation_v1_identifier_proto_rawDescGZIP() []byte { - file_c1_connector_v2_annotation_v1_identifier_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_annotation_v1_identifier_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_v1_identifier_proto_rawDesc), len(file_c1_connector_v2_annotation_v1_identifier_proto_rawDesc))) - }) - return file_c1_connector_v2_annotation_v1_identifier_proto_rawDescData + Id string } +func (b0 V1Identifier_builder) Build() *V1Identifier { + m0 := &V1Identifier{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + return m0 +} + +var File_c1_connector_v2_annotation_v1_identifier_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_v1_identifier_proto_rawDesc = "" + + "\n" + + ".c1/connector/v2/annotation_v1_identifier.proto\x12\x0fc1.connector.v2\"\x1e\n" + + "\fV1Identifier\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02idB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_annotation_v1_identifier_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_c1_connector_v2_annotation_v1_identifier_proto_goTypes = []any{ (*V1Identifier)(nil), // 0: c1.connector.v2.V1Identifier diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_v1_identifier_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_v1_identifier_protoopaque.pb.go new file mode 100644 index 00000000..8838f2db --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_v1_identifier_protoopaque.pb.go @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_v1_identifier.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type V1Identifier struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *V1Identifier) Reset() { + *x = V1Identifier{} + mi := &file_c1_connector_v2_annotation_v1_identifier_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *V1Identifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*V1Identifier) ProtoMessage() {} + +func (x *V1Identifier) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_v1_identifier_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *V1Identifier) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *V1Identifier) SetId(v string) { + x.xxx_hidden_Id = v +} + +type V1Identifier_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string +} + +func (b0 V1Identifier_builder) Build() *V1Identifier { + m0 := &V1Identifier{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + return m0 +} + +var File_c1_connector_v2_annotation_v1_identifier_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_v1_identifier_proto_rawDesc = "" + + "\n" + + ".c1/connector/v2/annotation_v1_identifier.proto\x12\x0fc1.connector.v2\"\x1e\n" + + "\fV1Identifier\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02idB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_v1_identifier_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_v1_identifier_proto_goTypes = []any{ + (*V1Identifier)(nil), // 0: c1.connector.v2.V1Identifier +} +var file_c1_connector_v2_annotation_v1_identifier_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_v1_identifier_proto_init() } +func file_c1_connector_v2_annotation_v1_identifier_proto_init() { + if File_c1_connector_v2_annotation_v1_identifier_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_v1_identifier_proto_rawDesc), len(file_c1_connector_v2_annotation_v1_identifier_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_v1_identifier_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_v1_identifier_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_v1_identifier_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_v1_identifier_proto = out.File + file_c1_connector_v2_annotation_v1_identifier_proto_goTypes = nil + file_c1_connector_v2_annotation_v1_identifier_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/asset.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/asset.pb.go index f720a3b7..71a4df7a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/asset.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/asset.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/asset.proto +//go:build !protoopaque + package v2 import ( @@ -11,7 +13,6 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( ) type AssetRef struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -54,11 +55,6 @@ func (x *AssetRef) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AssetRef.ProtoReflect.Descriptor instead. 
-func (*AssetRef) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_asset_proto_rawDescGZIP(), []int{0} -} - func (x *AssetRef) GetId() string { if x != nil { return x.Id @@ -66,8 +62,26 @@ func (x *AssetRef) GetId() string { return "" } +func (x *AssetRef) SetId(v string) { + x.Id = v +} + +type AssetRef_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string +} + +func (b0 AssetRef_builder) Build() *AssetRef { + m0 := &AssetRef{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + return m0 +} + type AssetServiceGetAssetRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Asset *AssetRef `protobuf:"bytes,1,opt,name=asset,proto3" json:"asset,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -98,11 +112,6 @@ func (x *AssetServiceGetAssetRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AssetServiceGetAssetRequest.ProtoReflect.Descriptor instead. -func (*AssetServiceGetAssetRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_asset_proto_rawDescGZIP(), []int{1} -} - func (x *AssetServiceGetAssetRequest) GetAsset() *AssetRef { if x != nil { return x.Asset @@ -110,8 +119,37 @@ func (x *AssetServiceGetAssetRequest) GetAsset() *AssetRef { return nil } +func (x *AssetServiceGetAssetRequest) SetAsset(v *AssetRef) { + x.Asset = v +} + +func (x *AssetServiceGetAssetRequest) HasAsset() bool { + if x == nil { + return false + } + return x.Asset != nil +} + +func (x *AssetServiceGetAssetRequest) ClearAsset() { + x.Asset = nil +} + +type AssetServiceGetAssetRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Asset *AssetRef +} + +func (b0 AssetServiceGetAssetRequest_builder) Build() *AssetServiceGetAssetRequest { + m0 := &AssetServiceGetAssetRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Asset = b.Asset + return m0 +} + type AssetServiceGetAssetResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Types that are valid to be assigned to Msg: // // *AssetServiceGetAssetResponse_Metadata_ @@ -146,11 +184,6 @@ func (x *AssetServiceGetAssetResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AssetServiceGetAssetResponse.ProtoReflect.Descriptor instead. 
-func (*AssetServiceGetAssetResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_asset_proto_rawDescGZIP(), []int{2} -} - func (x *AssetServiceGetAssetResponse) GetMsg() isAssetServiceGetAssetResponse_Msg { if x != nil { return x.Msg @@ -176,6 +209,111 @@ func (x *AssetServiceGetAssetResponse) GetData() *AssetServiceGetAssetResponse_D return nil } +func (x *AssetServiceGetAssetResponse) SetMetadata(v *AssetServiceGetAssetResponse_Metadata) { + if v == nil { + x.Msg = nil + return + } + x.Msg = &AssetServiceGetAssetResponse_Metadata_{v} +} + +func (x *AssetServiceGetAssetResponse) SetData(v *AssetServiceGetAssetResponse_Data) { + if v == nil { + x.Msg = nil + return + } + x.Msg = &AssetServiceGetAssetResponse_Data_{v} +} + +func (x *AssetServiceGetAssetResponse) HasMsg() bool { + if x == nil { + return false + } + return x.Msg != nil +} + +func (x *AssetServiceGetAssetResponse) HasMetadata() bool { + if x == nil { + return false + } + _, ok := x.Msg.(*AssetServiceGetAssetResponse_Metadata_) + return ok +} + +func (x *AssetServiceGetAssetResponse) HasData() bool { + if x == nil { + return false + } + _, ok := x.Msg.(*AssetServiceGetAssetResponse_Data_) + return ok +} + +func (x *AssetServiceGetAssetResponse) ClearMsg() { + x.Msg = nil +} + +func (x *AssetServiceGetAssetResponse) ClearMetadata() { + if _, ok := x.Msg.(*AssetServiceGetAssetResponse_Metadata_); ok { + x.Msg = nil + } +} + +func (x *AssetServiceGetAssetResponse) ClearData() { + if _, ok := x.Msg.(*AssetServiceGetAssetResponse_Data_); ok { + x.Msg = nil + } +} + +const AssetServiceGetAssetResponse_Msg_not_set_case case_AssetServiceGetAssetResponse_Msg = 0 +const AssetServiceGetAssetResponse_Metadata_case case_AssetServiceGetAssetResponse_Msg = 1 +const AssetServiceGetAssetResponse_Data_case case_AssetServiceGetAssetResponse_Msg = 2 + +func (x *AssetServiceGetAssetResponse) WhichMsg() case_AssetServiceGetAssetResponse_Msg { + if x == nil { + return AssetServiceGetAssetResponse_Msg_not_set_case + } + switch x.Msg.(type) { + case *AssetServiceGetAssetResponse_Metadata_: + return AssetServiceGetAssetResponse_Metadata_case + case *AssetServiceGetAssetResponse_Data_: + return AssetServiceGetAssetResponse_Data_case + default: + return AssetServiceGetAssetResponse_Msg_not_set_case + } +} + +type AssetServiceGetAssetResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Fields of oneof Msg: + Metadata *AssetServiceGetAssetResponse_Metadata + Data *AssetServiceGetAssetResponse_Data + // -- end of Msg +} + +func (b0 AssetServiceGetAssetResponse_builder) Build() *AssetServiceGetAssetResponse { + m0 := &AssetServiceGetAssetResponse{} + b, x := &b0, m0 + _, _ = b, x + if b.Metadata != nil { + x.Msg = &AssetServiceGetAssetResponse_Metadata_{b.Metadata} + } + if b.Data != nil { + x.Msg = &AssetServiceGetAssetResponse_Data_{b.Data} + } + return m0 +} + +type case_AssetServiceGetAssetResponse_Msg protoreflect.FieldNumber + +func (x case_AssetServiceGetAssetResponse_Msg) String() string { + md := file_c1_connector_v2_asset_proto_msgTypes[2].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isAssetServiceGetAssetResponse_Msg interface { isAssetServiceGetAssetResponse_Msg() } @@ -193,7 +331,7 @@ func (*AssetServiceGetAssetResponse_Metadata_) isAssetServiceGetAssetResponse_Ms func (*AssetServiceGetAssetResponse_Data_) isAssetServiceGetAssetResponse_Msg() {} type AssetServiceGetAssetResponse_Metadata struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -224,11 +362,6 @@ func (x *AssetServiceGetAssetResponse_Metadata) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use AssetServiceGetAssetResponse_Metadata.ProtoReflect.Descriptor instead. -func (*AssetServiceGetAssetResponse_Metadata) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_asset_proto_rawDescGZIP(), []int{2, 0} -} - func (x *AssetServiceGetAssetResponse_Metadata) GetContentType() string { if x != nil { return x.ContentType @@ -236,8 +369,26 @@ func (x *AssetServiceGetAssetResponse_Metadata) GetContentType() string { return "" } +func (x *AssetServiceGetAssetResponse_Metadata) SetContentType(v string) { + x.ContentType = v +} + +type AssetServiceGetAssetResponse_Metadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ContentType string +} + +func (b0 AssetServiceGetAssetResponse_Metadata_builder) Build() *AssetServiceGetAssetResponse_Metadata { + m0 := &AssetServiceGetAssetResponse_Metadata{} + b, x := &b0, m0 + _, _ = b, x + x.ContentType = b.ContentType + return m0 +} + type AssetServiceGetAssetResponse_Data struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -268,11 +419,6 @@ func (x *AssetServiceGetAssetResponse_Data) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use AssetServiceGetAssetResponse_Data.ProtoReflect.Descriptor instead. 
-func (*AssetServiceGetAssetResponse_Data) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_asset_proto_rawDescGZIP(), []int{2, 1} -} - func (x *AssetServiceGetAssetResponse_Data) GetData() []byte { if x != nil { return x.Data @@ -280,67 +426,50 @@ func (x *AssetServiceGetAssetResponse_Data) GetData() []byte { return nil } -var File_c1_connector_v2_asset_proto protoreflect.FileDescriptor +func (x *AssetServiceGetAssetResponse_Data) SetData(v []byte) { + if v == nil { + v = []byte{} + } + x.Data = v +} -var file_c1_connector_v2_asset_proto_rawDesc = string([]byte{ - 0x0a, 0x1b, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x17, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a, 0x08, 0x41, 0x73, 0x73, 0x65, 0x74, - 0x52, 0x65, 0x66, 0x12, 0x1a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, 0x02, 0x69, 0x64, 0x22, - 0x58, 0x0a, 0x1b, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, - 0x65, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, - 0x0a, 0x05, 0x61, 0x73, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x66, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x05, 0x61, 0x73, 0x73, 0x65, 0x74, 0x22, 0xae, 0x02, 0x0a, 0x1c, 0x41, 0x73, - 0x73, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x41, 0x73, 0x73, - 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, - 0x73, 0x73, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x41, 0x73, - 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x48, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, - 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x48, 0x00, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x39, 0x0a, 0x08, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, - 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x02, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x27, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, - 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0b, 0xfa, 0x42, 0x08, - 0x7a, 0x06, 0x10, 0x00, 0x18, 
0x80, 0x80, 0x40, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x0a, - 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x32, 0x79, 0x0a, 0x0c, 0x41, 0x73, - 0x73, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x08, 0x47, 0x65, - 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x12, 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, - 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, - 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_asset_proto_rawDescOnce sync.Once - file_c1_connector_v2_asset_proto_rawDescData []byte -) +type AssetServiceGetAssetResponse_Data_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. -func file_c1_connector_v2_asset_proto_rawDescGZIP() []byte { - file_c1_connector_v2_asset_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_asset_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_asset_proto_rawDesc), len(file_c1_connector_v2_asset_proto_rawDesc))) - }) - return file_c1_connector_v2_asset_proto_rawDescData + Data []byte } +func (b0 AssetServiceGetAssetResponse_Data_builder) Build() *AssetServiceGetAssetResponse_Data { + m0 := &AssetServiceGetAssetResponse_Data{} + b, x := &b0, m0 + _, _ = b, x + x.Data = b.Data + return m0 +} + +var File_c1_connector_v2_asset_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_asset_proto_rawDesc = "" + + "\n" + + "\x1bc1/connector/v2/asset.proto\x12\x0fc1.connector.v2\x1a\x17validate/validate.proto\"&\n" + + "\bAssetRef\x12\x1a\n" + + "\x02id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\"X\n" + + "\x1bAssetServiceGetAssetRequest\x129\n" + + "\x05asset\x18\x01 \x01(\v2\x19.c1.connector.v2.AssetRefB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x05asset\"\xae\x02\n" + + "\x1cAssetServiceGetAssetResponse\x12T\n" + + "\bmetadata\x18\x01 \x01(\v26.c1.connector.v2.AssetServiceGetAssetResponse.MetadataH\x00R\bmetadata\x12H\n" + + "\x04data\x18\x02 \x01(\v22.c1.connector.v2.AssetServiceGetAssetResponse.DataH\x00R\x04data\x1a9\n" + + "\bMetadata\x12-\n" + + "\fcontent_type\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\x02R\vcontentType\x1a'\n" + + "\x04Data\x12\x1f\n" + + "\x04data\x18\x01 \x01(\fB\v\xfaB\bz\x06\x10\x00\x18\x80\x80@R\x04dataB\n" + + "\n" + + "\x03msg\x12\x03\xf8B\x012y\n" + + "\fAssetService\x12i\n" + + "\bGetAsset\x12,.c1.connector.v2.AssetServiceGetAssetRequest\x1a-.c1.connector.v2.AssetServiceGetAssetResponse0\x01B6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_asset_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_c1_connector_v2_asset_proto_goTypes = []any{ (*AssetRef)(nil), // 0: 
c1.connector.v2.AssetRef diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/asset_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/asset_protoopaque.pb.go new file mode 100644 index 00000000..f795efba --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/asset_protoopaque.pb.go @@ -0,0 +1,509 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/asset.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AssetRef struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssetRef) Reset() { + *x = AssetRef{} + mi := &file_c1_connector_v2_asset_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssetRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssetRef) ProtoMessage() {} + +func (x *AssetRef) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_asset_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AssetRef) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *AssetRef) SetId(v string) { + x.xxx_hidden_Id = v +} + +type AssetRef_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string +} + +func (b0 AssetRef_builder) Build() *AssetRef { + m0 := &AssetRef{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + return m0 +} + +type AssetServiceGetAssetRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Asset *AssetRef `protobuf:"bytes,1,opt,name=asset,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssetServiceGetAssetRequest) Reset() { + *x = AssetServiceGetAssetRequest{} + mi := &file_c1_connector_v2_asset_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssetServiceGetAssetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssetServiceGetAssetRequest) ProtoMessage() {} + +func (x *AssetServiceGetAssetRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_asset_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AssetServiceGetAssetRequest) GetAsset() *AssetRef { + if x != nil { + return x.xxx_hidden_Asset + } + return nil +} + +func (x *AssetServiceGetAssetRequest) SetAsset(v *AssetRef) { + x.xxx_hidden_Asset = v +} + +func (x *AssetServiceGetAssetRequest) HasAsset() bool { + if x == nil { + return false + } + return x.xxx_hidden_Asset != nil +} + +func (x *AssetServiceGetAssetRequest) ClearAsset() { + x.xxx_hidden_Asset = nil +} + +type AssetServiceGetAssetRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Asset *AssetRef +} + +func (b0 AssetServiceGetAssetRequest_builder) Build() *AssetServiceGetAssetRequest { + m0 := &AssetServiceGetAssetRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Asset = b.Asset + return m0 +} + +type AssetServiceGetAssetResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Msg isAssetServiceGetAssetResponse_Msg `protobuf_oneof:"msg"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssetServiceGetAssetResponse) Reset() { + *x = AssetServiceGetAssetResponse{} + mi := &file_c1_connector_v2_asset_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssetServiceGetAssetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssetServiceGetAssetResponse) ProtoMessage() {} + +func (x *AssetServiceGetAssetResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_asset_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AssetServiceGetAssetResponse) GetMetadata() *AssetServiceGetAssetResponse_Metadata { + if x != nil { + if x, ok := x.xxx_hidden_Msg.(*assetServiceGetAssetResponse_Metadata_); ok { + return x.Metadata + } + } + return nil +} + +func (x *AssetServiceGetAssetResponse) GetData() *AssetServiceGetAssetResponse_Data { + if x != nil { + if x, ok := x.xxx_hidden_Msg.(*assetServiceGetAssetResponse_Data_); ok { + return x.Data + } + } + return nil +} + +func (x *AssetServiceGetAssetResponse) SetMetadata(v *AssetServiceGetAssetResponse_Metadata) { + if v == nil { + x.xxx_hidden_Msg = nil + return + } + x.xxx_hidden_Msg = &assetServiceGetAssetResponse_Metadata_{v} +} + +func (x 
*AssetServiceGetAssetResponse) SetData(v *AssetServiceGetAssetResponse_Data) { + if v == nil { + x.xxx_hidden_Msg = nil + return + } + x.xxx_hidden_Msg = &assetServiceGetAssetResponse_Data_{v} +} + +func (x *AssetServiceGetAssetResponse) HasMsg() bool { + if x == nil { + return false + } + return x.xxx_hidden_Msg != nil +} + +func (x *AssetServiceGetAssetResponse) HasMetadata() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Msg.(*assetServiceGetAssetResponse_Metadata_) + return ok +} + +func (x *AssetServiceGetAssetResponse) HasData() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Msg.(*assetServiceGetAssetResponse_Data_) + return ok +} + +func (x *AssetServiceGetAssetResponse) ClearMsg() { + x.xxx_hidden_Msg = nil +} + +func (x *AssetServiceGetAssetResponse) ClearMetadata() { + if _, ok := x.xxx_hidden_Msg.(*assetServiceGetAssetResponse_Metadata_); ok { + x.xxx_hidden_Msg = nil + } +} + +func (x *AssetServiceGetAssetResponse) ClearData() { + if _, ok := x.xxx_hidden_Msg.(*assetServiceGetAssetResponse_Data_); ok { + x.xxx_hidden_Msg = nil + } +} + +const AssetServiceGetAssetResponse_Msg_not_set_case case_AssetServiceGetAssetResponse_Msg = 0 +const AssetServiceGetAssetResponse_Metadata_case case_AssetServiceGetAssetResponse_Msg = 1 +const AssetServiceGetAssetResponse_Data_case case_AssetServiceGetAssetResponse_Msg = 2 + +func (x *AssetServiceGetAssetResponse) WhichMsg() case_AssetServiceGetAssetResponse_Msg { + if x == nil { + return AssetServiceGetAssetResponse_Msg_not_set_case + } + switch x.xxx_hidden_Msg.(type) { + case *assetServiceGetAssetResponse_Metadata_: + return AssetServiceGetAssetResponse_Metadata_case + case *assetServiceGetAssetResponse_Data_: + return AssetServiceGetAssetResponse_Data_case + default: + return AssetServiceGetAssetResponse_Msg_not_set_case + } +} + +type AssetServiceGetAssetResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Fields of oneof xxx_hidden_Msg: + Metadata *AssetServiceGetAssetResponse_Metadata + Data *AssetServiceGetAssetResponse_Data + // -- end of xxx_hidden_Msg +} + +func (b0 AssetServiceGetAssetResponse_builder) Build() *AssetServiceGetAssetResponse { + m0 := &AssetServiceGetAssetResponse{} + b, x := &b0, m0 + _, _ = b, x + if b.Metadata != nil { + x.xxx_hidden_Msg = &assetServiceGetAssetResponse_Metadata_{b.Metadata} + } + if b.Data != nil { + x.xxx_hidden_Msg = &assetServiceGetAssetResponse_Data_{b.Data} + } + return m0 +} + +type case_AssetServiceGetAssetResponse_Msg protoreflect.FieldNumber + +func (x case_AssetServiceGetAssetResponse_Msg) String() string { + md := file_c1_connector_v2_asset_proto_msgTypes[2].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isAssetServiceGetAssetResponse_Msg interface { + isAssetServiceGetAssetResponse_Msg() +} + +type assetServiceGetAssetResponse_Metadata_ struct { + Metadata *AssetServiceGetAssetResponse_Metadata `protobuf:"bytes,1,opt,name=metadata,proto3,oneof"` +} + +type assetServiceGetAssetResponse_Data_ struct { + Data *AssetServiceGetAssetResponse_Data `protobuf:"bytes,2,opt,name=data,proto3,oneof"` +} + +func (*assetServiceGetAssetResponse_Metadata_) isAssetServiceGetAssetResponse_Msg() {} + +func (*assetServiceGetAssetResponse_Data_) isAssetServiceGetAssetResponse_Msg() {} + +type AssetServiceGetAssetResponse_Metadata struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssetServiceGetAssetResponse_Metadata) Reset() { + *x = AssetServiceGetAssetResponse_Metadata{} + mi := &file_c1_connector_v2_asset_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssetServiceGetAssetResponse_Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssetServiceGetAssetResponse_Metadata) ProtoMessage() {} + +func (x *AssetServiceGetAssetResponse_Metadata) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_asset_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AssetServiceGetAssetResponse_Metadata) GetContentType() string { + if x != nil { + return x.xxx_hidden_ContentType + } + return "" +} + +func (x *AssetServiceGetAssetResponse_Metadata) SetContentType(v string) { + x.xxx_hidden_ContentType = v +} + +type AssetServiceGetAssetResponse_Metadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ContentType string +} + +func (b0 AssetServiceGetAssetResponse_Metadata_builder) Build() *AssetServiceGetAssetResponse_Metadata { + m0 := &AssetServiceGetAssetResponse_Metadata{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ContentType = b.ContentType + return m0 +} + +type AssetServiceGetAssetResponse_Data struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Data []byte `protobuf:"bytes,1,opt,name=data,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssetServiceGetAssetResponse_Data) Reset() { + *x = AssetServiceGetAssetResponse_Data{} + mi := &file_c1_connector_v2_asset_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssetServiceGetAssetResponse_Data) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssetServiceGetAssetResponse_Data) ProtoMessage() {} + +func (x *AssetServiceGetAssetResponse_Data) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_asset_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AssetServiceGetAssetResponse_Data) GetData() []byte { + if x != nil { + return x.xxx_hidden_Data + } + return nil +} + +func (x *AssetServiceGetAssetResponse_Data) SetData(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Data = v +} + +type AssetServiceGetAssetResponse_Data_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Data []byte +} + +func (b0 AssetServiceGetAssetResponse_Data_builder) Build() *AssetServiceGetAssetResponse_Data { + m0 := &AssetServiceGetAssetResponse_Data{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Data = b.Data + return m0 +} + +var File_c1_connector_v2_asset_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_asset_proto_rawDesc = "" + + "\n" + + "\x1bc1/connector/v2/asset.proto\x12\x0fc1.connector.v2\x1a\x17validate/validate.proto\"&\n" + + "\bAssetRef\x12\x1a\n" + + "\x02id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\"X\n" + + "\x1bAssetServiceGetAssetRequest\x129\n" + + "\x05asset\x18\x01 \x01(\v2\x19.c1.connector.v2.AssetRefB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x05asset\"\xae\x02\n" + + "\x1cAssetServiceGetAssetResponse\x12T\n" + + "\bmetadata\x18\x01 \x01(\v26.c1.connector.v2.AssetServiceGetAssetResponse.MetadataH\x00R\bmetadata\x12H\n" + + "\x04data\x18\x02 \x01(\v22.c1.connector.v2.AssetServiceGetAssetResponse.DataH\x00R\x04data\x1a9\n" + + "\bMetadata\x12-\n" + + "\fcontent_type\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\x02R\vcontentType\x1a'\n" + + "\x04Data\x12\x1f\n" + + "\x04data\x18\x01 \x01(\fB\v\xfaB\bz\x06\x10\x00\x18\x80\x80@R\x04dataB\n" + + "\n" + + "\x03msg\x12\x03\xf8B\x012y\n" + + "\fAssetService\x12i\n" + + "\bGetAsset\x12,.c1.connector.v2.AssetServiceGetAssetRequest\x1a-.c1.connector.v2.AssetServiceGetAssetResponse0\x01B6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_asset_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_c1_connector_v2_asset_proto_goTypes = []any{ + (*AssetRef)(nil), // 0: c1.connector.v2.AssetRef + (*AssetServiceGetAssetRequest)(nil), // 1: c1.connector.v2.AssetServiceGetAssetRequest + (*AssetServiceGetAssetResponse)(nil), // 2: c1.connector.v2.AssetServiceGetAssetResponse + (*AssetServiceGetAssetResponse_Metadata)(nil), // 3: 
c1.connector.v2.AssetServiceGetAssetResponse.Metadata + (*AssetServiceGetAssetResponse_Data)(nil), // 4: c1.connector.v2.AssetServiceGetAssetResponse.Data +} +var file_c1_connector_v2_asset_proto_depIdxs = []int32{ + 0, // 0: c1.connector.v2.AssetServiceGetAssetRequest.asset:type_name -> c1.connector.v2.AssetRef + 3, // 1: c1.connector.v2.AssetServiceGetAssetResponse.metadata:type_name -> c1.connector.v2.AssetServiceGetAssetResponse.Metadata + 4, // 2: c1.connector.v2.AssetServiceGetAssetResponse.data:type_name -> c1.connector.v2.AssetServiceGetAssetResponse.Data + 1, // 3: c1.connector.v2.AssetService.GetAsset:input_type -> c1.connector.v2.AssetServiceGetAssetRequest + 2, // 4: c1.connector.v2.AssetService.GetAsset:output_type -> c1.connector.v2.AssetServiceGetAssetResponse + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_asset_proto_init() } +func file_c1_connector_v2_asset_proto_init() { + if File_c1_connector_v2_asset_proto != nil { + return + } + file_c1_connector_v2_asset_proto_msgTypes[2].OneofWrappers = []any{ + (*assetServiceGetAssetResponse_Metadata_)(nil), + (*assetServiceGetAssetResponse_Data_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_asset_proto_rawDesc), len(file_c1_connector_v2_asset_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connector_v2_asset_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_asset_proto_depIdxs, + MessageInfos: file_c1_connector_v2_asset_proto_msgTypes, + }.Build() + File_c1_connector_v2_asset_proto = out.File + file_c1_connector_v2_asset_proto_goTypes = nil + file_c1_connector_v2_asset_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/config.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/config.pb.go index f0e34d03..c316cda9 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/config.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/config.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/config.proto +//go:build !protoopaque + package v2 import ( @@ -11,7 +13,6 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( ) type SchemaServiceGetSchemaRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -53,13 +54,20 @@ func (x *SchemaServiceGetSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SchemaServiceGetSchemaRequest.ProtoReflect.Descriptor instead. 
-func (*SchemaServiceGetSchemaRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{0} +type SchemaServiceGetSchemaRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 SchemaServiceGetSchemaRequest_builder) Build() *SchemaServiceGetSchemaRequest { + m0 := &SchemaServiceGetSchemaRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type SchemaServiceGetSchemaResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` Schema *ConfigSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` unknownFields protoimpl.UnknownFields @@ -91,11 +99,6 @@ func (x *SchemaServiceGetSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SchemaServiceGetSchemaResponse.ProtoReflect.Descriptor instead. -func (*SchemaServiceGetSchemaResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{1} -} - func (x *SchemaServiceGetSchemaResponse) GetVersion() string { if x != nil { return x.Version @@ -110,8 +113,43 @@ func (x *SchemaServiceGetSchemaResponse) GetSchema() *ConfigSchema { return nil } +func (x *SchemaServiceGetSchemaResponse) SetVersion(v string) { + x.Version = v +} + +func (x *SchemaServiceGetSchemaResponse) SetSchema(v *ConfigSchema) { + x.Schema = v +} + +func (x *SchemaServiceGetSchemaResponse) HasSchema() bool { + if x == nil { + return false + } + return x.Schema != nil +} + +func (x *SchemaServiceGetSchemaResponse) ClearSchema() { + x.Schema = nil +} + +type SchemaServiceGetSchemaResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Version string + Schema *ConfigSchema +} + +func (b0 SchemaServiceGetSchemaResponse_builder) Build() *SchemaServiceGetSchemaResponse { + m0 := &SchemaServiceGetSchemaResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Version = b.Version + x.Schema = b.Schema + return m0 +} + type ConfigSchema struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Fields []*Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` HelpUrl string `protobuf:"bytes,3,opt,name=help_url,json=helpUrl,proto3" json:"help_url,omitempty"` @@ -146,11 +184,6 @@ func (x *ConfigSchema) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ConfigSchema.ProtoReflect.Descriptor instead. 
-func (*ConfigSchema) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{2} -} - func (x *ConfigSchema) GetFields() []*Field { if x != nil { return x.Fields @@ -186,8 +219,72 @@ func (x *ConfigSchema) GetLogo() *AssetRef { return nil } +func (x *ConfigSchema) SetFields(v []*Field) { + x.Fields = v +} + +func (x *ConfigSchema) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *ConfigSchema) SetHelpUrl(v string) { + x.HelpUrl = v +} + +func (x *ConfigSchema) SetIcon(v *AssetRef) { + x.Icon = v +} + +func (x *ConfigSchema) SetLogo(v *AssetRef) { + x.Logo = v +} + +func (x *ConfigSchema) HasIcon() bool { + if x == nil { + return false + } + return x.Icon != nil +} + +func (x *ConfigSchema) HasLogo() bool { + if x == nil { + return false + } + return x.Logo != nil +} + +func (x *ConfigSchema) ClearIcon() { + x.Icon = nil +} + +func (x *ConfigSchema) ClearLogo() { + x.Logo = nil +} + +type ConfigSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Fields []*Field + DisplayName string + HelpUrl string + Icon *AssetRef + Logo *AssetRef +} + +func (b0 ConfigSchema_builder) Build() *ConfigSchema { + m0 := &ConfigSchema{} + b, x := &b0, m0 + _, _ = b, x + x.Fields = b.Fields + x.DisplayName = b.DisplayName + x.HelpUrl = b.HelpUrl + x.Icon = b.Icon + x.Logo = b.Logo + return m0 +} + type Field struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Must not start with `C1_` and match [a-zA-Z0-9_]{2,64}. Must be unique within a connector. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // empty or https URL @@ -231,11 +328,6 @@ func (x *Field) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Field.ProtoReflect.Descriptor instead. 
-func (*Field) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{3} -} - func (x *Field) GetName() string { if x != nil { return x.Name @@ -307,6 +399,196 @@ func (x *Field) GetFile() *FileField { return nil } +func (x *Field) SetName(v string) { + x.Name = v +} + +func (x *Field) SetHelpUrl(v string) { + x.HelpUrl = v +} + +func (x *Field) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *Field) SetPlaceholder(v string) { + x.Placeholder = v +} + +func (x *Field) SetStr(v *StringField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_Str{v} +} + +func (x *Field) SetSelect(v *SelectField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_Select{v} +} + +func (x *Field) SetRandom(v *RandomStringField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_Random{v} +} + +func (x *Field) SetFile(v *FileField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_File{v} +} + +func (x *Field) HasField() bool { + if x == nil { + return false + } + return x.Field != nil +} + +func (x *Field) HasStr() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_Str) + return ok +} + +func (x *Field) HasSelect() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_Select) + return ok +} + +func (x *Field) HasRandom() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_Random) + return ok +} + +func (x *Field) HasFile() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_File) + return ok +} + +func (x *Field) ClearField() { + x.Field = nil +} + +func (x *Field) ClearStr() { + if _, ok := x.Field.(*Field_Str); ok { + x.Field = nil + } +} + +func (x *Field) ClearSelect() { + if _, ok := x.Field.(*Field_Select); ok { + x.Field = nil + } +} + +func (x *Field) ClearRandom() { + if _, ok := x.Field.(*Field_Random); ok { + x.Field = nil + } +} + +func (x *Field) ClearFile() { + if _, ok := x.Field.(*Field_File); ok { + x.Field = nil + } +} + +const Field_Field_not_set_case case_Field_Field = 0 +const Field_Str_case case_Field_Field = 100 +const Field_Select_case case_Field_Field = 101 +const Field_Random_case case_Field_Field = 102 +const Field_File_case case_Field_Field = 103 + +func (x *Field) WhichField() case_Field_Field { + if x == nil { + return Field_Field_not_set_case + } + switch x.Field.(type) { + case *Field_Str: + return Field_Str_case + case *Field_Select: + return Field_Select_case + case *Field_Random: + return Field_Random_case + case *Field_File: + return Field_File_case + default: + return Field_Field_not_set_case + } +} + +type Field_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Must not start with `C1_` and match [a-zA-Z0-9_]{2,64}. Must be unique within a connector. 
+ Name string + // empty or https URL + HelpUrl string + // Human readable label for this Field + DisplayName string + Placeholder string + // Fields of oneof Field: + Str *StringField + Select *SelectField + Random *RandomStringField + File *FileField + // -- end of Field +} + +func (b0 Field_builder) Build() *Field { + m0 := &Field{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.HelpUrl = b.HelpUrl + x.DisplayName = b.DisplayName + x.Placeholder = b.Placeholder + if b.Str != nil { + x.Field = &Field_Str{b.Str} + } + if b.Select != nil { + x.Field = &Field_Select{b.Select} + } + if b.Random != nil { + x.Field = &Field_Random{b.Random} + } + if b.File != nil { + x.Field = &Field_File{b.File} + } + return m0 +} + +type case_Field_Field protoreflect.FieldNumber + +func (x case_Field_Field) String() string { + md := file_c1_connector_v2_config_proto_msgTypes[3].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isField_Field interface { isField_Field() } @@ -336,7 +618,7 @@ func (*Field_Random) isField_Field() {} func (*Field_File) isField_Field() {} type StringField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // If secret, value is write-only in UI and a password-type form is used. Secret bool `protobuf:"varint,1,opt,name=secret,proto3" json:"secret,omitempty"` // validator rules for value. may be empty. @@ -370,11 +652,6 @@ func (x *StringField) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringField.ProtoReflect.Descriptor instead. -func (*StringField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{4} -} - func (x *StringField) GetSecret() bool { if x != nil { return x.Secret @@ -389,8 +666,45 @@ func (x *StringField) GetValueValidator() *validate.StringRules { return nil } +func (x *StringField) SetSecret(v bool) { + x.Secret = v +} + +func (x *StringField) SetValueValidator(v *validate.StringRules) { + x.ValueValidator = v +} + +func (x *StringField) HasValueValidator() bool { + if x == nil { + return false + } + return x.ValueValidator != nil +} + +func (x *StringField) ClearValueValidator() { + x.ValueValidator = nil +} + +type StringField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // If secret, value is write-only in UI and a password-type form is used. + Secret bool + // validator rules for value. may be empty. + ValueValidator *validate.StringRules +} + +func (b0 StringField_builder) Build() *StringField { + m0 := &StringField{} + b, x := &b0, m0 + _, _ = b, x + x.Secret = b.Secret + x.ValueValidator = b.ValueValidator + return m0 +} + type SelectField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // list of items that are selected from Items []*SelectField_Item `protobuf:"bytes,5,rep,name=items,proto3" json:"items,omitempty"` unknownFields protoimpl.UnknownFields @@ -422,11 +736,6 @@ func (x *SelectField) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SelectField.ProtoReflect.Descriptor instead. 
-func (*SelectField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{5} -} - func (x *SelectField) GetItems() []*SelectField_Item { if x != nil { return x.Items @@ -434,8 +743,27 @@ func (x *SelectField) GetItems() []*SelectField_Item { return nil } +func (x *SelectField) SetItems(v []*SelectField_Item) { + x.Items = v +} + +type SelectField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // list of items that are selected from + Items []*SelectField_Item +} + +func (b0 SelectField_builder) Build() *SelectField { + m0 := &SelectField{} + b, x := &b0, m0 + _, _ = b, x + x.Items = b.Items + return m0 +} + type RandomStringField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Length int32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -466,11 +794,6 @@ func (x *RandomStringField) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RandomStringField.ProtoReflect.Descriptor instead. -func (*RandomStringField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{6} -} - func (x *RandomStringField) GetLength() int32 { if x != nil { return x.Length @@ -478,8 +801,26 @@ func (x *RandomStringField) GetLength() int32 { return 0 } +func (x *RandomStringField) SetLength(v int32) { + x.Length = v +} + +type RandomStringField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Length int32 +} + +func (b0 RandomStringField_builder) Build() *RandomStringField { + m0 := &RandomStringField{} + b, x := &b0, m0 + _, _ = b, x + x.Length = b.Length + return m0 +} + type FileField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Secret bool `protobuf:"varint,1,opt,name=secret,proto3" json:"secret,omitempty"` ValueValidator *validate.StringRules `protobuf:"bytes,2,opt,name=value_validator,json=valueValidator,proto3" json:"value_validator,omitempty"` AllowedExtensions []string `protobuf:"bytes,3,rep,name=allowed_extensions,json=allowedExtensions,proto3" json:"allowed_extensions,omitempty"` @@ -512,11 +853,6 @@ func (x *FileField) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FileField.ProtoReflect.Descriptor instead. -func (*FileField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{7} -} - func (x *FileField) GetSecret() bool { if x != nil { return x.Secret @@ -538,8 +874,49 @@ func (x *FileField) GetAllowedExtensions() []string { return nil } +func (x *FileField) SetSecret(v bool) { + x.Secret = v +} + +func (x *FileField) SetValueValidator(v *validate.StringRules) { + x.ValueValidator = v +} + +func (x *FileField) SetAllowedExtensions(v []string) { + x.AllowedExtensions = v +} + +func (x *FileField) HasValueValidator() bool { + if x == nil { + return false + } + return x.ValueValidator != nil +} + +func (x *FileField) ClearValueValidator() { + x.ValueValidator = nil +} + +type FileField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Secret bool + ValueValidator *validate.StringRules + AllowedExtensions []string +} + +func (b0 FileField_builder) Build() *FileField { + m0 := &FileField{} + b, x := &b0, m0 + _, _ = b, x + x.Secret = b.Secret + x.ValueValidator = b.ValueValidator + x.AllowedExtensions = b.AllowedExtensions + return m0 +} + type SelectField_Item struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields @@ -571,11 +948,6 @@ func (x *SelectField_Item) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SelectField_Item.ProtoReflect.Descriptor instead. -func (*SelectField_Item) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_config_proto_rawDescGZIP(), []int{5, 0} -} - func (x *SelectField_Item) GetDisplayName() string { if x != nil { return x.DisplayName @@ -590,114 +962,72 @@ func (x *SelectField_Item) GetValue() string { return "" } -var File_c1_connector_v2_config_proto protoreflect.FileDescriptor +func (x *SelectField_Item) SetDisplayName(v string) { + x.DisplayName = v +} -var file_c1_connector_v2_config_proto_rawDesc = string([]byte{ - 0x0a, 0x1c, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, - 0x1b, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, - 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1f, 0x0a, 0x1d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x71, 0x0a, 0x1e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xda, 0x01, 0x0a, 0x0c, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2e, 0x0a, 0x06, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, - 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, - 0x08, 0x68, 0x65, 
0x6c, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x68, 0x65, 0x6c, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x04, 0x69, 0x63, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, - 0x66, 0x52, 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x6f, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x66, - 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x6f, 0x22, 0xde, 0x02, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x65, 0x6c, 0x70, 0x5f, 0x75, 0x72, 0x6c, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x68, 0x65, 0x6c, 0x70, 0x55, 0x72, 0x6c, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, - 0x6c, 0x64, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x03, 0x73, 0x74, 0x72, 0x18, 0x64, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, - 0x00, 0x52, 0x03, 0x73, 0x74, 0x72, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x3c, - 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x48, 0x00, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x12, 0x30, 0x0a, 0x04, - 0x66, 0x69, 0x6c, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, - 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x07, - 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x22, 0x65, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x3e, - 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x87, - 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x6c, 0x65, 
0x63, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x37, - 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x49, 0x74, 0x65, 0x6d, - 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x1a, 0x3f, 0x0a, 0x04, 0x49, 0x74, 0x65, 0x6d, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2b, 0x0a, 0x11, 0x52, 0x61, 0x6e, 0x64, - 0x6f, 0x6d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x92, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x3e, 0x0a, 0x0f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x7d, 0x0a, 0x0d, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x47, - 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, - 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, - 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_config_proto_rawDescOnce sync.Once - file_c1_connector_v2_config_proto_rawDescData []byte -) +func (x *SelectField_Item) SetValue(v string) { + x.Value = v +} + +type SelectField_Item_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-func file_c1_connector_v2_config_proto_rawDescGZIP() []byte { - file_c1_connector_v2_config_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_config_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_config_proto_rawDesc), len(file_c1_connector_v2_config_proto_rawDesc))) - }) - return file_c1_connector_v2_config_proto_rawDescData + DisplayName string + Value string } +func (b0 SelectField_Item_builder) Build() *SelectField_Item { + m0 := &SelectField_Item{} + b, x := &b0, m0 + _, _ = b, x + x.DisplayName = b.DisplayName + x.Value = b.Value + return m0 +} + +var File_c1_connector_v2_config_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_config_proto_rawDesc = "" + + "\n" + + "\x1cc1/connector/v2/config.proto\x12\x0fc1.connector.v2\x1a\x1bc1/connector/v2/asset.proto\x1a\x17validate/validate.proto\"\x1f\n" + + "\x1dSchemaServiceGetSchemaRequest\"q\n" + + "\x1eSchemaServiceGetSchemaResponse\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x125\n" + + "\x06schema\x18\x02 \x01(\v2\x1d.c1.connector.v2.ConfigSchemaR\x06schema\"\xda\x01\n" + + "\fConfigSchema\x12.\n" + + "\x06fields\x18\x01 \x03(\v2\x16.c1.connector.v2.FieldR\x06fields\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x19\n" + + "\bhelp_url\x18\x03 \x01(\tR\ahelpUrl\x12-\n" + + "\x04icon\x18\x04 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12-\n" + + "\x04logo\x18\x05 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04logo\"\xde\x02\n" + + "\x05Field\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x19\n" + + "\bhelp_url\x18\x02 \x01(\tR\ahelpUrl\x12!\n" + + "\fdisplay_name\x18\x03 \x01(\tR\vdisplayName\x12 \n" + + "\vplaceholder\x18\x04 \x01(\tR\vplaceholder\x120\n" + + "\x03str\x18d \x01(\v2\x1c.c1.connector.v2.StringFieldH\x00R\x03str\x126\n" + + "\x06select\x18e \x01(\v2\x1c.c1.connector.v2.SelectFieldH\x00R\x06select\x12<\n" + + "\x06random\x18f \x01(\v2\".c1.connector.v2.RandomStringFieldH\x00R\x06random\x120\n" + + "\x04file\x18g \x01(\v2\x1a.c1.connector.v2.FileFieldH\x00R\x04fileB\a\n" + + "\x05field\"e\n" + + "\vStringField\x12\x16\n" + + "\x06secret\x18\x01 \x01(\bR\x06secret\x12>\n" + + "\x0fvalue_validator\x18\x02 \x01(\v2\x15.validate.StringRulesR\x0evalueValidator\"\x87\x01\n" + + "\vSelectField\x127\n" + + "\x05items\x18\x05 \x03(\v2!.c1.connector.v2.SelectField.ItemR\x05items\x1a?\n" + + "\x04Item\x12!\n" + + "\fdisplay_name\x18\x01 \x01(\tR\vdisplayName\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\"+\n" + + "\x11RandomStringField\x12\x16\n" + + "\x06length\x18\x01 \x01(\x05R\x06length\"\x92\x01\n" + + "\tFileField\x12\x16\n" + + "\x06secret\x18\x01 \x01(\bR\x06secret\x12>\n" + + "\x0fvalue_validator\x18\x02 \x01(\v2\x15.validate.StringRulesR\x0evalueValidator\x12-\n" + + "\x12allowed_extensions\x18\x03 \x03(\tR\x11allowedExtensions2}\n" + + "\rSchemaService\x12l\n" + + "\tGetSchema\x12..c1.connector.v2.SchemaServiceGetSchemaRequest\x1a/.c1.connector.v2.SchemaServiceGetSchemaResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_c1_connector_v2_config_proto_goTypes = []any{ (*SchemaServiceGetSchemaRequest)(nil), // 0: c1.connector.v2.SchemaServiceGetSchemaRequest diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/config_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/config_protoopaque.pb.go new file mode 100644 index 00000000..00fb412c --- /dev/null +++ 
b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/config_protoopaque.pb.go @@ -0,0 +1,1080 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/config.proto + +//go:build protoopaque + +package v2 + +import ( + validate "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SchemaServiceGetSchemaRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SchemaServiceGetSchemaRequest) Reset() { + *x = SchemaServiceGetSchemaRequest{} + mi := &file_c1_connector_v2_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchemaServiceGetSchemaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemaServiceGetSchemaRequest) ProtoMessage() {} + +func (x *SchemaServiceGetSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type SchemaServiceGetSchemaRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 SchemaServiceGetSchemaRequest_builder) Build() *SchemaServiceGetSchemaRequest { + m0 := &SchemaServiceGetSchemaRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type SchemaServiceGetSchemaResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Version string `protobuf:"bytes,1,opt,name=version,proto3"` + xxx_hidden_Schema *ConfigSchema `protobuf:"bytes,2,opt,name=schema,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SchemaServiceGetSchemaResponse) Reset() { + *x = SchemaServiceGetSchemaResponse{} + mi := &file_c1_connector_v2_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchemaServiceGetSchemaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemaServiceGetSchemaResponse) ProtoMessage() {} + +func (x *SchemaServiceGetSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SchemaServiceGetSchemaResponse) GetVersion() string { + if x != nil { + return x.xxx_hidden_Version + } + return "" +} + +func (x *SchemaServiceGetSchemaResponse) GetSchema() *ConfigSchema { + if x != nil { + return x.xxx_hidden_Schema + } + return nil +} + +func (x *SchemaServiceGetSchemaResponse) SetVersion(v string) { + x.xxx_hidden_Version = v +} + +func (x *SchemaServiceGetSchemaResponse) SetSchema(v *ConfigSchema) { + x.xxx_hidden_Schema = v +} + +func (x *SchemaServiceGetSchemaResponse) HasSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_Schema != nil +} + +func (x *SchemaServiceGetSchemaResponse) ClearSchema() { + x.xxx_hidden_Schema = nil +} + +type SchemaServiceGetSchemaResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Version string + Schema *ConfigSchema +} + +func (b0 SchemaServiceGetSchemaResponse_builder) Build() *SchemaServiceGetSchemaResponse { + m0 := &SchemaServiceGetSchemaResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Version = b.Version + x.xxx_hidden_Schema = b.Schema + return m0 +} + +type ConfigSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Fields *[]*Field `protobuf:"bytes,1,rep,name=fields,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_HelpUrl string `protobuf:"bytes,3,opt,name=help_url,json=helpUrl,proto3"` + xxx_hidden_Icon *AssetRef `protobuf:"bytes,4,opt,name=icon,proto3"` + xxx_hidden_Logo *AssetRef `protobuf:"bytes,5,opt,name=logo,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConfigSchema) Reset() { + *x = ConfigSchema{} + mi := &file_c1_connector_v2_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConfigSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigSchema) ProtoMessage() {} + +func (x *ConfigSchema) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConfigSchema) GetFields() []*Field { + if x != nil { + if x.xxx_hidden_Fields != nil { + return *x.xxx_hidden_Fields + } + } + return nil +} + +func (x *ConfigSchema) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *ConfigSchema) GetHelpUrl() string { + if x != nil { + return x.xxx_hidden_HelpUrl + } + return "" +} + +func (x *ConfigSchema) GetIcon() *AssetRef { + if x != nil { + return x.xxx_hidden_Icon + } + return nil +} + +func (x *ConfigSchema) GetLogo() *AssetRef { + if x != nil { + return x.xxx_hidden_Logo + } + return nil +} + +func (x *ConfigSchema) SetFields(v []*Field) { + x.xxx_hidden_Fields = &v +} + +func (x *ConfigSchema) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *ConfigSchema) SetHelpUrl(v string) { + x.xxx_hidden_HelpUrl = v +} + +func (x *ConfigSchema) SetIcon(v *AssetRef) { + x.xxx_hidden_Icon = v +} + +func (x *ConfigSchema) SetLogo(v *AssetRef) { + x.xxx_hidden_Logo = v +} + +func (x *ConfigSchema) HasIcon() bool { + if x == nil { + return false + } + return x.xxx_hidden_Icon != nil +} + +func (x *ConfigSchema) HasLogo() bool { + if x == nil { + return false + } + return x.xxx_hidden_Logo != nil +} + +func (x *ConfigSchema) ClearIcon() { + x.xxx_hidden_Icon = nil +} + +func (x *ConfigSchema) ClearLogo() { + x.xxx_hidden_Logo = nil +} + +type ConfigSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Fields []*Field + DisplayName string + HelpUrl string + Icon *AssetRef + Logo *AssetRef +} + +func (b0 ConfigSchema_builder) Build() *ConfigSchema { + m0 := &ConfigSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Fields = &b.Fields + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_HelpUrl = b.HelpUrl + x.xxx_hidden_Icon = b.Icon + x.xxx_hidden_Logo = b.Logo + return m0 +} + +type Field struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_HelpUrl string `protobuf:"bytes,2,opt,name=help_url,json=helpUrl,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Placeholder string `protobuf:"bytes,4,opt,name=placeholder,proto3"` + xxx_hidden_Field isField_Field `protobuf_oneof:"field"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Field) Reset() { + *x = Field{} + mi := &file_c1_connector_v2_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Field) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Field) ProtoMessage() {} + +func (x *Field) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Field) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Field) GetHelpUrl() string { + if x != nil { + return x.xxx_hidden_HelpUrl + } + return "" +} + +func (x *Field) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *Field) GetPlaceholder() string { + if x != nil { + return x.xxx_hidden_Placeholder + } + return "" +} + +func (x *Field) GetStr() *StringField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_Str); ok { + return x.Str + } + } + return nil +} + +func (x *Field) GetSelect() *SelectField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_Select); ok { + return x.Select + } + } + return nil +} + +func (x *Field) GetRandom() *RandomStringField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_Random); ok { + return x.Random + } + } + return nil +} + +func (x *Field) GetFile() *FileField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_File); ok { + return x.File + } + } + return nil +} + +func (x *Field) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Field) SetHelpUrl(v string) { + x.xxx_hidden_HelpUrl = v +} + +func (x *Field) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *Field) SetPlaceholder(v string) { + x.xxx_hidden_Placeholder = v +} + +func (x *Field) SetStr(v *StringField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_Str{v} +} + +func (x *Field) SetSelect(v *SelectField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_Select{v} +} + +func (x *Field) SetRandom(v *RandomStringField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_Random{v} +} + +func (x *Field) SetFile(v *FileField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_File{v} +} + +func (x *Field) HasField() bool { + if x == nil { + return false + } + return 
x.xxx_hidden_Field != nil +} + +func (x *Field) HasStr() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_Str) + return ok +} + +func (x *Field) HasSelect() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_Select) + return ok +} + +func (x *Field) HasRandom() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_Random) + return ok +} + +func (x *Field) HasFile() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_File) + return ok +} + +func (x *Field) ClearField() { + x.xxx_hidden_Field = nil +} + +func (x *Field) ClearStr() { + if _, ok := x.xxx_hidden_Field.(*field_Str); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearSelect() { + if _, ok := x.xxx_hidden_Field.(*field_Select); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearRandom() { + if _, ok := x.xxx_hidden_Field.(*field_Random); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearFile() { + if _, ok := x.xxx_hidden_Field.(*field_File); ok { + x.xxx_hidden_Field = nil + } +} + +const Field_Field_not_set_case case_Field_Field = 0 +const Field_Str_case case_Field_Field = 100 +const Field_Select_case case_Field_Field = 101 +const Field_Random_case case_Field_Field = 102 +const Field_File_case case_Field_Field = 103 + +func (x *Field) WhichField() case_Field_Field { + if x == nil { + return Field_Field_not_set_case + } + switch x.xxx_hidden_Field.(type) { + case *field_Str: + return Field_Str_case + case *field_Select: + return Field_Select_case + case *field_Random: + return Field_Random_case + case *field_File: + return Field_File_case + default: + return Field_Field_not_set_case + } +} + +type Field_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Must not start with `C1_` and match [a-zA-Z0-9_]{2,64}. Must be unique within a connector. 
+ Name string + // empty or https URL + HelpUrl string + // Human readable label for this Field + DisplayName string + Placeholder string + // Fields of oneof xxx_hidden_Field: + Str *StringField + Select *SelectField + Random *RandomStringField + File *FileField + // -- end of xxx_hidden_Field +} + +func (b0 Field_builder) Build() *Field { + m0 := &Field{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_HelpUrl = b.HelpUrl + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Placeholder = b.Placeholder + if b.Str != nil { + x.xxx_hidden_Field = &field_Str{b.Str} + } + if b.Select != nil { + x.xxx_hidden_Field = &field_Select{b.Select} + } + if b.Random != nil { + x.xxx_hidden_Field = &field_Random{b.Random} + } + if b.File != nil { + x.xxx_hidden_Field = &field_File{b.File} + } + return m0 +} + +type case_Field_Field protoreflect.FieldNumber + +func (x case_Field_Field) String() string { + md := file_c1_connector_v2_config_proto_msgTypes[3].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isField_Field interface { + isField_Field() +} + +type field_Str struct { + Str *StringField `protobuf:"bytes,100,opt,name=str,proto3,oneof"` +} + +type field_Select struct { + Select *SelectField `protobuf:"bytes,101,opt,name=select,proto3,oneof"` +} + +type field_Random struct { + Random *RandomStringField `protobuf:"bytes,102,opt,name=random,proto3,oneof"` +} + +type field_File struct { + File *FileField `protobuf:"bytes,103,opt,name=file,proto3,oneof"` +} + +func (*field_Str) isField_Field() {} + +func (*field_Select) isField_Field() {} + +func (*field_Random) isField_Field() {} + +func (*field_File) isField_Field() {} + +type StringField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Secret bool `protobuf:"varint,1,opt,name=secret,proto3"` + xxx_hidden_ValueValidator *validate.StringRules `protobuf:"bytes,2,opt,name=value_validator,json=valueValidator,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StringField) Reset() { + *x = StringField{} + mi := &file_c1_connector_v2_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringField) ProtoMessage() {} + +func (x *StringField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StringField) GetSecret() bool { + if x != nil { + return x.xxx_hidden_Secret + } + return false +} + +func (x *StringField) GetValueValidator() *validate.StringRules { + if x != nil { + return x.xxx_hidden_ValueValidator + } + return nil +} + +func (x *StringField) SetSecret(v bool) { + x.xxx_hidden_Secret = v +} + +func (x *StringField) SetValueValidator(v *validate.StringRules) { + x.xxx_hidden_ValueValidator = v +} + +func (x *StringField) HasValueValidator() bool { + if x == nil { + return false + } + return x.xxx_hidden_ValueValidator != nil +} + +func (x *StringField) ClearValueValidator() { + x.xxx_hidden_ValueValidator = nil +} + +type StringField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // If secret, value is write-only in UI and a password-type form is used. + Secret bool + // validator rules for value. may be empty. + ValueValidator *validate.StringRules +} + +func (b0 StringField_builder) Build() *StringField { + m0 := &StringField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Secret = b.Secret + x.xxx_hidden_ValueValidator = b.ValueValidator + return m0 +} + +type SelectField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Items *[]*SelectField_Item `protobuf:"bytes,5,rep,name=items,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SelectField) Reset() { + *x = SelectField{} + mi := &file_c1_connector_v2_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SelectField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectField) ProtoMessage() {} + +func (x *SelectField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SelectField) GetItems() []*SelectField_Item { + if x != nil { + if x.xxx_hidden_Items != nil { + return *x.xxx_hidden_Items + } + } + return nil +} + +func (x *SelectField) SetItems(v []*SelectField_Item) { + x.xxx_hidden_Items = &v +} + +type SelectField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // list of items that are selected from + Items []*SelectField_Item +} + +func (b0 SelectField_builder) Build() *SelectField { + m0 := &SelectField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Items = &b.Items + return m0 +} + +type RandomStringField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Length int32 `protobuf:"varint,1,opt,name=length,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RandomStringField) Reset() { + *x = RandomStringField{} + mi := &file_c1_connector_v2_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RandomStringField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RandomStringField) ProtoMessage() {} + +func (x *RandomStringField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RandomStringField) GetLength() int32 { + if x != nil { + return x.xxx_hidden_Length + } + return 0 +} + +func (x *RandomStringField) SetLength(v int32) { + x.xxx_hidden_Length = v +} + +type RandomStringField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Length int32 +} + +func (b0 RandomStringField_builder) Build() *RandomStringField { + m0 := &RandomStringField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Length = b.Length + return m0 +} + +type FileField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Secret bool `protobuf:"varint,1,opt,name=secret,proto3"` + xxx_hidden_ValueValidator *validate.StringRules `protobuf:"bytes,2,opt,name=value_validator,json=valueValidator,proto3"` + xxx_hidden_AllowedExtensions []string `protobuf:"bytes,3,rep,name=allowed_extensions,json=allowedExtensions,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FileField) Reset() { + *x = FileField{} + mi := &file_c1_connector_v2_config_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FileField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileField) ProtoMessage() {} + +func (x *FileField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FileField) GetSecret() bool { + if x != nil { + return x.xxx_hidden_Secret + } + return false +} + +func (x *FileField) GetValueValidator() *validate.StringRules { + if x != nil { + return x.xxx_hidden_ValueValidator + } + return nil +} + +func (x *FileField) GetAllowedExtensions() []string { + if x != nil { + return x.xxx_hidden_AllowedExtensions + } + return nil +} + +func (x *FileField) SetSecret(v bool) { + x.xxx_hidden_Secret = v +} + +func (x *FileField) SetValueValidator(v *validate.StringRules) { + x.xxx_hidden_ValueValidator = v +} + +func (x *FileField) SetAllowedExtensions(v []string) { + x.xxx_hidden_AllowedExtensions = v +} + +func (x *FileField) HasValueValidator() bool { + if x == nil { + return false + } + return x.xxx_hidden_ValueValidator != nil +} + +func (x *FileField) ClearValueValidator() { + x.xxx_hidden_ValueValidator = nil +} + +type FileField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Secret bool + ValueValidator *validate.StringRules + AllowedExtensions []string +} + +func (b0 FileField_builder) Build() *FileField { + m0 := &FileField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Secret = b.Secret + x.xxx_hidden_ValueValidator = b.ValueValidator + x.xxx_hidden_AllowedExtensions = b.AllowedExtensions + return m0 +} + +type SelectField_Item struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Value string `protobuf:"bytes,2,opt,name=value,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SelectField_Item) Reset() { + *x = SelectField_Item{} + mi := &file_c1_connector_v2_config_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SelectField_Item) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectField_Item) ProtoMessage() {} + +func (x *SelectField_Item) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_config_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SelectField_Item) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *SelectField_Item) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *SelectField_Item) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *SelectField_Item) SetValue(v string) { + x.xxx_hidden_Value = v +} + +type SelectField_Item_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DisplayName string + Value string +} + +func (b0 SelectField_Item_builder) Build() *SelectField_Item { + m0 := &SelectField_Item{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Value = b.Value + return m0 +} + +var File_c1_connector_v2_config_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_config_proto_rawDesc = "" + + "\n" + + "\x1cc1/connector/v2/config.proto\x12\x0fc1.connector.v2\x1a\x1bc1/connector/v2/asset.proto\x1a\x17validate/validate.proto\"\x1f\n" + + "\x1dSchemaServiceGetSchemaRequest\"q\n" + + "\x1eSchemaServiceGetSchemaResponse\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x125\n" + + "\x06schema\x18\x02 \x01(\v2\x1d.c1.connector.v2.ConfigSchemaR\x06schema\"\xda\x01\n" + + "\fConfigSchema\x12.\n" + + "\x06fields\x18\x01 \x03(\v2\x16.c1.connector.v2.FieldR\x06fields\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x19\n" + + "\bhelp_url\x18\x03 \x01(\tR\ahelpUrl\x12-\n" + + "\x04icon\x18\x04 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12-\n" + + "\x04logo\x18\x05 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04logo\"\xde\x02\n" + + "\x05Field\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x19\n" + + "\bhelp_url\x18\x02 \x01(\tR\ahelpUrl\x12!\n" + + "\fdisplay_name\x18\x03 \x01(\tR\vdisplayName\x12 \n" + + "\vplaceholder\x18\x04 \x01(\tR\vplaceholder\x120\n" + + "\x03str\x18d \x01(\v2\x1c.c1.connector.v2.StringFieldH\x00R\x03str\x126\n" + + "\x06select\x18e \x01(\v2\x1c.c1.connector.v2.SelectFieldH\x00R\x06select\x12<\n" + + "\x06random\x18f \x01(\v2\".c1.connector.v2.RandomStringFieldH\x00R\x06random\x120\n" + + "\x04file\x18g \x01(\v2\x1a.c1.connector.v2.FileFieldH\x00R\x04fileB\a\n" + + "\x05field\"e\n" + + "\vStringField\x12\x16\n" + + "\x06secret\x18\x01 \x01(\bR\x06secret\x12>\n" + + "\x0fvalue_validator\x18\x02 \x01(\v2\x15.validate.StringRulesR\x0evalueValidator\"\x87\x01\n" + + "\vSelectField\x127\n" + + "\x05items\x18\x05 \x03(\v2!.c1.connector.v2.SelectField.ItemR\x05items\x1a?\n" + + "\x04Item\x12!\n" + + "\fdisplay_name\x18\x01 \x01(\tR\vdisplayName\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\"+\n" + + "\x11RandomStringField\x12\x16\n" + + "\x06length\x18\x01 \x01(\x05R\x06length\"\x92\x01\n" + + "\tFileField\x12\x16\n" + + "\x06secret\x18\x01 \x01(\bR\x06secret\x12>\n" + + "\x0fvalue_validator\x18\x02 \x01(\v2\x15.validate.StringRulesR\x0evalueValidator\x12-\n" + + "\x12allowed_extensions\x18\x03 \x03(\tR\x11allowedExtensions2}\n" + + "\rSchemaService\x12l\n" + + "\tGetSchema\x12..c1.connector.v2.SchemaServiceGetSchemaRequest\x1a/.c1.connector.v2.SchemaServiceGetSchemaResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_c1_connector_v2_config_proto_goTypes = []any{ + (*SchemaServiceGetSchemaRequest)(nil), // 0: c1.connector.v2.SchemaServiceGetSchemaRequest + (*SchemaServiceGetSchemaResponse)(nil), // 1: c1.connector.v2.SchemaServiceGetSchemaResponse + (*ConfigSchema)(nil), // 2: c1.connector.v2.ConfigSchema + (*Field)(nil), // 3: c1.connector.v2.Field + (*StringField)(nil), // 4: c1.connector.v2.StringField + (*SelectField)(nil), // 5: c1.connector.v2.SelectField + (*RandomStringField)(nil), // 6: c1.connector.v2.RandomStringField + (*FileField)(nil), // 7: c1.connector.v2.FileField + (*SelectField_Item)(nil), // 8: c1.connector.v2.SelectField.Item + (*AssetRef)(nil), // 9: c1.connector.v2.AssetRef + (*validate.StringRules)(nil), // 10: 
validate.StringRules +} +var file_c1_connector_v2_config_proto_depIdxs = []int32{ + 2, // 0: c1.connector.v2.SchemaServiceGetSchemaResponse.schema:type_name -> c1.connector.v2.ConfigSchema + 3, // 1: c1.connector.v2.ConfigSchema.fields:type_name -> c1.connector.v2.Field + 9, // 2: c1.connector.v2.ConfigSchema.icon:type_name -> c1.connector.v2.AssetRef + 9, // 3: c1.connector.v2.ConfigSchema.logo:type_name -> c1.connector.v2.AssetRef + 4, // 4: c1.connector.v2.Field.str:type_name -> c1.connector.v2.StringField + 5, // 5: c1.connector.v2.Field.select:type_name -> c1.connector.v2.SelectField + 6, // 6: c1.connector.v2.Field.random:type_name -> c1.connector.v2.RandomStringField + 7, // 7: c1.connector.v2.Field.file:type_name -> c1.connector.v2.FileField + 10, // 8: c1.connector.v2.StringField.value_validator:type_name -> validate.StringRules + 8, // 9: c1.connector.v2.SelectField.items:type_name -> c1.connector.v2.SelectField.Item + 10, // 10: c1.connector.v2.FileField.value_validator:type_name -> validate.StringRules + 0, // 11: c1.connector.v2.SchemaService.GetSchema:input_type -> c1.connector.v2.SchemaServiceGetSchemaRequest + 1, // 12: c1.connector.v2.SchemaService.GetSchema:output_type -> c1.connector.v2.SchemaServiceGetSchemaResponse + 12, // [12:13] is the sub-list for method output_type + 11, // [11:12] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_config_proto_init() } +func file_c1_connector_v2_config_proto_init() { + if File_c1_connector_v2_config_proto != nil { + return + } + file_c1_connector_v2_asset_proto_init() + file_c1_connector_v2_config_proto_msgTypes[3].OneofWrappers = []any{ + (*field_Str)(nil), + (*field_Select)(nil), + (*field_Random)(nil), + (*field_File)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_config_proto_rawDesc), len(file_c1_connector_v2_config_proto_rawDesc)), + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connector_v2_config_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_config_proto_depIdxs, + MessageInfos: file_c1_connector_v2_config_proto_msgTypes, + }.Build() + File_c1_connector_v2_config_proto = out.File + file_c1_connector_v2_config_proto_goTypes = nil + file_c1_connector_v2_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go index da231740..21ac6d98 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/connector.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -98,11 +99,6 @@ func (x Capability) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use Capability.Descriptor instead. 
-func (Capability) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{0} -} - type CapabilityDetailCredentialOption int32 const ( @@ -153,14 +149,10 @@ func (x CapabilityDetailCredentialOption) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use CapabilityDetailCredentialOption.Descriptor instead. -func (CapabilityDetailCredentialOption) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{1} -} - type ConnectorServiceCleanupRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` + ActiveSyncId string `protobuf:"bytes,2,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -190,11 +182,6 @@ func (x *ConnectorServiceCleanupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ConnectorServiceCleanupRequest.ProtoReflect.Descriptor instead. -func (*ConnectorServiceCleanupRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{0} -} - func (x *ConnectorServiceCleanupRequest) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -202,8 +189,39 @@ func (x *ConnectorServiceCleanupRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *ConnectorServiceCleanupRequest) GetActiveSyncId() string { + if x != nil { + return x.ActiveSyncId + } + return "" +} + +func (x *ConnectorServiceCleanupRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ConnectorServiceCleanupRequest) SetActiveSyncId(v string) { + x.ActiveSyncId = v +} + +type ConnectorServiceCleanupRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 ConnectorServiceCleanupRequest_builder) Build() *ConnectorServiceCleanupRequest { + m0 := &ConnectorServiceCleanupRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.ActiveSyncId = b.ActiveSyncId + return m0 +} + type ConnectorServiceCleanupResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -234,11 +252,6 @@ func (x *ConnectorServiceCleanupResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ConnectorServiceCleanupResponse.ProtoReflect.Descriptor instead. -func (*ConnectorServiceCleanupResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{1} -} - func (x *ConnectorServiceCleanupResponse) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -246,8 +259,26 @@ func (x *ConnectorServiceCleanupResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *ConnectorServiceCleanupResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type ConnectorServiceCleanupResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 ConnectorServiceCleanupResponse_builder) Build() *ConnectorServiceCleanupResponse { + m0 := &ConnectorServiceCleanupResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type ConnectorMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` HelpUrl string `protobuf:"bytes,2,opt,name=help_url,json=helpUrl,proto3" json:"help_url,omitempty"` Icon *AssetRef `protobuf:"bytes,3,opt,name=icon,proto3" json:"icon,omitempty"` @@ -286,11 +317,6 @@ func (x *ConnectorMetadata) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ConnectorMetadata.ProtoReflect.Descriptor instead. -func (*ConnectorMetadata) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{2} -} - func (x *ConnectorMetadata) GetDisplayName() string { if x != nil { return x.DisplayName @@ -354,8 +380,129 @@ func (x *ConnectorMetadata) GetAccountCreationSchema() *ConnectorAccountCreation return nil } +func (x *ConnectorMetadata) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *ConnectorMetadata) SetHelpUrl(v string) { + x.HelpUrl = v +} + +func (x *ConnectorMetadata) SetIcon(v *AssetRef) { + x.Icon = v +} + +func (x *ConnectorMetadata) SetLogo(v *AssetRef) { + x.Logo = v +} + +func (x *ConnectorMetadata) SetProfile(v *structpb.Struct) { + x.Profile = v +} + +func (x *ConnectorMetadata) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ConnectorMetadata) SetDescription(v string) { + x.Description = v +} + +func (x *ConnectorMetadata) SetCapabilities(v *ConnectorCapabilities) { + x.Capabilities = v +} + +func (x *ConnectorMetadata) SetAccountCreationSchema(v *ConnectorAccountCreationSchema) { + x.AccountCreationSchema = v +} + +func (x *ConnectorMetadata) HasIcon() bool { + if x == nil { + return false + } + return x.Icon != nil +} + +func (x *ConnectorMetadata) HasLogo() bool { + if x == nil { + return false + } + return x.Logo != nil +} + +func (x *ConnectorMetadata) HasProfile() bool { + if x == nil { + return false + } + return x.Profile != nil +} + +func (x *ConnectorMetadata) HasCapabilities() bool { + if x == nil { + return false + } + return x.Capabilities != nil +} + +func (x *ConnectorMetadata) HasAccountCreationSchema() bool { + if x == nil { + return false + } + return x.AccountCreationSchema != nil +} + +func (x *ConnectorMetadata) ClearIcon() { + x.Icon = nil +} + +func (x *ConnectorMetadata) ClearLogo() { + x.Logo = nil +} + +func (x *ConnectorMetadata) ClearProfile() { + x.Profile = nil +} + +func (x *ConnectorMetadata) ClearCapabilities() { + x.Capabilities = nil +} + +func (x *ConnectorMetadata) ClearAccountCreationSchema() { + x.AccountCreationSchema = nil +} + +type ConnectorMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DisplayName string + HelpUrl string + Icon *AssetRef + Logo *AssetRef + Profile *structpb.Struct + Annotations []*anypb.Any + Description string + Capabilities *ConnectorCapabilities + AccountCreationSchema *ConnectorAccountCreationSchema +} + +func (b0 ConnectorMetadata_builder) Build() *ConnectorMetadata { + m0 := &ConnectorMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.DisplayName = b.DisplayName + x.HelpUrl = b.HelpUrl + x.Icon = b.Icon + x.Logo = b.Logo + x.Profile = b.Profile + x.Annotations = b.Annotations + x.Description = b.Description + x.Capabilities = b.Capabilities + x.AccountCreationSchema = b.AccountCreationSchema + return m0 +} + type CredentialDetails struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` CapabilityAccountProvisioning *CredentialDetailsAccountProvisioning `protobuf:"bytes,1,opt,name=capability_account_provisioning,json=capabilityAccountProvisioning,proto3" json:"capability_account_provisioning,omitempty"` CapabilityCredentialRotation *CredentialDetailsCredentialRotation `protobuf:"bytes,2,opt,name=capability_credential_rotation,json=capabilityCredentialRotation,proto3" json:"capability_credential_rotation,omitempty"` unknownFields protoimpl.UnknownFields @@ -387,11 +534,6 @@ func (x *CredentialDetails) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CredentialDetails.ProtoReflect.Descriptor instead. -func (*CredentialDetails) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{3} -} - func (x *CredentialDetails) GetCapabilityAccountProvisioning() *CredentialDetailsAccountProvisioning { if x != nil { return x.CapabilityAccountProvisioning @@ -406,8 +548,54 @@ func (x *CredentialDetails) GetCapabilityCredentialRotation() *CredentialDetails return nil } +func (x *CredentialDetails) SetCapabilityAccountProvisioning(v *CredentialDetailsAccountProvisioning) { + x.CapabilityAccountProvisioning = v +} + +func (x *CredentialDetails) SetCapabilityCredentialRotation(v *CredentialDetailsCredentialRotation) { + x.CapabilityCredentialRotation = v +} + +func (x *CredentialDetails) HasCapabilityAccountProvisioning() bool { + if x == nil { + return false + } + return x.CapabilityAccountProvisioning != nil +} + +func (x *CredentialDetails) HasCapabilityCredentialRotation() bool { + if x == nil { + return false + } + return x.CapabilityCredentialRotation != nil +} + +func (x *CredentialDetails) ClearCapabilityAccountProvisioning() { + x.CapabilityAccountProvisioning = nil +} + +func (x *CredentialDetails) ClearCapabilityCredentialRotation() { + x.CapabilityCredentialRotation = nil +} + +type CredentialDetails_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + CapabilityAccountProvisioning *CredentialDetailsAccountProvisioning + CapabilityCredentialRotation *CredentialDetailsCredentialRotation +} + +func (b0 CredentialDetails_builder) Build() *CredentialDetails { + m0 := &CredentialDetails{} + b, x := &b0, m0 + _, _ = b, x + x.CapabilityAccountProvisioning = b.CapabilityAccountProvisioning + x.CapabilityCredentialRotation = b.CapabilityCredentialRotation + return m0 +} + type CredentialDetailsAccountProvisioning struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SupportedCredentialOptions []CapabilityDetailCredentialOption `protobuf:"varint,1,rep,packed,name=supported_credential_options,json=supportedCredentialOptions,proto3,enum=c1.connector.v2.CapabilityDetailCredentialOption" json:"supported_credential_options,omitempty"` PreferredCredentialOption CapabilityDetailCredentialOption `protobuf:"varint,2,opt,name=preferred_credential_option,json=preferredCredentialOption,proto3,enum=c1.connector.v2.CapabilityDetailCredentialOption" json:"preferred_credential_option,omitempty"` unknownFields protoimpl.UnknownFields @@ -439,11 +627,6 @@ func (x *CredentialDetailsAccountProvisioning) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use CredentialDetailsAccountProvisioning.ProtoReflect.Descriptor instead. -func (*CredentialDetailsAccountProvisioning) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{4} -} - func (x *CredentialDetailsAccountProvisioning) GetSupportedCredentialOptions() []CapabilityDetailCredentialOption { if x != nil { return x.SupportedCredentialOptions @@ -458,8 +641,32 @@ func (x *CredentialDetailsAccountProvisioning) GetPreferredCredentialOption() Ca return CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED } +func (x *CredentialDetailsAccountProvisioning) SetSupportedCredentialOptions(v []CapabilityDetailCredentialOption) { + x.SupportedCredentialOptions = v +} + +func (x *CredentialDetailsAccountProvisioning) SetPreferredCredentialOption(v CapabilityDetailCredentialOption) { + x.PreferredCredentialOption = v +} + +type CredentialDetailsAccountProvisioning_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SupportedCredentialOptions []CapabilityDetailCredentialOption + PreferredCredentialOption CapabilityDetailCredentialOption +} + +func (b0 CredentialDetailsAccountProvisioning_builder) Build() *CredentialDetailsAccountProvisioning { + m0 := &CredentialDetailsAccountProvisioning{} + b, x := &b0, m0 + _, _ = b, x + x.SupportedCredentialOptions = b.SupportedCredentialOptions + x.PreferredCredentialOption = b.PreferredCredentialOption + return m0 +} + type CredentialDetailsCredentialRotation struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SupportedCredentialOptions []CapabilityDetailCredentialOption `protobuf:"varint,1,rep,packed,name=supported_credential_options,json=supportedCredentialOptions,proto3,enum=c1.connector.v2.CapabilityDetailCredentialOption" json:"supported_credential_options,omitempty"` PreferredCredentialOption CapabilityDetailCredentialOption `protobuf:"varint,2,opt,name=preferred_credential_option,json=preferredCredentialOption,proto3,enum=c1.connector.v2.CapabilityDetailCredentialOption" json:"preferred_credential_option,omitempty"` unknownFields protoimpl.UnknownFields @@ -491,11 +698,6 @@ func (x *CredentialDetailsCredentialRotation) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use CredentialDetailsCredentialRotation.ProtoReflect.Descriptor instead. -func (*CredentialDetailsCredentialRotation) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{5} -} - func (x *CredentialDetailsCredentialRotation) GetSupportedCredentialOptions() []CapabilityDetailCredentialOption { if x != nil { return x.SupportedCredentialOptions @@ -510,8 +712,32 @@ func (x *CredentialDetailsCredentialRotation) GetPreferredCredentialOption() Cap return CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED } +func (x *CredentialDetailsCredentialRotation) SetSupportedCredentialOptions(v []CapabilityDetailCredentialOption) { + x.SupportedCredentialOptions = v +} + +func (x *CredentialDetailsCredentialRotation) SetPreferredCredentialOption(v CapabilityDetailCredentialOption) { + x.PreferredCredentialOption = v +} + +type CredentialDetailsCredentialRotation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SupportedCredentialOptions []CapabilityDetailCredentialOption + PreferredCredentialOption CapabilityDetailCredentialOption +} + +func (b0 CredentialDetailsCredentialRotation_builder) Build() *CredentialDetailsCredentialRotation { + m0 := &CredentialDetailsCredentialRotation{} + b, x := &b0, m0 + _, _ = b, x + x.SupportedCredentialOptions = b.SupportedCredentialOptions + x.PreferredCredentialOption = b.PreferredCredentialOption + return m0 +} + type ConnectorCapabilities struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceTypeCapabilities []*ResourceTypeCapability `protobuf:"bytes,1,rep,name=resource_type_capabilities,json=resourceTypeCapabilities,proto3" json:"resource_type_capabilities,omitempty"` ConnectorCapabilities []Capability `protobuf:"varint,2,rep,packed,name=connector_capabilities,json=connectorCapabilities,proto3,enum=c1.connector.v2.Capability" json:"connector_capabilities,omitempty"` CredentialDetails *CredentialDetails `protobuf:"bytes,3,opt,name=credential_details,json=credentialDetails,proto3" json:"credential_details,omitempty"` @@ -544,11 +770,6 @@ func (x *ConnectorCapabilities) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ConnectorCapabilities.ProtoReflect.Descriptor instead. -func (*ConnectorCapabilities) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{6} -} - func (x *ConnectorCapabilities) GetResourceTypeCapabilities() []*ResourceTypeCapability { if x != nil { return x.ResourceTypeCapabilities @@ -570,17 +791,173 @@ func (x *ConnectorCapabilities) GetCredentialDetails() *CredentialDetails { return nil } +func (x *ConnectorCapabilities) SetResourceTypeCapabilities(v []*ResourceTypeCapability) { + x.ResourceTypeCapabilities = v +} + +func (x *ConnectorCapabilities) SetConnectorCapabilities(v []Capability) { + x.ConnectorCapabilities = v +} + +func (x *ConnectorCapabilities) SetCredentialDetails(v *CredentialDetails) { + x.CredentialDetails = v +} + +func (x *ConnectorCapabilities) HasCredentialDetails() bool { + if x == nil { + return false + } + return x.CredentialDetails != nil +} + +func (x *ConnectorCapabilities) ClearCredentialDetails() { + x.CredentialDetails = nil +} + +type ConnectorCapabilities_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceTypeCapabilities []*ResourceTypeCapability + ConnectorCapabilities []Capability + CredentialDetails *CredentialDetails +} + +func (b0 ConnectorCapabilities_builder) Build() *ConnectorCapabilities { + m0 := &ConnectorCapabilities{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceTypeCapabilities = b.ResourceTypeCapabilities + x.ConnectorCapabilities = b.ConnectorCapabilities + x.CredentialDetails = b.CredentialDetails + return m0 +} + +type CapabilityPermission struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Permission string `protobuf:"bytes,1,opt,name=permission,proto3" json:"permission,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CapabilityPermission) Reset() { + *x = CapabilityPermission{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CapabilityPermission) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CapabilityPermission) ProtoMessage() {} + +func (x *CapabilityPermission) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CapabilityPermission) GetPermission() string { + if x != nil { + return x.Permission + } + return "" +} + +func (x *CapabilityPermission) SetPermission(v string) { + x.Permission = v +} + +type CapabilityPermission_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Permission string +} + +func (b0 CapabilityPermission_builder) Build() *CapabilityPermission { + m0 := &CapabilityPermission{} + b, x := &b0, m0 + _, _ = b, x + x.Permission = b.Permission + return m0 +} + +type CapabilityPermissions struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Permissions []*CapabilityPermission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CapabilityPermissions) Reset() { + *x = CapabilityPermissions{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CapabilityPermissions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CapabilityPermissions) ProtoMessage() {} + +func (x *CapabilityPermissions) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CapabilityPermissions) GetPermissions() []*CapabilityPermission { + if x != nil { + return x.Permissions + } + return nil +} + +func (x *CapabilityPermissions) SetPermissions(v []*CapabilityPermission) { + x.Permissions = v +} + +type CapabilityPermissions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Permissions []*CapabilityPermission +} + +func (b0 CapabilityPermissions_builder) Build() *CapabilityPermissions { + m0 := &CapabilityPermissions{} + b, x := &b0, m0 + _, _ = b, x + x.Permissions = b.Permissions + return m0 +} + type ResourceTypeCapability struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceType *ResourceType `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` Capabilities []Capability `protobuf:"varint,2,rep,packed,name=capabilities,proto3,enum=c1.connector.v2.Capability" json:"capabilities,omitempty"` + Permissions *CapabilityPermissions `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ResourceTypeCapability) Reset() { *x = ResourceTypeCapability{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[7] + mi := &file_c1_connector_v2_connector_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -592,7 +969,7 @@ func (x *ResourceTypeCapability) String() string { func (*ResourceTypeCapability) ProtoMessage() {} func (x *ResourceTypeCapability) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[7] + mi := &file_c1_connector_v2_connector_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -603,11 +980,6 @@ func (x *ResourceTypeCapability) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ResourceTypeCapability.ProtoReflect.Descriptor instead. -func (*ResourceTypeCapability) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{7} -} - func (x *ResourceTypeCapability) GetResourceType() *ResourceType { if x != nil { return x.ResourceType @@ -622,15 +994,74 @@ func (x *ResourceTypeCapability) GetCapabilities() []Capability { return nil } +func (x *ResourceTypeCapability) GetPermissions() *CapabilityPermissions { + if x != nil { + return x.Permissions + } + return nil +} + +func (x *ResourceTypeCapability) SetResourceType(v *ResourceType) { + x.ResourceType = v +} + +func (x *ResourceTypeCapability) SetCapabilities(v []Capability) { + x.Capabilities = v +} + +func (x *ResourceTypeCapability) SetPermissions(v *CapabilityPermissions) { + x.Permissions = v +} + +func (x *ResourceTypeCapability) HasResourceType() bool { + if x == nil { + return false + } + return x.ResourceType != nil +} + +func (x *ResourceTypeCapability) HasPermissions() bool { + if x == nil { + return false + } + return x.Permissions != nil +} + +func (x *ResourceTypeCapability) ClearResourceType() { + x.ResourceType = nil +} + +func (x *ResourceTypeCapability) ClearPermissions() { + x.Permissions = nil +} + +type ResourceTypeCapability_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceType *ResourceType + Capabilities []Capability + Permissions *CapabilityPermissions +} + +func (b0 ResourceTypeCapability_builder) Build() *ResourceTypeCapability { + m0 := &ResourceTypeCapability{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceType = b.ResourceType + x.Capabilities = b.Capabilities + x.Permissions = b.Permissions + return m0 +} + type ConnectorServiceGetMetadataRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ConnectorServiceGetMetadataRequest) Reset() { *x = ConnectorServiceGetMetadataRequest{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[8] + mi := &file_c1_connector_v2_connector_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -642,7 +1073,7 @@ func (x *ConnectorServiceGetMetadataRequest) String() string { func (*ConnectorServiceGetMetadataRequest) ProtoMessage() {} func (x *ConnectorServiceGetMetadataRequest) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[8] + mi := &file_c1_connector_v2_connector_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -653,13 +1084,20 @@ func (x *ConnectorServiceGetMetadataRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use ConnectorServiceGetMetadataRequest.ProtoReflect.Descriptor instead. -func (*ConnectorServiceGetMetadataRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{8} +type ConnectorServiceGetMetadataRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 ConnectorServiceGetMetadataRequest_builder) Build() *ConnectorServiceGetMetadataRequest { + m0 := &ConnectorServiceGetMetadataRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type ConnectorServiceGetMetadataResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Metadata *ConnectorMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -667,7 +1105,7 @@ type ConnectorServiceGetMetadataResponse struct { func (x *ConnectorServiceGetMetadataResponse) Reset() { *x = ConnectorServiceGetMetadataResponse{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[9] + mi := &file_c1_connector_v2_connector_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -679,7 +1117,7 @@ func (x *ConnectorServiceGetMetadataResponse) String() string { func (*ConnectorServiceGetMetadataResponse) ProtoMessage() {} func (x *ConnectorServiceGetMetadataResponse) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[9] + mi := &file_c1_connector_v2_connector_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -690,11 +1128,6 @@ func (x *ConnectorServiceGetMetadataResponse) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use ConnectorServiceGetMetadataResponse.ProtoReflect.Descriptor instead. 
-func (*ConnectorServiceGetMetadataResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{9} -} - func (x *ConnectorServiceGetMetadataResponse) GetMetadata() *ConnectorMetadata { if x != nil { return x.Metadata @@ -702,15 +1135,44 @@ func (x *ConnectorServiceGetMetadataResponse) GetMetadata() *ConnectorMetadata { return nil } +func (x *ConnectorServiceGetMetadataResponse) SetMetadata(v *ConnectorMetadata) { + x.Metadata = v +} + +func (x *ConnectorServiceGetMetadataResponse) HasMetadata() bool { + if x == nil { + return false + } + return x.Metadata != nil +} + +func (x *ConnectorServiceGetMetadataResponse) ClearMetadata() { + x.Metadata = nil +} + +type ConnectorServiceGetMetadataResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Metadata *ConnectorMetadata +} + +func (b0 ConnectorServiceGetMetadataResponse_builder) Build() *ConnectorServiceGetMetadataResponse { + m0 := &ConnectorServiceGetMetadataResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Metadata = b.Metadata + return m0 +} + type ConnectorServiceValidateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ConnectorServiceValidateRequest) Reset() { *x = ConnectorServiceValidateRequest{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[10] + mi := &file_c1_connector_v2_connector_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -722,7 +1184,7 @@ func (x *ConnectorServiceValidateRequest) String() string { func (*ConnectorServiceValidateRequest) ProtoMessage() {} func (x *ConnectorServiceValidateRequest) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[10] + mi := &file_c1_connector_v2_connector_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -733,23 +1195,31 @@ func (x *ConnectorServiceValidateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ConnectorServiceValidateRequest.ProtoReflect.Descriptor instead. -func (*ConnectorServiceValidateRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{10} +type ConnectorServiceValidateRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 ConnectorServiceValidateRequest_builder) Build() *ConnectorServiceValidateRequest { + m0 := &ConnectorServiceValidateRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 } // NOTE(morgabra) We're expecting correct grpc.Status responses // for things like 401/403/500, etc type ConnectorServiceValidateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` + SdkVersion string `protobuf:"bytes,2,opt,name=sdk_version,json=sdkVersion,proto3" json:"sdk_version,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ConnectorServiceValidateResponse) Reset() { *x = ConnectorServiceValidateResponse{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[11] + mi := &file_c1_connector_v2_connector_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -761,7 +1231,7 @@ func (x *ConnectorServiceValidateResponse) String() string { func (*ConnectorServiceValidateResponse) ProtoMessage() {} func (x *ConnectorServiceValidateResponse) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[11] + mi := &file_c1_connector_v2_connector_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -772,11 +1242,6 @@ func (x *ConnectorServiceValidateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ConnectorServiceValidateResponse.ProtoReflect.Descriptor instead. -func (*ConnectorServiceValidateResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{11} -} - func (x *ConnectorServiceValidateResponse) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -784,8 +1249,39 @@ func (x *ConnectorServiceValidateResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *ConnectorServiceValidateResponse) GetSdkVersion() string { + if x != nil { + return x.SdkVersion + } + return "" +} + +func (x *ConnectorServiceValidateResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ConnectorServiceValidateResponse) SetSdkVersion(v string) { + x.SdkVersion = v +} + +type ConnectorServiceValidateResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + SdkVersion string +} + +func (b0 ConnectorServiceValidateResponse_builder) Build() *ConnectorServiceValidateResponse { + m0 := &ConnectorServiceValidateResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.SdkVersion = b.SdkVersion + return m0 +} + type ConnectorAccountCreationSchema struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` FieldMap map[string]*ConnectorAccountCreationSchema_Field `protobuf:"bytes,1,rep,name=field_map,json=fieldMap,proto3" json:"field_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -793,7 +1289,7 @@ type ConnectorAccountCreationSchema struct { func (x *ConnectorAccountCreationSchema) Reset() { *x = ConnectorAccountCreationSchema{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[12] + mi := &file_c1_connector_v2_connector_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -805,7 +1301,7 @@ func (x *ConnectorAccountCreationSchema) String() string { func (*ConnectorAccountCreationSchema) ProtoMessage() {} func (x *ConnectorAccountCreationSchema) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[12] + mi := &file_c1_connector_v2_connector_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -816,11 +1312,6 @@ func (x *ConnectorAccountCreationSchema) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ConnectorAccountCreationSchema.ProtoReflect.Descriptor instead. -func (*ConnectorAccountCreationSchema) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{12} -} - func (x *ConnectorAccountCreationSchema) GetFieldMap() map[string]*ConnectorAccountCreationSchema_Field { if x != nil { return x.FieldMap @@ -828,8 +1319,26 @@ func (x *ConnectorAccountCreationSchema) GetFieldMap() map[string]*ConnectorAcco return nil } +func (x *ConnectorAccountCreationSchema) SetFieldMap(v map[string]*ConnectorAccountCreationSchema_Field) { + x.FieldMap = v +} + +type ConnectorAccountCreationSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + FieldMap map[string]*ConnectorAccountCreationSchema_Field +} + +func (b0 ConnectorAccountCreationSchema_builder) Build() *ConnectorAccountCreationSchema { + m0 := &ConnectorAccountCreationSchema{} + b, x := &b0, m0 + _, _ = b, x + x.FieldMap = b.FieldMap + return m0 +} + type ConnectorAccountCreationSchema_Field struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"` Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` @@ -850,7 +1359,7 @@ type ConnectorAccountCreationSchema_Field struct { func (x *ConnectorAccountCreationSchema_Field) Reset() { *x = ConnectorAccountCreationSchema_Field{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[14] + mi := &file_c1_connector_v2_connector_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -862,7 +1371,7 @@ func (x *ConnectorAccountCreationSchema_Field) String() string { func (*ConnectorAccountCreationSchema_Field) ProtoMessage() {} func (x *ConnectorAccountCreationSchema_Field) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[14] + mi := &file_c1_connector_v2_connector_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -873,11 +1382,6 @@ func (x *ConnectorAccountCreationSchema_Field) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use ConnectorAccountCreationSchema_Field.ProtoReflect.Descriptor instead. -func (*ConnectorAccountCreationSchema_Field) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{12, 1} -} - func (x *ConnectorAccountCreationSchema_Field) GetDisplayName() string { if x != nil { return x.DisplayName @@ -972,6 +1476,234 @@ func (x *ConnectorAccountCreationSchema_Field) GetMapField() *ConnectorAccountCr return nil } +func (x *ConnectorAccountCreationSchema_Field) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetRequired(v bool) { + x.Required = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetDescription(v string) { + x.Description = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetPlaceholder(v string) { + x.Placeholder = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetOrder(v int32) { + x.Order = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetDeprecated(v bool) { + x.Deprecated = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetStringField(v *ConnectorAccountCreationSchema_StringField) { + if v == nil { + x.Field = nil + return + } + x.Field = &ConnectorAccountCreationSchema_Field_StringField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) SetBoolField(v *ConnectorAccountCreationSchema_BoolField) { + if v == nil { + x.Field = nil + return + } + x.Field = &ConnectorAccountCreationSchema_Field_BoolField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) SetStringListField(v *ConnectorAccountCreationSchema_StringListField) { + if v == nil { + x.Field = nil + return + } + x.Field = &ConnectorAccountCreationSchema_Field_StringListField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) SetIntField(v *ConnectorAccountCreationSchema_IntField) { + if v == nil { + x.Field = 
nil + return + } + x.Field = &ConnectorAccountCreationSchema_Field_IntField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) SetMapField(v *ConnectorAccountCreationSchema_MapField) { + if v == nil { + x.Field = nil + return + } + x.Field = &ConnectorAccountCreationSchema_Field_MapField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) HasField() bool { + if x == nil { + return false + } + return x.Field != nil +} + +func (x *ConnectorAccountCreationSchema_Field) HasStringField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_StringField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) HasBoolField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_BoolField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) HasStringListField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_StringListField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) HasIntField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_IntField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) HasMapField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_MapField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) ClearField() { + x.Field = nil +} + +func (x *ConnectorAccountCreationSchema_Field) ClearStringField() { + if _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_StringField); ok { + x.Field = nil + } +} + +func (x *ConnectorAccountCreationSchema_Field) ClearBoolField() { + if _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_BoolField); ok { + x.Field = nil + } +} + +func (x *ConnectorAccountCreationSchema_Field) ClearStringListField() { + if _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_StringListField); ok { + x.Field = nil + } +} + +func (x *ConnectorAccountCreationSchema_Field) ClearIntField() { + if _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_IntField); ok { + x.Field = nil + } +} + +func (x *ConnectorAccountCreationSchema_Field) ClearMapField() { + if _, ok := x.Field.(*ConnectorAccountCreationSchema_Field_MapField); ok { + x.Field = nil + } +} + +const ConnectorAccountCreationSchema_Field_Field_not_set_case case_ConnectorAccountCreationSchema_Field_Field = 0 +const ConnectorAccountCreationSchema_Field_StringField_case case_ConnectorAccountCreationSchema_Field_Field = 100 +const ConnectorAccountCreationSchema_Field_BoolField_case case_ConnectorAccountCreationSchema_Field_Field = 101 +const ConnectorAccountCreationSchema_Field_StringListField_case case_ConnectorAccountCreationSchema_Field_Field = 102 +const ConnectorAccountCreationSchema_Field_IntField_case case_ConnectorAccountCreationSchema_Field_Field = 103 +const ConnectorAccountCreationSchema_Field_MapField_case case_ConnectorAccountCreationSchema_Field_Field = 104 + +func (x *ConnectorAccountCreationSchema_Field) WhichField() case_ConnectorAccountCreationSchema_Field_Field { + if x == nil { + return ConnectorAccountCreationSchema_Field_Field_not_set_case + } + switch x.Field.(type) { + case *ConnectorAccountCreationSchema_Field_StringField: + return ConnectorAccountCreationSchema_Field_StringField_case + case *ConnectorAccountCreationSchema_Field_BoolField: + return ConnectorAccountCreationSchema_Field_BoolField_case + case *ConnectorAccountCreationSchema_Field_StringListField: 
+ return ConnectorAccountCreationSchema_Field_StringListField_case + case *ConnectorAccountCreationSchema_Field_IntField: + return ConnectorAccountCreationSchema_Field_IntField_case + case *ConnectorAccountCreationSchema_Field_MapField: + return ConnectorAccountCreationSchema_Field_MapField_case + default: + return ConnectorAccountCreationSchema_Field_Field_not_set_case + } +} + +type ConnectorAccountCreationSchema_Field_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DisplayName string + Required bool + Description string + Placeholder string + Order int32 + Deprecated bool + // Fields of oneof Field: + StringField *ConnectorAccountCreationSchema_StringField + BoolField *ConnectorAccountCreationSchema_BoolField + StringListField *ConnectorAccountCreationSchema_StringListField + IntField *ConnectorAccountCreationSchema_IntField + MapField *ConnectorAccountCreationSchema_MapField + // -- end of Field +} + +func (b0 ConnectorAccountCreationSchema_Field_builder) Build() *ConnectorAccountCreationSchema_Field { + m0 := &ConnectorAccountCreationSchema_Field{} + b, x := &b0, m0 + _, _ = b, x + x.DisplayName = b.DisplayName + x.Required = b.Required + x.Description = b.Description + x.Placeholder = b.Placeholder + x.Order = b.Order + x.Deprecated = b.Deprecated + if b.StringField != nil { + x.Field = &ConnectorAccountCreationSchema_Field_StringField{b.StringField} + } + if b.BoolField != nil { + x.Field = &ConnectorAccountCreationSchema_Field_BoolField{b.BoolField} + } + if b.StringListField != nil { + x.Field = &ConnectorAccountCreationSchema_Field_StringListField{b.StringListField} + } + if b.IntField != nil { + x.Field = &ConnectorAccountCreationSchema_Field_IntField{b.IntField} + } + if b.MapField != nil { + x.Field = &ConnectorAccountCreationSchema_Field_MapField{b.MapField} + } + return m0 +} + +type case_ConnectorAccountCreationSchema_Field_Field protoreflect.FieldNumber + +func (x case_ConnectorAccountCreationSchema_Field_Field) String() string { + md := file_c1_connector_v2_connector_proto_msgTypes[16].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isConnectorAccountCreationSchema_Field_Field interface { isConnectorAccountCreationSchema_Field_Field() } @@ -1012,7 +1744,7 @@ func (*ConnectorAccountCreationSchema_Field_MapField) isConnectorAccountCreation } type ConnectorAccountCreationSchema_StringField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DefaultValue *string `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3,oneof" json:"default_value,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1020,7 +1752,7 @@ type ConnectorAccountCreationSchema_StringField struct { func (x *ConnectorAccountCreationSchema_StringField) Reset() { *x = ConnectorAccountCreationSchema_StringField{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[15] + mi := &file_c1_connector_v2_connector_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1032,7 +1764,7 @@ func (x *ConnectorAccountCreationSchema_StringField) String() string { func (*ConnectorAccountCreationSchema_StringField) ProtoMessage() {} func (x *ConnectorAccountCreationSchema_StringField) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[15] + mi := 
&file_c1_connector_v2_connector_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1043,11 +1775,6 @@ func (x *ConnectorAccountCreationSchema_StringField) ProtoReflect() protoreflect return mi.MessageOf(x) } -// Deprecated: Use ConnectorAccountCreationSchema_StringField.ProtoReflect.Descriptor instead. -func (*ConnectorAccountCreationSchema_StringField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{12, 2} -} - func (x *ConnectorAccountCreationSchema_StringField) GetDefaultValue() string { if x != nil && x.DefaultValue != nil { return *x.DefaultValue @@ -1055,8 +1782,37 @@ func (x *ConnectorAccountCreationSchema_StringField) GetDefaultValue() string { return "" } +func (x *ConnectorAccountCreationSchema_StringField) SetDefaultValue(v string) { + x.DefaultValue = &v +} + +func (x *ConnectorAccountCreationSchema_StringField) HasDefaultValue() bool { + if x == nil { + return false + } + return x.DefaultValue != nil +} + +func (x *ConnectorAccountCreationSchema_StringField) ClearDefaultValue() { + x.DefaultValue = nil +} + +type ConnectorAccountCreationSchema_StringField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue *string +} + +func (b0 ConnectorAccountCreationSchema_StringField_builder) Build() *ConnectorAccountCreationSchema_StringField { + m0 := &ConnectorAccountCreationSchema_StringField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 +} + type ConnectorAccountCreationSchema_BoolField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DefaultValue *bool `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3,oneof" json:"default_value,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1064,7 +1820,7 @@ type ConnectorAccountCreationSchema_BoolField struct { func (x *ConnectorAccountCreationSchema_BoolField) Reset() { *x = ConnectorAccountCreationSchema_BoolField{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[16] + mi := &file_c1_connector_v2_connector_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1076,7 +1832,7 @@ func (x *ConnectorAccountCreationSchema_BoolField) String() string { func (*ConnectorAccountCreationSchema_BoolField) ProtoMessage() {} func (x *ConnectorAccountCreationSchema_BoolField) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[16] + mi := &file_c1_connector_v2_connector_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1087,11 +1843,6 @@ func (x *ConnectorAccountCreationSchema_BoolField) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use ConnectorAccountCreationSchema_BoolField.ProtoReflect.Descriptor instead. 
-func (*ConnectorAccountCreationSchema_BoolField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{12, 3} -} - func (x *ConnectorAccountCreationSchema_BoolField) GetDefaultValue() bool { if x != nil && x.DefaultValue != nil { return *x.DefaultValue @@ -1099,8 +1850,37 @@ func (x *ConnectorAccountCreationSchema_BoolField) GetDefaultValue() bool { return false } +func (x *ConnectorAccountCreationSchema_BoolField) SetDefaultValue(v bool) { + x.DefaultValue = &v +} + +func (x *ConnectorAccountCreationSchema_BoolField) HasDefaultValue() bool { + if x == nil { + return false + } + return x.DefaultValue != nil +} + +func (x *ConnectorAccountCreationSchema_BoolField) ClearDefaultValue() { + x.DefaultValue = nil +} + +type ConnectorAccountCreationSchema_BoolField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue *bool +} + +func (b0 ConnectorAccountCreationSchema_BoolField_builder) Build() *ConnectorAccountCreationSchema_BoolField { + m0 := &ConnectorAccountCreationSchema_BoolField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 +} + type ConnectorAccountCreationSchema_StringListField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DefaultValue []string `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1108,7 +1888,7 @@ type ConnectorAccountCreationSchema_StringListField struct { func (x *ConnectorAccountCreationSchema_StringListField) Reset() { *x = ConnectorAccountCreationSchema_StringListField{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[17] + mi := &file_c1_connector_v2_connector_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1120,7 +1900,7 @@ func (x *ConnectorAccountCreationSchema_StringListField) String() string { func (*ConnectorAccountCreationSchema_StringListField) ProtoMessage() {} func (x *ConnectorAccountCreationSchema_StringListField) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[17] + mi := &file_c1_connector_v2_connector_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1131,11 +1911,6 @@ func (x *ConnectorAccountCreationSchema_StringListField) ProtoReflect() protoref return mi.MessageOf(x) } -// Deprecated: Use ConnectorAccountCreationSchema_StringListField.ProtoReflect.Descriptor instead. -func (*ConnectorAccountCreationSchema_StringListField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{12, 4} -} - func (x *ConnectorAccountCreationSchema_StringListField) GetDefaultValue() []string { if x != nil { return x.DefaultValue @@ -1143,8 +1918,26 @@ func (x *ConnectorAccountCreationSchema_StringListField) GetDefaultValue() []str return nil } +func (x *ConnectorAccountCreationSchema_StringListField) SetDefaultValue(v []string) { + x.DefaultValue = v +} + +type ConnectorAccountCreationSchema_StringListField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue []string +} + +func (b0 ConnectorAccountCreationSchema_StringListField_builder) Build() *ConnectorAccountCreationSchema_StringListField { + m0 := &ConnectorAccountCreationSchema_StringListField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 +} + type ConnectorAccountCreationSchema_IntField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DefaultValue *int32 `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3,oneof" json:"default_value,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1152,7 +1945,7 @@ type ConnectorAccountCreationSchema_IntField struct { func (x *ConnectorAccountCreationSchema_IntField) Reset() { *x = ConnectorAccountCreationSchema_IntField{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[18] + mi := &file_c1_connector_v2_connector_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1164,7 +1957,7 @@ func (x *ConnectorAccountCreationSchema_IntField) String() string { func (*ConnectorAccountCreationSchema_IntField) ProtoMessage() {} func (x *ConnectorAccountCreationSchema_IntField) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[18] + mi := &file_c1_connector_v2_connector_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1175,11 +1968,6 @@ func (x *ConnectorAccountCreationSchema_IntField) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use ConnectorAccountCreationSchema_IntField.ProtoReflect.Descriptor instead. -func (*ConnectorAccountCreationSchema_IntField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{12, 5} -} - func (x *ConnectorAccountCreationSchema_IntField) GetDefaultValue() int32 { if x != nil && x.DefaultValue != nil { return *x.DefaultValue @@ -1187,8 +1975,37 @@ func (x *ConnectorAccountCreationSchema_IntField) GetDefaultValue() int32 { return 0 } +func (x *ConnectorAccountCreationSchema_IntField) SetDefaultValue(v int32) { + x.DefaultValue = &v +} + +func (x *ConnectorAccountCreationSchema_IntField) HasDefaultValue() bool { + if x == nil { + return false + } + return x.DefaultValue != nil +} + +func (x *ConnectorAccountCreationSchema_IntField) ClearDefaultValue() { + x.DefaultValue = nil +} + +type ConnectorAccountCreationSchema_IntField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue *int32 +} + +func (b0 ConnectorAccountCreationSchema_IntField_builder) Build() *ConnectorAccountCreationSchema_IntField { + m0 := &ConnectorAccountCreationSchema_IntField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 +} + type ConnectorAccountCreationSchema_MapField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DefaultValue map[string]*ConnectorAccountCreationSchema_Field `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1196,7 +2013,7 @@ type ConnectorAccountCreationSchema_MapField struct { func (x *ConnectorAccountCreationSchema_MapField) Reset() { *x = ConnectorAccountCreationSchema_MapField{} - mi := &file_c1_connector_v2_connector_proto_msgTypes[19] + mi := &file_c1_connector_v2_connector_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1208,7 +2025,7 @@ func (x *ConnectorAccountCreationSchema_MapField) String() string { func (*ConnectorAccountCreationSchema_MapField) ProtoMessage() {} func (x *ConnectorAccountCreationSchema_MapField) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_connector_proto_msgTypes[19] + mi := &file_c1_connector_v2_connector_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1219,11 +2036,6 @@ func (x *ConnectorAccountCreationSchema_MapField) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use ConnectorAccountCreationSchema_MapField.ProtoReflect.Descriptor instead. 
-func (*ConnectorAccountCreationSchema_MapField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_connector_proto_rawDescGZIP(), []int{12, 6} -} - func (x *ConnectorAccountCreationSchema_MapField) GetDefaultValue() map[string]*ConnectorAccountCreationSchema_Field { if x != nil { return x.DefaultValue @@ -1231,341 +2043,144 @@ func (x *ConnectorAccountCreationSchema_MapField) GetDefaultValue() map[string]* return nil } -var File_c1_connector_v2_connector_proto protoreflect.FileDescriptor +func (x *ConnectorAccountCreationSchema_MapField) SetDefaultValue(v map[string]*ConnectorAccountCreationSchema_Field) { + x.DefaultValue = v +} -var file_c1_connector_v2_connector_proto_rawDesc = string([]byte{ - 0x0a, 0x1f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x1a, 0x1b, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, - 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x58, 0x0a, 0x1e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x59, 0x0a, 0x1f, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, - 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, - 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa8, 0x04, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x0c, - 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, 0x0b, - 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x68, - 0x65, 0x6c, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1a, 
0xfa, - 0x42, 0x17, 0x72, 0x15, 0x20, 0x01, 0x28, 0x80, 0x08, 0x3a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x73, - 0x3a, 0x2f, 0x2f, 0xd0, 0x01, 0x01, 0x88, 0x01, 0x01, 0x52, 0x07, 0x68, 0x65, 0x6c, 0x70, 0x55, - 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x04, 0x69, 0x63, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x66, 0x52, 0x04, 0x69, 0x63, 0x6f, - 0x6e, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x66, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x6f, - 0x12, 0x31, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x20, 0xd0, 0x01, 0x01, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0c, - 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x61, - 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x15, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x22, 0x8e, 0x02, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x7d, 0x0a, 0x1f, 0x63, 0x61, 0x70, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x35, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x73, 0x69, 0x6f, 
0x6e, 0x69, 0x6e, 0x67, 0x52, 0x1d, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x1e, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x8e, 0x02, 0x0a, 0x24, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x73, 0x0a, 0x1c, 0x73, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0e, 0x32, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x43, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x71, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, - 0x72, 0x65, 0x64, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x8d, 0x02, 0x0a, 0x23, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x73, 0x0a, 0x1c, 0x73, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0e, 0x32, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x43, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 
0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x71, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, - 0x72, 0x65, 0x64, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xa5, 0x02, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x65, 0x0a, - 0x1a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x63, - 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x18, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x16, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x79, 0x52, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x61, 0x70, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x16, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x61, 0x70, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3f, 0x0a, 0x0c, 0x63, 0x61, - 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, - 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x63, - 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 
0x22, 0x24, 0x0a, 0x22, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, - 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x65, 0x0a, 0x23, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x21, 0x0a, 0x1f, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5a, 0x0a, 0x20, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa5, 0x0b, 0x0a, 0x1e, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x09, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x70, 0x1a, 0x72, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, - 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xa8, 0x05, 0x0a, 0x05, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 
0x69, - 0x72, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, - 0x6c, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x63, - 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x1e, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x60, 0x0a, - 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x64, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, - 0x5a, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x65, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, - 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x6d, 0x0a, 0x11, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, - 0x73, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x57, 0x0a, 0x09, 0x69, 0x6e, - 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x49, - 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x57, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x63, 
0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4d, 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x48, 0x00, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x07, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x1a, 0x49, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, - 0x0a, 0x0e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x47, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x28, 0x0a, - 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x36, 0x0a, 0x0f, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x1a, 0x46, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x28, 0x0a, - 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x08, 0x4d, 0x61, - 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x6f, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4d, - 0x61, 0x70, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x76, 0x0a, 0x11, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4b, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, - 0x86, 0x03, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x1a, - 0x0a, 0x16, 0x43, 0x41, 0x50, 0x41, 0x42, 
0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, - 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x53, 0x49, - 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, - 0x54, 0x59, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x41, 0x50, - 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x45, - 0x45, 0x44, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, - 0x54, 0x59, 0x5f, 0x54, 0x49, 0x43, 0x4b, 0x45, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x23, - 0x0a, 0x1f, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x41, 0x43, 0x43, - 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x49, 0x4e, - 0x47, 0x10, 0x05, 0x12, 0x22, 0x0a, 0x1e, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, - 0x59, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x4f, 0x54, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x06, 0x12, 0x1e, 0x0a, 0x1a, 0x43, 0x41, 0x50, 0x41, 0x42, - 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x43, - 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x43, 0x41, 0x50, 0x41, 0x42, - 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x44, - 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x08, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x41, 0x50, 0x41, 0x42, - 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x45, 0x43, 0x52, 0x45, - 0x54, 0x53, 0x10, 0x09, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, - 0x54, 0x59, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x0a, 0x12, 0x1c, 0x0a, 0x18, - 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x45, 0x44, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x10, 0x0b, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x41, - 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x46, - 0x45, 0x45, 0x44, 0x5f, 0x56, 0x32, 0x10, 0x0c, 0x2a, 0xae, 0x02, 0x0a, 0x20, 0x43, 0x61, 0x70, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x43, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, - 0x2f, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, - 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x4f, 0x50, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, - 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, - 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x5f, 0x50, 0x41, 0x53, - 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x37, 0x0a, 0x33, 0x43, 0x41, 0x50, 0x41, 0x42, - 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, - 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x02, - 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 
0x49, 0x54, 0x59, 0x5f, 0x44, - 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, - 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x53, 0x4f, 0x10, 0x03, 0x12, 0x3a, 0x0a, - 0x36, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, - 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x4f, 0x50, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x45, 0x44, 0x5f, 0x50, - 0x41, 0x53, 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x04, 0x32, 0xeb, 0x02, 0x0a, 0x10, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, - 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x33, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x12, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x07, 0x43, 0x6c, 0x65, - 0x61, 0x6e, 0x75, 0x70, 0x12, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, - 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, - 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +type ConnectorAccountCreationSchema_MapField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-var ( - file_c1_connector_v2_connector_proto_rawDescOnce sync.Once - file_c1_connector_v2_connector_proto_rawDescData []byte -) + DefaultValue map[string]*ConnectorAccountCreationSchema_Field +} -func file_c1_connector_v2_connector_proto_rawDescGZIP() []byte { - file_c1_connector_v2_connector_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_connector_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_connector_proto_rawDesc), len(file_c1_connector_v2_connector_proto_rawDesc))) - }) - return file_c1_connector_v2_connector_proto_rawDescData +func (b0 ConnectorAccountCreationSchema_MapField_builder) Build() *ConnectorAccountCreationSchema_MapField { + m0 := &ConnectorAccountCreationSchema_MapField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 } +var File_c1_connector_v2_connector_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_connector_proto_rawDesc = "" + + "\n" + + "\x1fc1/connector/v2/connector.proto\x12\x0fc1.connector.v2\x1a\x1bc1/connector/v2/asset.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17validate/validate.proto\"\x8d\x01\n" + + "\x1eConnectorServiceCleanupRequest\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"Y\n" + + "\x1fConnectorServiceCleanupResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa8\x04\n" + + "\x11ConnectorMetadata\x12-\n" + + "\fdisplay_name\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\vdisplayName\x125\n" + + "\bhelp_url\x18\x02 \x01(\tB\x1a\xfaB\x17r\x15 \x01(\x80\b:\bhttps://\xd0\x01\x01\x88\x01\x01R\ahelpUrl\x12-\n" + + "\x04icon\x18\x03 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12-\n" + + "\x04logo\x18\x04 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04logo\x121\n" + + "\aprofile\x18\x05 \x01(\v2\x17.google.protobuf.StructR\aprofile\x126\n" + + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + + "\vdescription\x18\a \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\vdescription\x12J\n" + + "\fcapabilities\x18\b \x01(\v2&.c1.connector.v2.ConnectorCapabilitiesR\fcapabilities\x12g\n" + + "\x17account_creation_schema\x18\t \x01(\v2/.c1.connector.v2.ConnectorAccountCreationSchemaR\x15accountCreationSchema\"\x8e\x02\n" + + "\x11CredentialDetails\x12}\n" + + "\x1fcapability_account_provisioning\x18\x01 \x01(\v25.c1.connector.v2.CredentialDetailsAccountProvisioningR\x1dcapabilityAccountProvisioning\x12z\n" + + "\x1ecapability_credential_rotation\x18\x02 \x01(\v24.c1.connector.v2.CredentialDetailsCredentialRotationR\x1ccapabilityCredentialRotation\"\x8e\x02\n" + + "$CredentialDetailsAccountProvisioning\x12s\n" + + "\x1csupported_credential_options\x18\x01 \x03(\x0e21.c1.connector.v2.CapabilityDetailCredentialOptionR\x1asupportedCredentialOptions\x12q\n" + + "\x1bpreferred_credential_option\x18\x02 \x01(\x0e21.c1.connector.v2.CapabilityDetailCredentialOptionR\x19preferredCredentialOption\"\x8d\x02\n" + + "#CredentialDetailsCredentialRotation\x12s\n" + + "\x1csupported_credential_options\x18\x01 \x03(\x0e21.c1.connector.v2.CapabilityDetailCredentialOptionR\x1asupportedCredentialOptions\x12q\n" + + "\x1bpreferred_credential_option\x18\x02 \x01(\x0e21.c1.connector.v2.CapabilityDetailCredentialOptionR\x19preferredCredentialOption\"\xa5\x02\n" + + 
"\x15ConnectorCapabilities\x12e\n" + + "\x1aresource_type_capabilities\x18\x01 \x03(\v2'.c1.connector.v2.ResourceTypeCapabilityR\x18resourceTypeCapabilities\x12R\n" + + "\x16connector_capabilities\x18\x02 \x03(\x0e2\x1b.c1.connector.v2.CapabilityR\x15connectorCapabilities\x12Q\n" + + "\x12credential_details\x18\x03 \x01(\v2\".c1.connector.v2.CredentialDetailsR\x11credentialDetails\"6\n" + + "\x14CapabilityPermission\x12\x1e\n" + + "\n" + + "permission\x18\x01 \x01(\tR\n" + + "permission\"`\n" + + "\x15CapabilityPermissions\x12G\n" + + "\vpermissions\x18\x01 \x03(\v2%.c1.connector.v2.CapabilityPermissionR\vpermissions\"\xe7\x01\n" + + "\x16ResourceTypeCapability\x12B\n" + + "\rresource_type\x18\x01 \x01(\v2\x1d.c1.connector.v2.ResourceTypeR\fresourceType\x12?\n" + + "\fcapabilities\x18\x02 \x03(\x0e2\x1b.c1.connector.v2.CapabilityR\fcapabilities\x12H\n" + + "\vpermissions\x18\x03 \x01(\v2&.c1.connector.v2.CapabilityPermissionsR\vpermissions\"$\n" + + "\"ConnectorServiceGetMetadataRequest\"e\n" + + "#ConnectorServiceGetMetadataResponse\x12>\n" + + "\bmetadata\x18\x01 \x01(\v2\".c1.connector.v2.ConnectorMetadataR\bmetadata\"!\n" + + "\x1fConnectorServiceValidateRequest\"{\n" + + " ConnectorServiceValidateResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12\x1f\n" + + "\vsdk_version\x18\x02 \x01(\tR\n" + + "sdkVersion\"\xa5\v\n" + + "\x1eConnectorAccountCreationSchema\x12Z\n" + + "\tfield_map\x18\x01 \x03(\v2=.c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntryR\bfieldMap\x1ar\n" + + "\rFieldMapEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12K\n" + + "\x05value\x18\x02 \x01(\v25.c1.connector.v2.ConnectorAccountCreationSchema.FieldR\x05value:\x028\x01\x1a\xa8\x05\n" + + "\x05Field\x12!\n" + + "\fdisplay_name\x18\x01 \x01(\tR\vdisplayName\x12\x1a\n" + + "\brequired\x18\x02 \x01(\bR\brequired\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x12 \n" + + "\vplaceholder\x18\x04 \x01(\tR\vplaceholder\x12\x14\n" + + "\x05order\x18\x05 \x01(\x05R\x05order\x12\x1e\n" + + "\n" + + "deprecated\x18\x06 \x01(\bR\n" + + "deprecated\x12`\n" + + "\fstring_field\x18d \x01(\v2;.c1.connector.v2.ConnectorAccountCreationSchema.StringFieldH\x00R\vstringField\x12Z\n" + + "\n" + + "bool_field\x18e \x01(\v29.c1.connector.v2.ConnectorAccountCreationSchema.BoolFieldH\x00R\tboolField\x12m\n" + + "\x11string_list_field\x18f \x01(\v2?.c1.connector.v2.ConnectorAccountCreationSchema.StringListFieldH\x00R\x0fstringListField\x12W\n" + + "\tint_field\x18g \x01(\v28.c1.connector.v2.ConnectorAccountCreationSchema.IntFieldH\x00R\bintField\x12W\n" + + "\tmap_field\x18h \x01(\v28.c1.connector.v2.ConnectorAccountCreationSchema.MapFieldH\x00R\bmapFieldB\a\n" + + "\x05field\x1aI\n" + + "\vStringField\x12(\n" + + "\rdefault_value\x18\x01 \x01(\tH\x00R\fdefaultValue\x88\x01\x01B\x10\n" + + "\x0e_default_value\x1aG\n" + + "\tBoolField\x12(\n" + + "\rdefault_value\x18\x01 \x01(\bH\x00R\fdefaultValue\x88\x01\x01B\x10\n" + + "\x0e_default_value\x1a6\n" + + "\x0fStringListField\x12#\n" + + "\rdefault_value\x18\x01 \x03(\tR\fdefaultValue\x1aF\n" + + "\bIntField\x12(\n" + + "\rdefault_value\x18\x01 \x01(\x05H\x00R\fdefaultValue\x88\x01\x01B\x10\n" + + "\x0e_default_value\x1a\xf3\x01\n" + + "\bMapField\x12o\n" + + "\rdefault_value\x18\x01 \x03(\v2J.c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntryR\fdefaultValue\x1av\n" + + "\x11DefaultValueEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12K\n" + + "\x05value\x18\x02 
\x01(\v25.c1.connector.v2.ConnectorAccountCreationSchema.FieldR\x05value:\x028\x01*\x86\x03\n" + + "\n" + + "Capability\x12\x1a\n" + + "\x16CAPABILITY_UNSPECIFIED\x10\x00\x12\x18\n" + + "\x14CAPABILITY_PROVISION\x10\x01\x12\x13\n" + + "\x0fCAPABILITY_SYNC\x10\x02\x12\x19\n" + + "\x15CAPABILITY_EVENT_FEED\x10\x03\x12\x18\n" + + "\x14CAPABILITY_TICKETING\x10\x04\x12#\n" + + "\x1fCAPABILITY_ACCOUNT_PROVISIONING\x10\x05\x12\"\n" + + "\x1eCAPABILITY_CREDENTIAL_ROTATION\x10\x06\x12\x1e\n" + + "\x1aCAPABILITY_RESOURCE_CREATE\x10\a\x12\x1e\n" + + "\x1aCAPABILITY_RESOURCE_DELETE\x10\b\x12\x1b\n" + + "\x17CAPABILITY_SYNC_SECRETS\x10\t\x12\x16\n" + + "\x12CAPABILITY_ACTIONS\x10\n" + + "\x12\x1c\n" + + "\x18CAPABILITY_TARGETED_SYNC\x10\v\x12\x1c\n" + + "\x18CAPABILITY_EVENT_FEED_V2\x10\f*\xae\x02\n" + + " CapabilityDetailCredentialOption\x123\n" + + "/CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED\x10\x00\x123\n" + + "/CAPABILITY_DETAIL_CREDENTIAL_OPTION_NO_PASSWORD\x10\x01\x127\n" + + "3CAPABILITY_DETAIL_CREDENTIAL_OPTION_RANDOM_PASSWORD\x10\x02\x12+\n" + + "'CAPABILITY_DETAIL_CREDENTIAL_OPTION_SSO\x10\x03\x12:\n" + + "6CAPABILITY_DETAIL_CREDENTIAL_OPTION_ENCRYPTED_PASSWORD\x10\x042\xeb\x02\n" + + "\x10ConnectorService\x12x\n" + + "\vGetMetadata\x123.c1.connector.v2.ConnectorServiceGetMetadataRequest\x1a4.c1.connector.v2.ConnectorServiceGetMetadataResponse\x12o\n" + + "\bValidate\x120.c1.connector.v2.ConnectorServiceValidateRequest\x1a1.c1.connector.v2.ConnectorServiceValidateResponse\x12l\n" + + "\aCleanup\x12/.c1.connector.v2.ConnectorServiceCleanupRequest\x1a0.c1.connector.v2.ConnectorServiceCleanupResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_connector_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_c1_connector_v2_connector_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_c1_connector_v2_connector_proto_msgTypes = make([]protoimpl.MessageInfo, 23) var file_c1_connector_v2_connector_proto_goTypes = []any{ (Capability)(0), // 0: c1.connector.v2.Capability (CapabilityDetailCredentialOption)(0), // 1: c1.connector.v2.CapabilityDetailCredentialOption @@ -1576,67 +2191,71 @@ var file_c1_connector_v2_connector_proto_goTypes = []any{ (*CredentialDetailsAccountProvisioning)(nil), // 6: c1.connector.v2.CredentialDetailsAccountProvisioning (*CredentialDetailsCredentialRotation)(nil), // 7: c1.connector.v2.CredentialDetailsCredentialRotation (*ConnectorCapabilities)(nil), // 8: c1.connector.v2.ConnectorCapabilities - (*ResourceTypeCapability)(nil), // 9: c1.connector.v2.ResourceTypeCapability - (*ConnectorServiceGetMetadataRequest)(nil), // 10: c1.connector.v2.ConnectorServiceGetMetadataRequest - (*ConnectorServiceGetMetadataResponse)(nil), // 11: c1.connector.v2.ConnectorServiceGetMetadataResponse - (*ConnectorServiceValidateRequest)(nil), // 12: c1.connector.v2.ConnectorServiceValidateRequest - (*ConnectorServiceValidateResponse)(nil), // 13: c1.connector.v2.ConnectorServiceValidateResponse - (*ConnectorAccountCreationSchema)(nil), // 14: c1.connector.v2.ConnectorAccountCreationSchema - nil, // 15: c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry - (*ConnectorAccountCreationSchema_Field)(nil), // 16: c1.connector.v2.ConnectorAccountCreationSchema.Field - (*ConnectorAccountCreationSchema_StringField)(nil), // 17: c1.connector.v2.ConnectorAccountCreationSchema.StringField - (*ConnectorAccountCreationSchema_BoolField)(nil), // 18: c1.connector.v2.ConnectorAccountCreationSchema.BoolField - 
(*ConnectorAccountCreationSchema_StringListField)(nil), // 19: c1.connector.v2.ConnectorAccountCreationSchema.StringListField - (*ConnectorAccountCreationSchema_IntField)(nil), // 20: c1.connector.v2.ConnectorAccountCreationSchema.IntField - (*ConnectorAccountCreationSchema_MapField)(nil), // 21: c1.connector.v2.ConnectorAccountCreationSchema.MapField - nil, // 22: c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry - (*anypb.Any)(nil), // 23: google.protobuf.Any - (*AssetRef)(nil), // 24: c1.connector.v2.AssetRef - (*structpb.Struct)(nil), // 25: google.protobuf.Struct - (*ResourceType)(nil), // 26: c1.connector.v2.ResourceType + (*CapabilityPermission)(nil), // 9: c1.connector.v2.CapabilityPermission + (*CapabilityPermissions)(nil), // 10: c1.connector.v2.CapabilityPermissions + (*ResourceTypeCapability)(nil), // 11: c1.connector.v2.ResourceTypeCapability + (*ConnectorServiceGetMetadataRequest)(nil), // 12: c1.connector.v2.ConnectorServiceGetMetadataRequest + (*ConnectorServiceGetMetadataResponse)(nil), // 13: c1.connector.v2.ConnectorServiceGetMetadataResponse + (*ConnectorServiceValidateRequest)(nil), // 14: c1.connector.v2.ConnectorServiceValidateRequest + (*ConnectorServiceValidateResponse)(nil), // 15: c1.connector.v2.ConnectorServiceValidateResponse + (*ConnectorAccountCreationSchema)(nil), // 16: c1.connector.v2.ConnectorAccountCreationSchema + nil, // 17: c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry + (*ConnectorAccountCreationSchema_Field)(nil), // 18: c1.connector.v2.ConnectorAccountCreationSchema.Field + (*ConnectorAccountCreationSchema_StringField)(nil), // 19: c1.connector.v2.ConnectorAccountCreationSchema.StringField + (*ConnectorAccountCreationSchema_BoolField)(nil), // 20: c1.connector.v2.ConnectorAccountCreationSchema.BoolField + (*ConnectorAccountCreationSchema_StringListField)(nil), // 21: c1.connector.v2.ConnectorAccountCreationSchema.StringListField + (*ConnectorAccountCreationSchema_IntField)(nil), // 22: c1.connector.v2.ConnectorAccountCreationSchema.IntField + (*ConnectorAccountCreationSchema_MapField)(nil), // 23: c1.connector.v2.ConnectorAccountCreationSchema.MapField + nil, // 24: c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry + (*anypb.Any)(nil), // 25: google.protobuf.Any + (*AssetRef)(nil), // 26: c1.connector.v2.AssetRef + (*structpb.Struct)(nil), // 27: google.protobuf.Struct + (*ResourceType)(nil), // 28: c1.connector.v2.ResourceType } var file_c1_connector_v2_connector_proto_depIdxs = []int32{ - 23, // 0: c1.connector.v2.ConnectorServiceCleanupRequest.annotations:type_name -> google.protobuf.Any - 23, // 1: c1.connector.v2.ConnectorServiceCleanupResponse.annotations:type_name -> google.protobuf.Any - 24, // 2: c1.connector.v2.ConnectorMetadata.icon:type_name -> c1.connector.v2.AssetRef - 24, // 3: c1.connector.v2.ConnectorMetadata.logo:type_name -> c1.connector.v2.AssetRef - 25, // 4: c1.connector.v2.ConnectorMetadata.profile:type_name -> google.protobuf.Struct - 23, // 5: c1.connector.v2.ConnectorMetadata.annotations:type_name -> google.protobuf.Any + 25, // 0: c1.connector.v2.ConnectorServiceCleanupRequest.annotations:type_name -> google.protobuf.Any + 25, // 1: c1.connector.v2.ConnectorServiceCleanupResponse.annotations:type_name -> google.protobuf.Any + 26, // 2: c1.connector.v2.ConnectorMetadata.icon:type_name -> c1.connector.v2.AssetRef + 26, // 3: c1.connector.v2.ConnectorMetadata.logo:type_name -> c1.connector.v2.AssetRef + 27, // 4: 
c1.connector.v2.ConnectorMetadata.profile:type_name -> google.protobuf.Struct + 25, // 5: c1.connector.v2.ConnectorMetadata.annotations:type_name -> google.protobuf.Any 8, // 6: c1.connector.v2.ConnectorMetadata.capabilities:type_name -> c1.connector.v2.ConnectorCapabilities - 14, // 7: c1.connector.v2.ConnectorMetadata.account_creation_schema:type_name -> c1.connector.v2.ConnectorAccountCreationSchema + 16, // 7: c1.connector.v2.ConnectorMetadata.account_creation_schema:type_name -> c1.connector.v2.ConnectorAccountCreationSchema 6, // 8: c1.connector.v2.CredentialDetails.capability_account_provisioning:type_name -> c1.connector.v2.CredentialDetailsAccountProvisioning 7, // 9: c1.connector.v2.CredentialDetails.capability_credential_rotation:type_name -> c1.connector.v2.CredentialDetailsCredentialRotation 1, // 10: c1.connector.v2.CredentialDetailsAccountProvisioning.supported_credential_options:type_name -> c1.connector.v2.CapabilityDetailCredentialOption 1, // 11: c1.connector.v2.CredentialDetailsAccountProvisioning.preferred_credential_option:type_name -> c1.connector.v2.CapabilityDetailCredentialOption 1, // 12: c1.connector.v2.CredentialDetailsCredentialRotation.supported_credential_options:type_name -> c1.connector.v2.CapabilityDetailCredentialOption 1, // 13: c1.connector.v2.CredentialDetailsCredentialRotation.preferred_credential_option:type_name -> c1.connector.v2.CapabilityDetailCredentialOption - 9, // 14: c1.connector.v2.ConnectorCapabilities.resource_type_capabilities:type_name -> c1.connector.v2.ResourceTypeCapability + 11, // 14: c1.connector.v2.ConnectorCapabilities.resource_type_capabilities:type_name -> c1.connector.v2.ResourceTypeCapability 0, // 15: c1.connector.v2.ConnectorCapabilities.connector_capabilities:type_name -> c1.connector.v2.Capability 5, // 16: c1.connector.v2.ConnectorCapabilities.credential_details:type_name -> c1.connector.v2.CredentialDetails - 26, // 17: c1.connector.v2.ResourceTypeCapability.resource_type:type_name -> c1.connector.v2.ResourceType - 0, // 18: c1.connector.v2.ResourceTypeCapability.capabilities:type_name -> c1.connector.v2.Capability - 4, // 19: c1.connector.v2.ConnectorServiceGetMetadataResponse.metadata:type_name -> c1.connector.v2.ConnectorMetadata - 23, // 20: c1.connector.v2.ConnectorServiceValidateResponse.annotations:type_name -> google.protobuf.Any - 15, // 21: c1.connector.v2.ConnectorAccountCreationSchema.field_map:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry - 16, // 22: c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry.value:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.Field - 17, // 23: c1.connector.v2.ConnectorAccountCreationSchema.Field.string_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.StringField - 18, // 24: c1.connector.v2.ConnectorAccountCreationSchema.Field.bool_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.BoolField - 19, // 25: c1.connector.v2.ConnectorAccountCreationSchema.Field.string_list_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.StringListField - 20, // 26: c1.connector.v2.ConnectorAccountCreationSchema.Field.int_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.IntField - 21, // 27: c1.connector.v2.ConnectorAccountCreationSchema.Field.map_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.MapField - 22, // 28: c1.connector.v2.ConnectorAccountCreationSchema.MapField.default_value:type_name -> 
c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry - 16, // 29: c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry.value:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.Field - 10, // 30: c1.connector.v2.ConnectorService.GetMetadata:input_type -> c1.connector.v2.ConnectorServiceGetMetadataRequest - 12, // 31: c1.connector.v2.ConnectorService.Validate:input_type -> c1.connector.v2.ConnectorServiceValidateRequest - 2, // 32: c1.connector.v2.ConnectorService.Cleanup:input_type -> c1.connector.v2.ConnectorServiceCleanupRequest - 11, // 33: c1.connector.v2.ConnectorService.GetMetadata:output_type -> c1.connector.v2.ConnectorServiceGetMetadataResponse - 13, // 34: c1.connector.v2.ConnectorService.Validate:output_type -> c1.connector.v2.ConnectorServiceValidateResponse - 3, // 35: c1.connector.v2.ConnectorService.Cleanup:output_type -> c1.connector.v2.ConnectorServiceCleanupResponse - 33, // [33:36] is the sub-list for method output_type - 30, // [30:33] is the sub-list for method input_type - 30, // [30:30] is the sub-list for extension type_name - 30, // [30:30] is the sub-list for extension extendee - 0, // [0:30] is the sub-list for field type_name + 9, // 17: c1.connector.v2.CapabilityPermissions.permissions:type_name -> c1.connector.v2.CapabilityPermission + 28, // 18: c1.connector.v2.ResourceTypeCapability.resource_type:type_name -> c1.connector.v2.ResourceType + 0, // 19: c1.connector.v2.ResourceTypeCapability.capabilities:type_name -> c1.connector.v2.Capability + 10, // 20: c1.connector.v2.ResourceTypeCapability.permissions:type_name -> c1.connector.v2.CapabilityPermissions + 4, // 21: c1.connector.v2.ConnectorServiceGetMetadataResponse.metadata:type_name -> c1.connector.v2.ConnectorMetadata + 25, // 22: c1.connector.v2.ConnectorServiceValidateResponse.annotations:type_name -> google.protobuf.Any + 17, // 23: c1.connector.v2.ConnectorAccountCreationSchema.field_map:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry + 18, // 24: c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry.value:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.Field + 19, // 25: c1.connector.v2.ConnectorAccountCreationSchema.Field.string_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.StringField + 20, // 26: c1.connector.v2.ConnectorAccountCreationSchema.Field.bool_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.BoolField + 21, // 27: c1.connector.v2.ConnectorAccountCreationSchema.Field.string_list_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.StringListField + 22, // 28: c1.connector.v2.ConnectorAccountCreationSchema.Field.int_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.IntField + 23, // 29: c1.connector.v2.ConnectorAccountCreationSchema.Field.map_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.MapField + 24, // 30: c1.connector.v2.ConnectorAccountCreationSchema.MapField.default_value:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry + 18, // 31: c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry.value:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.Field + 12, // 32: c1.connector.v2.ConnectorService.GetMetadata:input_type -> c1.connector.v2.ConnectorServiceGetMetadataRequest + 14, // 33: c1.connector.v2.ConnectorService.Validate:input_type -> c1.connector.v2.ConnectorServiceValidateRequest + 2, // 34: 
c1.connector.v2.ConnectorService.Cleanup:input_type -> c1.connector.v2.ConnectorServiceCleanupRequest + 13, // 35: c1.connector.v2.ConnectorService.GetMetadata:output_type -> c1.connector.v2.ConnectorServiceGetMetadataResponse + 15, // 36: c1.connector.v2.ConnectorService.Validate:output_type -> c1.connector.v2.ConnectorServiceValidateResponse + 3, // 37: c1.connector.v2.ConnectorService.Cleanup:output_type -> c1.connector.v2.ConnectorServiceCleanupResponse + 35, // [35:38] is the sub-list for method output_type + 32, // [32:35] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name } func init() { file_c1_connector_v2_connector_proto_init() } @@ -1646,23 +2265,23 @@ func file_c1_connector_v2_connector_proto_init() { } file_c1_connector_v2_asset_proto_init() file_c1_connector_v2_resource_proto_init() - file_c1_connector_v2_connector_proto_msgTypes[14].OneofWrappers = []any{ + file_c1_connector_v2_connector_proto_msgTypes[16].OneofWrappers = []any{ (*ConnectorAccountCreationSchema_Field_StringField)(nil), (*ConnectorAccountCreationSchema_Field_BoolField)(nil), (*ConnectorAccountCreationSchema_Field_StringListField)(nil), (*ConnectorAccountCreationSchema_Field_IntField)(nil), (*ConnectorAccountCreationSchema_Field_MapField)(nil), } - file_c1_connector_v2_connector_proto_msgTypes[15].OneofWrappers = []any{} - file_c1_connector_v2_connector_proto_msgTypes[16].OneofWrappers = []any{} + file_c1_connector_v2_connector_proto_msgTypes[17].OneofWrappers = []any{} file_c1_connector_v2_connector_proto_msgTypes[18].OneofWrappers = []any{} + file_c1_connector_v2_connector_proto_msgTypes[20].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_connector_proto_rawDesc), len(file_c1_connector_v2_connector_proto_rawDesc)), NumEnums: 2, - NumMessages: 21, + NumMessages: 23, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.validate.go index f84beed1..b6f86d8f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.validate.go @@ -91,6 +91,21 @@ func (m *ConnectorServiceCleanupRequest) validate(all bool) error { } + if m.GetActiveSyncId() != "" { + + if l := len(m.GetActiveSyncId()); l < 1 || l > 1024 { + err := ConnectorServiceCleanupRequestValidationError{ + field: "ActiveSyncId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return ConnectorServiceCleanupRequestMultiError(errors) } @@ -1202,6 +1217,246 @@ var _ interface { ErrorName() string } = ConnectorCapabilitiesValidationError{} +// Validate checks the field values on CapabilityPermission with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
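As an aside on the active_sync_id length check added above: the sketch below is a hypothetical usage example, not generated code from this patch. It assumes the builder and accessor methods shown in the protoopaque file later in this diff are also available in the default build, and the sync ID value is a placeholder.

package main

import (
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

func main() {
	// Build a cleanup request carrying the ID of the sync being finalized.
	// "local-sync-example" is an invented value for illustration only.
	req := v2.ConnectorServiceCleanupRequest_builder{
		ActiveSyncId: "local-sync-example",
	}.Build()

	// The generated validator only checks active_sync_id when it is set,
	// and rejects values outside the 1..1024-byte range.
	if err := req.Validate(); err != nil {
		fmt.Println("invalid cleanup request:", err)
		return
	}
	fmt.Println("cleaning up sync:", req.GetActiveSyncId())
}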
+func (m *CapabilityPermission) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CapabilityPermission with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CapabilityPermissionMultiError, or nil if none found. +func (m *CapabilityPermission) ValidateAll() error { + return m.validate(true) +} + +func (m *CapabilityPermission) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Permission + + if len(errors) > 0 { + return CapabilityPermissionMultiError(errors) + } + + return nil +} + +// CapabilityPermissionMultiError is an error wrapping multiple validation +// errors returned by CapabilityPermission.ValidateAll() if the designated +// constraints aren't met. +type CapabilityPermissionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CapabilityPermissionMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CapabilityPermissionMultiError) AllErrors() []error { return m } + +// CapabilityPermissionValidationError is the validation error returned by +// CapabilityPermission.Validate if the designated constraints aren't met. +type CapabilityPermissionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CapabilityPermissionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CapabilityPermissionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CapabilityPermissionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CapabilityPermissionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CapabilityPermissionValidationError) ErrorName() string { + return "CapabilityPermissionValidationError" +} + +// Error satisfies the builtin error interface +func (e CapabilityPermissionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCapabilityPermission.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CapabilityPermissionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CapabilityPermissionValidationError{} + +// Validate checks the field values on CapabilityPermissions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CapabilityPermissions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CapabilityPermissions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CapabilityPermissionsMultiError, or nil if none found. 
+func (m *CapabilityPermissions) ValidateAll() error { + return m.validate(true) +} + +func (m *CapabilityPermissions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetPermissions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CapabilityPermissionsValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CapabilityPermissionsValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CapabilityPermissionsValidationError{ + field: fmt.Sprintf("Permissions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return CapabilityPermissionsMultiError(errors) + } + + return nil +} + +// CapabilityPermissionsMultiError is an error wrapping multiple validation +// errors returned by CapabilityPermissions.ValidateAll() if the designated +// constraints aren't met. +type CapabilityPermissionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CapabilityPermissionsMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CapabilityPermissionsMultiError) AllErrors() []error { return m } + +// CapabilityPermissionsValidationError is the validation error returned by +// CapabilityPermissions.Validate if the designated constraints aren't met. +type CapabilityPermissionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CapabilityPermissionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CapabilityPermissionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CapabilityPermissionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CapabilityPermissionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CapabilityPermissionsValidationError) ErrorName() string { + return "CapabilityPermissionsValidationError" +} + +// Error satisfies the builtin error interface +func (e CapabilityPermissionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCapabilityPermissions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CapabilityPermissionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CapabilityPermissionsValidationError{} + // Validate checks the field values on ResourceTypeCapability with the rules // defined in the proto definition for this message. 
If any rules are // violated, the first error encountered is returned, or nil if there are no violations. @@ -1253,6 +1508,35 @@ func (m *ResourceTypeCapability) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetPermissions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ResourceTypeCapabilityValidationError{ + field: "Permissions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ResourceTypeCapabilityValidationError{ + field: "Permissions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPermissions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ResourceTypeCapabilityValidationError{ + field: "Permissions", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return ResourceTypeCapabilityMultiError(errors) } @@ -1732,6 +2016,8 @@ func (m *ConnectorServiceValidateResponse) validate(all bool) error { } + // no validation rules for SdkVersion + if len(errors) > 0 { return ConnectorServiceValidateResponseMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector_protoopaque.pb.go new file mode 100644 index 00000000..e8ec7de5 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector_protoopaque.pb.go @@ -0,0 +1,2318 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/connector.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Capability int32 + +const ( + Capability_CAPABILITY_UNSPECIFIED Capability = 0 + Capability_CAPABILITY_PROVISION Capability = 1 + Capability_CAPABILITY_SYNC Capability = 2 + Capability_CAPABILITY_EVENT_FEED Capability = 3 + Capability_CAPABILITY_TICKETING Capability = 4 + Capability_CAPABILITY_ACCOUNT_PROVISIONING Capability = 5 + Capability_CAPABILITY_CREDENTIAL_ROTATION Capability = 6 + Capability_CAPABILITY_RESOURCE_CREATE Capability = 7 + Capability_CAPABILITY_RESOURCE_DELETE Capability = 8 + Capability_CAPABILITY_SYNC_SECRETS Capability = 9 + Capability_CAPABILITY_ACTIONS Capability = 10 + Capability_CAPABILITY_TARGETED_SYNC Capability = 11 + Capability_CAPABILITY_EVENT_FEED_V2 Capability = 12 +) + +// Enum value maps for Capability. 
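The CapabilityPermission / CapabilityPermissions messages and the new permissions field on ResourceTypeCapability validated above could be exercised roughly as in the following sketch. It is illustrative only: the permission strings are invented, and it assumes the builder types defined in the protoopaque file below are also generated for the default build.

package main

import (
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

func main() {
	// Advertise that a resource type supports provisioning and name the
	// concrete permissions it relies on. The permission values here are
	// placeholders, not anything defined by the SDK.
	perms := v2.CapabilityPermissions_builder{
		Permissions: []*v2.CapabilityPermission{
			v2.CapabilityPermission_builder{Permission: "team:write"}.Build(),
			v2.CapabilityPermission_builder{Permission: "team:delete"}.Build(),
		},
	}.Build()

	rtc := v2.ResourceTypeCapability_builder{
		Capabilities: []v2.Capability{v2.Capability_CAPABILITY_PROVISION},
		Permissions:  perms,
	}.Build()

	// ValidateAll walks the embedded CapabilityPermissions/CapabilityPermission
	// messages using the generated validators added in this file.
	if err := rtc.ValidateAll(); err != nil {
		fmt.Println("invalid capability:", err)
		return
	}
	fmt.Println("declared permissions:", len(rtc.GetPermissions().GetPermissions()))
}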
+var ( + Capability_name = map[int32]string{ + 0: "CAPABILITY_UNSPECIFIED", + 1: "CAPABILITY_PROVISION", + 2: "CAPABILITY_SYNC", + 3: "CAPABILITY_EVENT_FEED", + 4: "CAPABILITY_TICKETING", + 5: "CAPABILITY_ACCOUNT_PROVISIONING", + 6: "CAPABILITY_CREDENTIAL_ROTATION", + 7: "CAPABILITY_RESOURCE_CREATE", + 8: "CAPABILITY_RESOURCE_DELETE", + 9: "CAPABILITY_SYNC_SECRETS", + 10: "CAPABILITY_ACTIONS", + 11: "CAPABILITY_TARGETED_SYNC", + 12: "CAPABILITY_EVENT_FEED_V2", + } + Capability_value = map[string]int32{ + "CAPABILITY_UNSPECIFIED": 0, + "CAPABILITY_PROVISION": 1, + "CAPABILITY_SYNC": 2, + "CAPABILITY_EVENT_FEED": 3, + "CAPABILITY_TICKETING": 4, + "CAPABILITY_ACCOUNT_PROVISIONING": 5, + "CAPABILITY_CREDENTIAL_ROTATION": 6, + "CAPABILITY_RESOURCE_CREATE": 7, + "CAPABILITY_RESOURCE_DELETE": 8, + "CAPABILITY_SYNC_SECRETS": 9, + "CAPABILITY_ACTIONS": 10, + "CAPABILITY_TARGETED_SYNC": 11, + "CAPABILITY_EVENT_FEED_V2": 12, + } +) + +func (x Capability) Enum() *Capability { + p := new(Capability) + *p = x + return p +} + +func (x Capability) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Capability) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_connector_proto_enumTypes[0].Descriptor() +} + +func (Capability) Type() protoreflect.EnumType { + return &file_c1_connector_v2_connector_proto_enumTypes[0] +} + +func (x Capability) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type CapabilityDetailCredentialOption int32 + +const ( + CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED CapabilityDetailCredentialOption = 0 + CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_NO_PASSWORD CapabilityDetailCredentialOption = 1 + CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_RANDOM_PASSWORD CapabilityDetailCredentialOption = 2 + CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_SSO CapabilityDetailCredentialOption = 3 + CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_ENCRYPTED_PASSWORD CapabilityDetailCredentialOption = 4 +) + +// Enum value maps for CapabilityDetailCredentialOption. 
+var ( + CapabilityDetailCredentialOption_name = map[int32]string{ + 0: "CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED", + 1: "CAPABILITY_DETAIL_CREDENTIAL_OPTION_NO_PASSWORD", + 2: "CAPABILITY_DETAIL_CREDENTIAL_OPTION_RANDOM_PASSWORD", + 3: "CAPABILITY_DETAIL_CREDENTIAL_OPTION_SSO", + 4: "CAPABILITY_DETAIL_CREDENTIAL_OPTION_ENCRYPTED_PASSWORD", + } + CapabilityDetailCredentialOption_value = map[string]int32{ + "CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED": 0, + "CAPABILITY_DETAIL_CREDENTIAL_OPTION_NO_PASSWORD": 1, + "CAPABILITY_DETAIL_CREDENTIAL_OPTION_RANDOM_PASSWORD": 2, + "CAPABILITY_DETAIL_CREDENTIAL_OPTION_SSO": 3, + "CAPABILITY_DETAIL_CREDENTIAL_OPTION_ENCRYPTED_PASSWORD": 4, + } +) + +func (x CapabilityDetailCredentialOption) Enum() *CapabilityDetailCredentialOption { + p := new(CapabilityDetailCredentialOption) + *p = x + return p +} + +func (x CapabilityDetailCredentialOption) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CapabilityDetailCredentialOption) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_connector_proto_enumTypes[1].Descriptor() +} + +func (CapabilityDetailCredentialOption) Type() protoreflect.EnumType { + return &file_c1_connector_v2_connector_proto_enumTypes[1] +} + +func (x CapabilityDetailCredentialOption) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type ConnectorServiceCleanupRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_ActiveSyncId string `protobuf:"bytes,2,opt,name=active_sync_id,json=activeSyncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorServiceCleanupRequest) Reset() { + *x = ConnectorServiceCleanupRequest{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorServiceCleanupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorServiceCleanupRequest) ProtoMessage() {} + +func (x *ConnectorServiceCleanupRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorServiceCleanupRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ConnectorServiceCleanupRequest) GetActiveSyncId() string { + if x != nil { + return x.xxx_hidden_ActiveSyncId + } + return "" +} + +func (x *ConnectorServiceCleanupRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ConnectorServiceCleanupRequest) SetActiveSyncId(v string) { + x.xxx_hidden_ActiveSyncId = v +} + +type ConnectorServiceCleanupRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 ConnectorServiceCleanupRequest_builder) Build() *ConnectorServiceCleanupRequest { + m0 := &ConnectorServiceCleanupRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ActiveSyncId = b.ActiveSyncId + return m0 +} + +type ConnectorServiceCleanupResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorServiceCleanupResponse) Reset() { + *x = ConnectorServiceCleanupResponse{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorServiceCleanupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorServiceCleanupResponse) ProtoMessage() {} + +func (x *ConnectorServiceCleanupResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorServiceCleanupResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ConnectorServiceCleanupResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type ConnectorServiceCleanupResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 ConnectorServiceCleanupResponse_builder) Build() *ConnectorServiceCleanupResponse { + m0 := &ConnectorServiceCleanupResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ConnectorMetadata struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_HelpUrl string `protobuf:"bytes,2,opt,name=help_url,json=helpUrl,proto3"` + xxx_hidden_Icon *AssetRef `protobuf:"bytes,3,opt,name=icon,proto3"` + xxx_hidden_Logo *AssetRef `protobuf:"bytes,4,opt,name=logo,proto3"` + xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,5,opt,name=profile,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,6,rep,name=annotations,proto3"` + xxx_hidden_Description string `protobuf:"bytes,7,opt,name=description,proto3"` + xxx_hidden_Capabilities *ConnectorCapabilities `protobuf:"bytes,8,opt,name=capabilities,proto3"` + xxx_hidden_AccountCreationSchema *ConnectorAccountCreationSchema `protobuf:"bytes,9,opt,name=account_creation_schema,json=accountCreationSchema,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorMetadata) Reset() { + *x = ConnectorMetadata{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorMetadata) ProtoMessage() {} + +func (x *ConnectorMetadata) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[2] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorMetadata) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *ConnectorMetadata) GetHelpUrl() string { + if x != nil { + return x.xxx_hidden_HelpUrl + } + return "" +} + +func (x *ConnectorMetadata) GetIcon() *AssetRef { + if x != nil { + return x.xxx_hidden_Icon + } + return nil +} + +func (x *ConnectorMetadata) GetLogo() *AssetRef { + if x != nil { + return x.xxx_hidden_Logo + } + return nil +} + +func (x *ConnectorMetadata) GetProfile() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Profile + } + return nil +} + +func (x *ConnectorMetadata) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ConnectorMetadata) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *ConnectorMetadata) GetCapabilities() *ConnectorCapabilities { + if x != nil { + return x.xxx_hidden_Capabilities + } + return nil +} + +func (x *ConnectorMetadata) GetAccountCreationSchema() *ConnectorAccountCreationSchema { + if x != nil { + return x.xxx_hidden_AccountCreationSchema + } + return nil +} + +func (x *ConnectorMetadata) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *ConnectorMetadata) SetHelpUrl(v string) { + x.xxx_hidden_HelpUrl = v +} + +func (x *ConnectorMetadata) SetIcon(v *AssetRef) { + x.xxx_hidden_Icon = v +} + +func (x *ConnectorMetadata) SetLogo(v *AssetRef) { + x.xxx_hidden_Logo = v +} + +func (x *ConnectorMetadata) SetProfile(v *structpb.Struct) { + x.xxx_hidden_Profile = v +} + +func (x *ConnectorMetadata) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ConnectorMetadata) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *ConnectorMetadata) SetCapabilities(v *ConnectorCapabilities) { + x.xxx_hidden_Capabilities = v +} + +func (x *ConnectorMetadata) SetAccountCreationSchema(v *ConnectorAccountCreationSchema) { + x.xxx_hidden_AccountCreationSchema = v +} + +func (x *ConnectorMetadata) HasIcon() bool { + if x == nil { + return false + } + return x.xxx_hidden_Icon != nil +} + +func (x *ConnectorMetadata) HasLogo() bool { + if x == nil { + return false + } + return x.xxx_hidden_Logo != nil +} + +func (x *ConnectorMetadata) HasProfile() bool { + if x == nil { + return false + } + return x.xxx_hidden_Profile != nil +} + +func (x *ConnectorMetadata) HasCapabilities() bool { + if x == nil { + return false + } + return x.xxx_hidden_Capabilities != nil +} + +func (x *ConnectorMetadata) HasAccountCreationSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_AccountCreationSchema != nil +} + +func (x *ConnectorMetadata) ClearIcon() { + x.xxx_hidden_Icon = nil +} + +func (x *ConnectorMetadata) ClearLogo() { + x.xxx_hidden_Logo = nil +} + +func (x *ConnectorMetadata) ClearProfile() { + x.xxx_hidden_Profile = nil +} + +func (x *ConnectorMetadata) ClearCapabilities() { + x.xxx_hidden_Capabilities = nil +} + +func (x *ConnectorMetadata) ClearAccountCreationSchema() { + x.xxx_hidden_AccountCreationSchema = nil +} + +type ConnectorMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DisplayName string + HelpUrl string + Icon *AssetRef + Logo *AssetRef + Profile *structpb.Struct + Annotations []*anypb.Any + Description string + Capabilities *ConnectorCapabilities + AccountCreationSchema *ConnectorAccountCreationSchema +} + +func (b0 ConnectorMetadata_builder) Build() *ConnectorMetadata { + m0 := &ConnectorMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_HelpUrl = b.HelpUrl + x.xxx_hidden_Icon = b.Icon + x.xxx_hidden_Logo = b.Logo + x.xxx_hidden_Profile = b.Profile + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Capabilities = b.Capabilities + x.xxx_hidden_AccountCreationSchema = b.AccountCreationSchema + return m0 +} + +type CredentialDetails struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_CapabilityAccountProvisioning *CredentialDetailsAccountProvisioning `protobuf:"bytes,1,opt,name=capability_account_provisioning,json=capabilityAccountProvisioning,proto3"` + xxx_hidden_CapabilityCredentialRotation *CredentialDetailsCredentialRotation `protobuf:"bytes,2,opt,name=capability_credential_rotation,json=capabilityCredentialRotation,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CredentialDetails) Reset() { + *x = CredentialDetails{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CredentialDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialDetails) ProtoMessage() {} + +func (x *CredentialDetails) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CredentialDetails) GetCapabilityAccountProvisioning() *CredentialDetailsAccountProvisioning { + if x != nil { + return x.xxx_hidden_CapabilityAccountProvisioning + } + return nil +} + +func (x *CredentialDetails) GetCapabilityCredentialRotation() *CredentialDetailsCredentialRotation { + if x != nil { + return x.xxx_hidden_CapabilityCredentialRotation + } + return nil +} + +func (x *CredentialDetails) SetCapabilityAccountProvisioning(v *CredentialDetailsAccountProvisioning) { + x.xxx_hidden_CapabilityAccountProvisioning = v +} + +func (x *CredentialDetails) SetCapabilityCredentialRotation(v *CredentialDetailsCredentialRotation) { + x.xxx_hidden_CapabilityCredentialRotation = v +} + +func (x *CredentialDetails) HasCapabilityAccountProvisioning() bool { + if x == nil { + return false + } + return x.xxx_hidden_CapabilityAccountProvisioning != nil +} + +func (x *CredentialDetails) HasCapabilityCredentialRotation() bool { + if x == nil { + return false + } + return x.xxx_hidden_CapabilityCredentialRotation != nil +} + +func (x *CredentialDetails) ClearCapabilityAccountProvisioning() { + x.xxx_hidden_CapabilityAccountProvisioning = nil +} + +func (x *CredentialDetails) ClearCapabilityCredentialRotation() { + x.xxx_hidden_CapabilityCredentialRotation = nil +} + +type CredentialDetails_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + CapabilityAccountProvisioning *CredentialDetailsAccountProvisioning + CapabilityCredentialRotation *CredentialDetailsCredentialRotation +} + +func (b0 CredentialDetails_builder) Build() *CredentialDetails { + m0 := &CredentialDetails{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_CapabilityAccountProvisioning = b.CapabilityAccountProvisioning + x.xxx_hidden_CapabilityCredentialRotation = b.CapabilityCredentialRotation + return m0 +} + +type CredentialDetailsAccountProvisioning struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SupportedCredentialOptions []CapabilityDetailCredentialOption `protobuf:"varint,1,rep,packed,name=supported_credential_options,json=supportedCredentialOptions,proto3,enum=c1.connector.v2.CapabilityDetailCredentialOption"` + xxx_hidden_PreferredCredentialOption CapabilityDetailCredentialOption `protobuf:"varint,2,opt,name=preferred_credential_option,json=preferredCredentialOption,proto3,enum=c1.connector.v2.CapabilityDetailCredentialOption"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CredentialDetailsAccountProvisioning) Reset() { + *x = CredentialDetailsAccountProvisioning{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CredentialDetailsAccountProvisioning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialDetailsAccountProvisioning) ProtoMessage() {} + +func (x *CredentialDetailsAccountProvisioning) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CredentialDetailsAccountProvisioning) GetSupportedCredentialOptions() []CapabilityDetailCredentialOption { + if x != nil { + return x.xxx_hidden_SupportedCredentialOptions + } + return nil +} + +func (x *CredentialDetailsAccountProvisioning) GetPreferredCredentialOption() CapabilityDetailCredentialOption { + if x != nil { + return x.xxx_hidden_PreferredCredentialOption + } + return CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED +} + +func (x *CredentialDetailsAccountProvisioning) SetSupportedCredentialOptions(v []CapabilityDetailCredentialOption) { + x.xxx_hidden_SupportedCredentialOptions = v +} + +func (x *CredentialDetailsAccountProvisioning) SetPreferredCredentialOption(v CapabilityDetailCredentialOption) { + x.xxx_hidden_PreferredCredentialOption = v +} + +type CredentialDetailsAccountProvisioning_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SupportedCredentialOptions []CapabilityDetailCredentialOption + PreferredCredentialOption CapabilityDetailCredentialOption +} + +func (b0 CredentialDetailsAccountProvisioning_builder) Build() *CredentialDetailsAccountProvisioning { + m0 := &CredentialDetailsAccountProvisioning{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SupportedCredentialOptions = b.SupportedCredentialOptions + x.xxx_hidden_PreferredCredentialOption = b.PreferredCredentialOption + return m0 +} + +type CredentialDetailsCredentialRotation struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SupportedCredentialOptions []CapabilityDetailCredentialOption `protobuf:"varint,1,rep,packed,name=supported_credential_options,json=supportedCredentialOptions,proto3,enum=c1.connector.v2.CapabilityDetailCredentialOption"` + xxx_hidden_PreferredCredentialOption CapabilityDetailCredentialOption `protobuf:"varint,2,opt,name=preferred_credential_option,json=preferredCredentialOption,proto3,enum=c1.connector.v2.CapabilityDetailCredentialOption"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CredentialDetailsCredentialRotation) Reset() { + *x = CredentialDetailsCredentialRotation{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CredentialDetailsCredentialRotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialDetailsCredentialRotation) ProtoMessage() {} + +func (x *CredentialDetailsCredentialRotation) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CredentialDetailsCredentialRotation) GetSupportedCredentialOptions() []CapabilityDetailCredentialOption { + if x != nil { + return x.xxx_hidden_SupportedCredentialOptions + } + return nil +} + +func (x *CredentialDetailsCredentialRotation) GetPreferredCredentialOption() CapabilityDetailCredentialOption { + if x != nil { + return x.xxx_hidden_PreferredCredentialOption + } + return CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED +} + +func (x *CredentialDetailsCredentialRotation) SetSupportedCredentialOptions(v []CapabilityDetailCredentialOption) { + x.xxx_hidden_SupportedCredentialOptions = v +} + +func (x *CredentialDetailsCredentialRotation) SetPreferredCredentialOption(v CapabilityDetailCredentialOption) { + x.xxx_hidden_PreferredCredentialOption = v +} + +type CredentialDetailsCredentialRotation_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SupportedCredentialOptions []CapabilityDetailCredentialOption + PreferredCredentialOption CapabilityDetailCredentialOption +} + +func (b0 CredentialDetailsCredentialRotation_builder) Build() *CredentialDetailsCredentialRotation { + m0 := &CredentialDetailsCredentialRotation{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SupportedCredentialOptions = b.SupportedCredentialOptions + x.xxx_hidden_PreferredCredentialOption = b.PreferredCredentialOption + return m0 +} + +type ConnectorCapabilities struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceTypeCapabilities *[]*ResourceTypeCapability `protobuf:"bytes,1,rep,name=resource_type_capabilities,json=resourceTypeCapabilities,proto3"` + xxx_hidden_ConnectorCapabilities []Capability `protobuf:"varint,2,rep,packed,name=connector_capabilities,json=connectorCapabilities,proto3,enum=c1.connector.v2.Capability"` + xxx_hidden_CredentialDetails *CredentialDetails `protobuf:"bytes,3,opt,name=credential_details,json=credentialDetails,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorCapabilities) Reset() { + *x = ConnectorCapabilities{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorCapabilities) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorCapabilities) ProtoMessage() {} + +func (x *ConnectorCapabilities) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorCapabilities) GetResourceTypeCapabilities() []*ResourceTypeCapability { + if x != nil { + if x.xxx_hidden_ResourceTypeCapabilities != nil { + return *x.xxx_hidden_ResourceTypeCapabilities + } + } + return nil +} + +func (x *ConnectorCapabilities) GetConnectorCapabilities() []Capability { + if x != nil { + return x.xxx_hidden_ConnectorCapabilities + } + return nil +} + +func (x *ConnectorCapabilities) GetCredentialDetails() *CredentialDetails { + if x != nil { + return x.xxx_hidden_CredentialDetails + } + return nil +} + +func (x *ConnectorCapabilities) SetResourceTypeCapabilities(v []*ResourceTypeCapability) { + x.xxx_hidden_ResourceTypeCapabilities = &v +} + +func (x *ConnectorCapabilities) SetConnectorCapabilities(v []Capability) { + x.xxx_hidden_ConnectorCapabilities = v +} + +func (x *ConnectorCapabilities) SetCredentialDetails(v *CredentialDetails) { + x.xxx_hidden_CredentialDetails = v +} + +func (x *ConnectorCapabilities) HasCredentialDetails() bool { + if x == nil { + return false + } + return x.xxx_hidden_CredentialDetails != nil +} + +func (x *ConnectorCapabilities) ClearCredentialDetails() { + x.xxx_hidden_CredentialDetails = nil +} + +type ConnectorCapabilities_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceTypeCapabilities []*ResourceTypeCapability + ConnectorCapabilities []Capability + CredentialDetails *CredentialDetails +} + +func (b0 ConnectorCapabilities_builder) Build() *ConnectorCapabilities { + m0 := &ConnectorCapabilities{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceTypeCapabilities = &b.ResourceTypeCapabilities + x.xxx_hidden_ConnectorCapabilities = b.ConnectorCapabilities + x.xxx_hidden_CredentialDetails = b.CredentialDetails + return m0 +} + +type CapabilityPermission struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Permission string `protobuf:"bytes,1,opt,name=permission,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CapabilityPermission) Reset() { + *x = CapabilityPermission{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CapabilityPermission) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CapabilityPermission) ProtoMessage() {} + +func (x *CapabilityPermission) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CapabilityPermission) GetPermission() string { + if x != nil { + return x.xxx_hidden_Permission + } + return "" +} + +func (x *CapabilityPermission) SetPermission(v string) { + x.xxx_hidden_Permission = v +} + +type CapabilityPermission_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Permission string +} + +func (b0 CapabilityPermission_builder) Build() *CapabilityPermission { + m0 := &CapabilityPermission{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Permission = b.Permission + return m0 +} + +type CapabilityPermissions struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Permissions *[]*CapabilityPermission `protobuf:"bytes,1,rep,name=permissions,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CapabilityPermissions) Reset() { + *x = CapabilityPermissions{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CapabilityPermissions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CapabilityPermissions) ProtoMessage() {} + +func (x *CapabilityPermissions) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CapabilityPermissions) GetPermissions() []*CapabilityPermission { + if x != nil { + if x.xxx_hidden_Permissions != nil { + return *x.xxx_hidden_Permissions + } + } + return nil +} + +func (x *CapabilityPermissions) SetPermissions(v []*CapabilityPermission) { + x.xxx_hidden_Permissions = &v +} + +type CapabilityPermissions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Permissions []*CapabilityPermission +} + +func (b0 CapabilityPermissions_builder) Build() *CapabilityPermissions { + m0 := &CapabilityPermissions{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Permissions = &b.Permissions + return m0 +} + +type ResourceTypeCapability struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceType *ResourceType `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3"` + xxx_hidden_Capabilities []Capability `protobuf:"varint,2,rep,packed,name=capabilities,proto3,enum=c1.connector.v2.Capability"` + xxx_hidden_Permissions *CapabilityPermissions `protobuf:"bytes,3,opt,name=permissions,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceTypeCapability) Reset() { + *x = ResourceTypeCapability{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceTypeCapability) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceTypeCapability) ProtoMessage() {} + +func (x *ResourceTypeCapability) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceTypeCapability) GetResourceType() *ResourceType { + if x != nil { + return x.xxx_hidden_ResourceType + } + return nil +} + +func (x *ResourceTypeCapability) GetCapabilities() []Capability { + if x != nil { + return x.xxx_hidden_Capabilities + } + return nil +} + +func (x *ResourceTypeCapability) GetPermissions() *CapabilityPermissions { + if x != nil { + return x.xxx_hidden_Permissions + } + return nil +} + +func (x *ResourceTypeCapability) SetResourceType(v *ResourceType) { + x.xxx_hidden_ResourceType = v +} + +func (x *ResourceTypeCapability) SetCapabilities(v []Capability) { + x.xxx_hidden_Capabilities = v +} + +func (x *ResourceTypeCapability) SetPermissions(v *CapabilityPermissions) { + x.xxx_hidden_Permissions = v +} + +func (x *ResourceTypeCapability) HasResourceType() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceType != nil +} + +func (x *ResourceTypeCapability) HasPermissions() bool { + if x == nil { + return false + } + return x.xxx_hidden_Permissions != nil +} + +func (x *ResourceTypeCapability) ClearResourceType() { + x.xxx_hidden_ResourceType = nil +} + +func (x *ResourceTypeCapability) ClearPermissions() { + x.xxx_hidden_Permissions = nil +} + +type ResourceTypeCapability_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceType *ResourceType + Capabilities []Capability + Permissions *CapabilityPermissions +} + +func (b0 ResourceTypeCapability_builder) Build() *ResourceTypeCapability { + m0 := &ResourceTypeCapability{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceType = b.ResourceType + x.xxx_hidden_Capabilities = b.Capabilities + x.xxx_hidden_Permissions = b.Permissions + return m0 +} + +type ConnectorServiceGetMetadataRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorServiceGetMetadataRequest) Reset() { + *x = ConnectorServiceGetMetadataRequest{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorServiceGetMetadataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorServiceGetMetadataRequest) ProtoMessage() {} + +func (x *ConnectorServiceGetMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type ConnectorServiceGetMetadataRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 ConnectorServiceGetMetadataRequest_builder) Build() *ConnectorServiceGetMetadataRequest { + m0 := &ConnectorServiceGetMetadataRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type ConnectorServiceGetMetadataResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Metadata *ConnectorMetadata `protobuf:"bytes,1,opt,name=metadata,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorServiceGetMetadataResponse) Reset() { + *x = ConnectorServiceGetMetadataResponse{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorServiceGetMetadataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorServiceGetMetadataResponse) ProtoMessage() {} + +func (x *ConnectorServiceGetMetadataResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorServiceGetMetadataResponse) GetMetadata() *ConnectorMetadata { + if x != nil { + return x.xxx_hidden_Metadata + } + return nil +} + +func (x *ConnectorServiceGetMetadataResponse) SetMetadata(v *ConnectorMetadata) { + x.xxx_hidden_Metadata = v +} + +func (x *ConnectorServiceGetMetadataResponse) HasMetadata() bool { + if x == nil { + return false + } + return x.xxx_hidden_Metadata != nil +} + +func (x *ConnectorServiceGetMetadataResponse) ClearMetadata() { + x.xxx_hidden_Metadata = nil +} + +type ConnectorServiceGetMetadataResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Metadata *ConnectorMetadata +} + +func (b0 ConnectorServiceGetMetadataResponse_builder) Build() *ConnectorServiceGetMetadataResponse { + m0 := &ConnectorServiceGetMetadataResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Metadata = b.Metadata + return m0 +} + +type ConnectorServiceValidateRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorServiceValidateRequest) Reset() { + *x = ConnectorServiceValidateRequest{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorServiceValidateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorServiceValidateRequest) ProtoMessage() {} + +func (x *ConnectorServiceValidateRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type ConnectorServiceValidateRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 ConnectorServiceValidateRequest_builder) Build() *ConnectorServiceValidateRequest { + m0 := &ConnectorServiceValidateRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +// NOTE(morgabra) We're expecting correct grpc.Status responses +// for things like 401/403/500, etc +type ConnectorServiceValidateResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_SdkVersion string `protobuf:"bytes,2,opt,name=sdk_version,json=sdkVersion,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorServiceValidateResponse) Reset() { + *x = ConnectorServiceValidateResponse{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorServiceValidateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorServiceValidateResponse) ProtoMessage() {} + +func (x *ConnectorServiceValidateResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorServiceValidateResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ConnectorServiceValidateResponse) GetSdkVersion() string { + if x != nil { + return x.xxx_hidden_SdkVersion + } + return "" +} + +func (x *ConnectorServiceValidateResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ConnectorServiceValidateResponse) SetSdkVersion(v string) { + x.xxx_hidden_SdkVersion = v +} + +type ConnectorServiceValidateResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + SdkVersion string +} + +func (b0 ConnectorServiceValidateResponse_builder) Build() *ConnectorServiceValidateResponse { + m0 := &ConnectorServiceValidateResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_SdkVersion = b.SdkVersion + return m0 +} + +type ConnectorAccountCreationSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_FieldMap map[string]*ConnectorAccountCreationSchema_Field `protobuf:"bytes,1,rep,name=field_map,json=fieldMap,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorAccountCreationSchema) Reset() { + *x = ConnectorAccountCreationSchema{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorAccountCreationSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorAccountCreationSchema) ProtoMessage() {} + +func (x *ConnectorAccountCreationSchema) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorAccountCreationSchema) GetFieldMap() map[string]*ConnectorAccountCreationSchema_Field { + if x != nil { + return x.xxx_hidden_FieldMap + } + return nil +} + +func (x *ConnectorAccountCreationSchema) SetFieldMap(v map[string]*ConnectorAccountCreationSchema_Field) { + x.xxx_hidden_FieldMap = v +} + +type ConnectorAccountCreationSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + FieldMap map[string]*ConnectorAccountCreationSchema_Field +} + +func (b0 ConnectorAccountCreationSchema_builder) Build() *ConnectorAccountCreationSchema { + m0 := &ConnectorAccountCreationSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_FieldMap = b.FieldMap + return m0 +} + +type ConnectorAccountCreationSchema_Field struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Required bool `protobuf:"varint,2,opt,name=required,proto3"` + xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3"` + xxx_hidden_Placeholder string `protobuf:"bytes,4,opt,name=placeholder,proto3"` + xxx_hidden_Order int32 `protobuf:"varint,5,opt,name=order,proto3"` + xxx_hidden_Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3"` + xxx_hidden_Field isConnectorAccountCreationSchema_Field_Field `protobuf_oneof:"field"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorAccountCreationSchema_Field) Reset() { + *x = ConnectorAccountCreationSchema_Field{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorAccountCreationSchema_Field) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorAccountCreationSchema_Field) ProtoMessage() {} + +func (x *ConnectorAccountCreationSchema_Field) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorAccountCreationSchema_Field) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *ConnectorAccountCreationSchema_Field) GetRequired() bool { + if x != nil { + return x.xxx_hidden_Required + } + return false +} + +func (x *ConnectorAccountCreationSchema_Field) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *ConnectorAccountCreationSchema_Field) GetPlaceholder() string { + if x != nil { + return x.xxx_hidden_Placeholder + } + return "" +} + +func (x *ConnectorAccountCreationSchema_Field) GetOrder() int32 { + if x != nil { + return x.xxx_hidden_Order + } + return 0 +} + +func (x *ConnectorAccountCreationSchema_Field) GetDeprecated() bool { + if x != nil { + return x.xxx_hidden_Deprecated + } + return false +} + +func (x *ConnectorAccountCreationSchema_Field) GetStringField() *ConnectorAccountCreationSchema_StringField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_StringField); ok { + return x.StringField + } + } + return nil +} + +func (x *ConnectorAccountCreationSchema_Field) GetBoolField() *ConnectorAccountCreationSchema_BoolField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_BoolField); ok { + return x.BoolField + } + } + return nil +} + +func (x *ConnectorAccountCreationSchema_Field) GetStringListField() *ConnectorAccountCreationSchema_StringListField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_StringListField); ok { + return x.StringListField + } + } + return nil +} + +func (x *ConnectorAccountCreationSchema_Field) GetIntField() 
*ConnectorAccountCreationSchema_IntField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_IntField); ok { + return x.IntField + } + } + return nil +} + +func (x *ConnectorAccountCreationSchema_Field) GetMapField() *ConnectorAccountCreationSchema_MapField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_MapField); ok { + return x.MapField + } + } + return nil +} + +func (x *ConnectorAccountCreationSchema_Field) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetRequired(v bool) { + x.xxx_hidden_Required = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetPlaceholder(v string) { + x.xxx_hidden_Placeholder = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetOrder(v int32) { + x.xxx_hidden_Order = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetDeprecated(v bool) { + x.xxx_hidden_Deprecated = v +} + +func (x *ConnectorAccountCreationSchema_Field) SetStringField(v *ConnectorAccountCreationSchema_StringField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_StringField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) SetBoolField(v *ConnectorAccountCreationSchema_BoolField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_BoolField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) SetStringListField(v *ConnectorAccountCreationSchema_StringListField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_StringListField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) SetIntField(v *ConnectorAccountCreationSchema_IntField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_IntField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) SetMapField(v *ConnectorAccountCreationSchema_MapField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_MapField{v} +} + +func (x *ConnectorAccountCreationSchema_Field) HasField() bool { + if x == nil { + return false + } + return x.xxx_hidden_Field != nil +} + +func (x *ConnectorAccountCreationSchema_Field) HasStringField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_StringField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) HasBoolField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_BoolField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) HasStringListField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_StringListField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) HasIntField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_IntField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) HasMapField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_MapField) + return ok +} + +func (x *ConnectorAccountCreationSchema_Field) ClearField() { + 
x.xxx_hidden_Field = nil +} + +func (x *ConnectorAccountCreationSchema_Field) ClearStringField() { + if _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_StringField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *ConnectorAccountCreationSchema_Field) ClearBoolField() { + if _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_BoolField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *ConnectorAccountCreationSchema_Field) ClearStringListField() { + if _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_StringListField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *ConnectorAccountCreationSchema_Field) ClearIntField() { + if _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_IntField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *ConnectorAccountCreationSchema_Field) ClearMapField() { + if _, ok := x.xxx_hidden_Field.(*connectorAccountCreationSchema_Field_MapField); ok { + x.xxx_hidden_Field = nil + } +} + +const ConnectorAccountCreationSchema_Field_Field_not_set_case case_ConnectorAccountCreationSchema_Field_Field = 0 +const ConnectorAccountCreationSchema_Field_StringField_case case_ConnectorAccountCreationSchema_Field_Field = 100 +const ConnectorAccountCreationSchema_Field_BoolField_case case_ConnectorAccountCreationSchema_Field_Field = 101 +const ConnectorAccountCreationSchema_Field_StringListField_case case_ConnectorAccountCreationSchema_Field_Field = 102 +const ConnectorAccountCreationSchema_Field_IntField_case case_ConnectorAccountCreationSchema_Field_Field = 103 +const ConnectorAccountCreationSchema_Field_MapField_case case_ConnectorAccountCreationSchema_Field_Field = 104 + +func (x *ConnectorAccountCreationSchema_Field) WhichField() case_ConnectorAccountCreationSchema_Field_Field { + if x == nil { + return ConnectorAccountCreationSchema_Field_Field_not_set_case + } + switch x.xxx_hidden_Field.(type) { + case *connectorAccountCreationSchema_Field_StringField: + return ConnectorAccountCreationSchema_Field_StringField_case + case *connectorAccountCreationSchema_Field_BoolField: + return ConnectorAccountCreationSchema_Field_BoolField_case + case *connectorAccountCreationSchema_Field_StringListField: + return ConnectorAccountCreationSchema_Field_StringListField_case + case *connectorAccountCreationSchema_Field_IntField: + return ConnectorAccountCreationSchema_Field_IntField_case + case *connectorAccountCreationSchema_Field_MapField: + return ConnectorAccountCreationSchema_Field_MapField_case + default: + return ConnectorAccountCreationSchema_Field_Field_not_set_case + } +} + +type ConnectorAccountCreationSchema_Field_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DisplayName string + Required bool + Description string + Placeholder string + Order int32 + Deprecated bool + // Fields of oneof xxx_hidden_Field: + StringField *ConnectorAccountCreationSchema_StringField + BoolField *ConnectorAccountCreationSchema_BoolField + StringListField *ConnectorAccountCreationSchema_StringListField + IntField *ConnectorAccountCreationSchema_IntField + MapField *ConnectorAccountCreationSchema_MapField + // -- end of xxx_hidden_Field +} + +func (b0 ConnectorAccountCreationSchema_Field_builder) Build() *ConnectorAccountCreationSchema_Field { + m0 := &ConnectorAccountCreationSchema_Field{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Required = b.Required + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Placeholder = b.Placeholder + x.xxx_hidden_Order = b.Order + x.xxx_hidden_Deprecated = b.Deprecated + if b.StringField != nil { + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_StringField{b.StringField} + } + if b.BoolField != nil { + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_BoolField{b.BoolField} + } + if b.StringListField != nil { + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_StringListField{b.StringListField} + } + if b.IntField != nil { + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_IntField{b.IntField} + } + if b.MapField != nil { + x.xxx_hidden_Field = &connectorAccountCreationSchema_Field_MapField{b.MapField} + } + return m0 +} + +type case_ConnectorAccountCreationSchema_Field_Field protoreflect.FieldNumber + +func (x case_ConnectorAccountCreationSchema_Field_Field) String() string { + md := file_c1_connector_v2_connector_proto_msgTypes[16].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isConnectorAccountCreationSchema_Field_Field interface { + isConnectorAccountCreationSchema_Field_Field() +} + +type connectorAccountCreationSchema_Field_StringField struct { + StringField *ConnectorAccountCreationSchema_StringField `protobuf:"bytes,100,opt,name=string_field,json=stringField,proto3,oneof"` +} + +type connectorAccountCreationSchema_Field_BoolField struct { + BoolField *ConnectorAccountCreationSchema_BoolField `protobuf:"bytes,101,opt,name=bool_field,json=boolField,proto3,oneof"` +} + +type connectorAccountCreationSchema_Field_StringListField struct { + StringListField *ConnectorAccountCreationSchema_StringListField `protobuf:"bytes,102,opt,name=string_list_field,json=stringListField,proto3,oneof"` +} + +type connectorAccountCreationSchema_Field_IntField struct { + IntField *ConnectorAccountCreationSchema_IntField `protobuf:"bytes,103,opt,name=int_field,json=intField,proto3,oneof"` +} + +type connectorAccountCreationSchema_Field_MapField struct { + MapField *ConnectorAccountCreationSchema_MapField `protobuf:"bytes,104,opt,name=map_field,json=mapField,proto3,oneof"` +} + +func (*connectorAccountCreationSchema_Field_StringField) isConnectorAccountCreationSchema_Field_Field() { +} + +func (*connectorAccountCreationSchema_Field_BoolField) isConnectorAccountCreationSchema_Field_Field() { +} + +func (*connectorAccountCreationSchema_Field_StringListField) isConnectorAccountCreationSchema_Field_Field() { +} + +func (*connectorAccountCreationSchema_Field_IntField) isConnectorAccountCreationSchema_Field_Field() { +} + +func (*connectorAccountCreationSchema_Field_MapField) isConnectorAccountCreationSchema_Field_Field() { +} + +type 
ConnectorAccountCreationSchema_StringField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue *string `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3,oneof"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorAccountCreationSchema_StringField) Reset() { + *x = ConnectorAccountCreationSchema_StringField{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorAccountCreationSchema_StringField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorAccountCreationSchema_StringField) ProtoMessage() {} + +func (x *ConnectorAccountCreationSchema_StringField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorAccountCreationSchema_StringField) GetDefaultValue() string { + if x != nil { + if x.xxx_hidden_DefaultValue != nil { + return *x.xxx_hidden_DefaultValue + } + return "" + } + return "" +} + +func (x *ConnectorAccountCreationSchema_StringField) SetDefaultValue(v string) { + x.xxx_hidden_DefaultValue = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 1) +} + +func (x *ConnectorAccountCreationSchema_StringField) HasDefaultValue() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *ConnectorAccountCreationSchema_StringField) ClearDefaultValue() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_DefaultValue = nil +} + +type ConnectorAccountCreationSchema_StringField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue *string +} + +func (b0 ConnectorAccountCreationSchema_StringField_builder) Build() *ConnectorAccountCreationSchema_StringField { + m0 := &ConnectorAccountCreationSchema_StringField{} + b, x := &b0, m0 + _, _ = b, x + if b.DefaultValue != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 1) + x.xxx_hidden_DefaultValue = b.DefaultValue + } + return m0 +} + +type ConnectorAccountCreationSchema_BoolField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue bool `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3,oneof"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorAccountCreationSchema_BoolField) Reset() { + *x = ConnectorAccountCreationSchema_BoolField{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorAccountCreationSchema_BoolField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorAccountCreationSchema_BoolField) ProtoMessage() {} + +func (x *ConnectorAccountCreationSchema_BoolField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorAccountCreationSchema_BoolField) GetDefaultValue() bool { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return false +} + +func (x *ConnectorAccountCreationSchema_BoolField) SetDefaultValue(v bool) { + x.xxx_hidden_DefaultValue = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 1) +} + +func (x *ConnectorAccountCreationSchema_BoolField) HasDefaultValue() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *ConnectorAccountCreationSchema_BoolField) ClearDefaultValue() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_DefaultValue = false +} + +type ConnectorAccountCreationSchema_BoolField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue *bool +} + +func (b0 ConnectorAccountCreationSchema_BoolField_builder) Build() *ConnectorAccountCreationSchema_BoolField { + m0 := &ConnectorAccountCreationSchema_BoolField{} + b, x := &b0, m0 + _, _ = b, x + if b.DefaultValue != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 1) + x.xxx_hidden_DefaultValue = *b.DefaultValue + } + return m0 +} + +type ConnectorAccountCreationSchema_StringListField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue []string `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorAccountCreationSchema_StringListField) Reset() { + *x = ConnectorAccountCreationSchema_StringListField{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorAccountCreationSchema_StringListField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorAccountCreationSchema_StringListField) ProtoMessage() {} + +func (x *ConnectorAccountCreationSchema_StringListField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorAccountCreationSchema_StringListField) GetDefaultValue() []string { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *ConnectorAccountCreationSchema_StringListField) SetDefaultValue(v []string) { + x.xxx_hidden_DefaultValue = v +} + +type ConnectorAccountCreationSchema_StringListField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue []string +} + +func (b0 ConnectorAccountCreationSchema_StringListField_builder) Build() *ConnectorAccountCreationSchema_StringListField { + m0 := &ConnectorAccountCreationSchema_StringListField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + return m0 +} + +type ConnectorAccountCreationSchema_IntField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue int32 `protobuf:"varint,1,opt,name=default_value,json=defaultValue,proto3,oneof"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorAccountCreationSchema_IntField) Reset() { + *x = ConnectorAccountCreationSchema_IntField{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorAccountCreationSchema_IntField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorAccountCreationSchema_IntField) ProtoMessage() {} + +func (x *ConnectorAccountCreationSchema_IntField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorAccountCreationSchema_IntField) GetDefaultValue() int32 { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return 0 +} + +func (x *ConnectorAccountCreationSchema_IntField) SetDefaultValue(v int32) { + x.xxx_hidden_DefaultValue = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 1) +} + +func (x *ConnectorAccountCreationSchema_IntField) HasDefaultValue() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *ConnectorAccountCreationSchema_IntField) ClearDefaultValue() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_DefaultValue = 0 +} + +type ConnectorAccountCreationSchema_IntField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue *int32 +} + +func (b0 ConnectorAccountCreationSchema_IntField_builder) Build() *ConnectorAccountCreationSchema_IntField { + m0 := &ConnectorAccountCreationSchema_IntField{} + b, x := &b0, m0 + _, _ = b, x + if b.DefaultValue != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 1) + x.xxx_hidden_DefaultValue = *b.DefaultValue + } + return m0 +} + +type ConnectorAccountCreationSchema_MapField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue map[string]*ConnectorAccountCreationSchema_Field `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ConnectorAccountCreationSchema_MapField) Reset() { + *x = ConnectorAccountCreationSchema_MapField{} + mi := &file_c1_connector_v2_connector_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ConnectorAccountCreationSchema_MapField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectorAccountCreationSchema_MapField) ProtoMessage() {} + +func (x *ConnectorAccountCreationSchema_MapField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_connector_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ConnectorAccountCreationSchema_MapField) GetDefaultValue() map[string]*ConnectorAccountCreationSchema_Field { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *ConnectorAccountCreationSchema_MapField) SetDefaultValue(v map[string]*ConnectorAccountCreationSchema_Field) { + x.xxx_hidden_DefaultValue = v +} + +type ConnectorAccountCreationSchema_MapField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + DefaultValue map[string]*ConnectorAccountCreationSchema_Field +} + +func (b0 ConnectorAccountCreationSchema_MapField_builder) Build() *ConnectorAccountCreationSchema_MapField { + m0 := &ConnectorAccountCreationSchema_MapField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = b.DefaultValue + return m0 +} + +var File_c1_connector_v2_connector_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_connector_proto_rawDesc = "" + + "\n" + + "\x1fc1/connector/v2/connector.proto\x12\x0fc1.connector.v2\x1a\x1bc1/connector/v2/asset.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17validate/validate.proto\"\x8d\x01\n" + + "\x1eConnectorServiceCleanupRequest\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"Y\n" + + "\x1fConnectorServiceCleanupResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa8\x04\n" + + "\x11ConnectorMetadata\x12-\n" + + "\fdisplay_name\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\vdisplayName\x125\n" + + "\bhelp_url\x18\x02 \x01(\tB\x1a\xfaB\x17r\x15 \x01(\x80\b:\bhttps://\xd0\x01\x01\x88\x01\x01R\ahelpUrl\x12-\n" + + "\x04icon\x18\x03 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12-\n" + + "\x04logo\x18\x04 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04logo\x121\n" + + "\aprofile\x18\x05 \x01(\v2\x17.google.protobuf.StructR\aprofile\x126\n" + + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + + "\vdescription\x18\a \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\vdescription\x12J\n" + + "\fcapabilities\x18\b \x01(\v2&.c1.connector.v2.ConnectorCapabilitiesR\fcapabilities\x12g\n" + + "\x17account_creation_schema\x18\t \x01(\v2/.c1.connector.v2.ConnectorAccountCreationSchemaR\x15accountCreationSchema\"\x8e\x02\n" + + "\x11CredentialDetails\x12}\n" + + "\x1fcapability_account_provisioning\x18\x01 \x01(\v25.c1.connector.v2.CredentialDetailsAccountProvisioningR\x1dcapabilityAccountProvisioning\x12z\n" + + "\x1ecapability_credential_rotation\x18\x02 \x01(\v24.c1.connector.v2.CredentialDetailsCredentialRotationR\x1ccapabilityCredentialRotation\"\x8e\x02\n" + + "$CredentialDetailsAccountProvisioning\x12s\n" + + "\x1csupported_credential_options\x18\x01 \x03(\x0e21.c1.connector.v2.CapabilityDetailCredentialOptionR\x1asupportedCredentialOptions\x12q\n" + + "\x1bpreferred_credential_option\x18\x02 \x01(\x0e21.c1.connector.v2.CapabilityDetailCredentialOptionR\x19preferredCredentialOption\"\x8d\x02\n" + + "#CredentialDetailsCredentialRotation\x12s\n" + + "\x1csupported_credential_options\x18\x01 \x03(\x0e21.c1.connector.v2.CapabilityDetailCredentialOptionR\x1asupportedCredentialOptions\x12q\n" + + "\x1bpreferred_credential_option\x18\x02 \x01(\x0e21.c1.connector.v2.CapabilityDetailCredentialOptionR\x19preferredCredentialOption\"\xa5\x02\n" + + "\x15ConnectorCapabilities\x12e\n" + + "\x1aresource_type_capabilities\x18\x01 \x03(\v2'.c1.connector.v2.ResourceTypeCapabilityR\x18resourceTypeCapabilities\x12R\n" + + "\x16connector_capabilities\x18\x02 \x03(\x0e2\x1b.c1.connector.v2.CapabilityR\x15connectorCapabilities\x12Q\n" + + "\x12credential_details\x18\x03 \x01(\v2\".c1.connector.v2.CredentialDetailsR\x11credentialDetails\"6\n" + + "\x14CapabilityPermission\x12\x1e\n" + + "\n" + + "permission\x18\x01 \x01(\tR\n" + + "permission\"`\n" + + "\x15CapabilityPermissions\x12G\n" + + 
"\vpermissions\x18\x01 \x03(\v2%.c1.connector.v2.CapabilityPermissionR\vpermissions\"\xe7\x01\n" + + "\x16ResourceTypeCapability\x12B\n" + + "\rresource_type\x18\x01 \x01(\v2\x1d.c1.connector.v2.ResourceTypeR\fresourceType\x12?\n" + + "\fcapabilities\x18\x02 \x03(\x0e2\x1b.c1.connector.v2.CapabilityR\fcapabilities\x12H\n" + + "\vpermissions\x18\x03 \x01(\v2&.c1.connector.v2.CapabilityPermissionsR\vpermissions\"$\n" + + "\"ConnectorServiceGetMetadataRequest\"e\n" + + "#ConnectorServiceGetMetadataResponse\x12>\n" + + "\bmetadata\x18\x01 \x01(\v2\".c1.connector.v2.ConnectorMetadataR\bmetadata\"!\n" + + "\x1fConnectorServiceValidateRequest\"{\n" + + " ConnectorServiceValidateResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12\x1f\n" + + "\vsdk_version\x18\x02 \x01(\tR\n" + + "sdkVersion\"\xa5\v\n" + + "\x1eConnectorAccountCreationSchema\x12Z\n" + + "\tfield_map\x18\x01 \x03(\v2=.c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntryR\bfieldMap\x1ar\n" + + "\rFieldMapEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12K\n" + + "\x05value\x18\x02 \x01(\v25.c1.connector.v2.ConnectorAccountCreationSchema.FieldR\x05value:\x028\x01\x1a\xa8\x05\n" + + "\x05Field\x12!\n" + + "\fdisplay_name\x18\x01 \x01(\tR\vdisplayName\x12\x1a\n" + + "\brequired\x18\x02 \x01(\bR\brequired\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x12 \n" + + "\vplaceholder\x18\x04 \x01(\tR\vplaceholder\x12\x14\n" + + "\x05order\x18\x05 \x01(\x05R\x05order\x12\x1e\n" + + "\n" + + "deprecated\x18\x06 \x01(\bR\n" + + "deprecated\x12`\n" + + "\fstring_field\x18d \x01(\v2;.c1.connector.v2.ConnectorAccountCreationSchema.StringFieldH\x00R\vstringField\x12Z\n" + + "\n" + + "bool_field\x18e \x01(\v29.c1.connector.v2.ConnectorAccountCreationSchema.BoolFieldH\x00R\tboolField\x12m\n" + + "\x11string_list_field\x18f \x01(\v2?.c1.connector.v2.ConnectorAccountCreationSchema.StringListFieldH\x00R\x0fstringListField\x12W\n" + + "\tint_field\x18g \x01(\v28.c1.connector.v2.ConnectorAccountCreationSchema.IntFieldH\x00R\bintField\x12W\n" + + "\tmap_field\x18h \x01(\v28.c1.connector.v2.ConnectorAccountCreationSchema.MapFieldH\x00R\bmapFieldB\a\n" + + "\x05field\x1aI\n" + + "\vStringField\x12(\n" + + "\rdefault_value\x18\x01 \x01(\tH\x00R\fdefaultValue\x88\x01\x01B\x10\n" + + "\x0e_default_value\x1aG\n" + + "\tBoolField\x12(\n" + + "\rdefault_value\x18\x01 \x01(\bH\x00R\fdefaultValue\x88\x01\x01B\x10\n" + + "\x0e_default_value\x1a6\n" + + "\x0fStringListField\x12#\n" + + "\rdefault_value\x18\x01 \x03(\tR\fdefaultValue\x1aF\n" + + "\bIntField\x12(\n" + + "\rdefault_value\x18\x01 \x01(\x05H\x00R\fdefaultValue\x88\x01\x01B\x10\n" + + "\x0e_default_value\x1a\xf3\x01\n" + + "\bMapField\x12o\n" + + "\rdefault_value\x18\x01 \x03(\v2J.c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntryR\fdefaultValue\x1av\n" + + "\x11DefaultValueEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12K\n" + + "\x05value\x18\x02 \x01(\v25.c1.connector.v2.ConnectorAccountCreationSchema.FieldR\x05value:\x028\x01*\x86\x03\n" + + "\n" + + "Capability\x12\x1a\n" + + "\x16CAPABILITY_UNSPECIFIED\x10\x00\x12\x18\n" + + "\x14CAPABILITY_PROVISION\x10\x01\x12\x13\n" + + "\x0fCAPABILITY_SYNC\x10\x02\x12\x19\n" + + "\x15CAPABILITY_EVENT_FEED\x10\x03\x12\x18\n" + + "\x14CAPABILITY_TICKETING\x10\x04\x12#\n" + + "\x1fCAPABILITY_ACCOUNT_PROVISIONING\x10\x05\x12\"\n" + + "\x1eCAPABILITY_CREDENTIAL_ROTATION\x10\x06\x12\x1e\n" + + "\x1aCAPABILITY_RESOURCE_CREATE\x10\a\x12\x1e\n" + + 
"\x1aCAPABILITY_RESOURCE_DELETE\x10\b\x12\x1b\n" + + "\x17CAPABILITY_SYNC_SECRETS\x10\t\x12\x16\n" + + "\x12CAPABILITY_ACTIONS\x10\n" + + "\x12\x1c\n" + + "\x18CAPABILITY_TARGETED_SYNC\x10\v\x12\x1c\n" + + "\x18CAPABILITY_EVENT_FEED_V2\x10\f*\xae\x02\n" + + " CapabilityDetailCredentialOption\x123\n" + + "/CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED\x10\x00\x123\n" + + "/CAPABILITY_DETAIL_CREDENTIAL_OPTION_NO_PASSWORD\x10\x01\x127\n" + + "3CAPABILITY_DETAIL_CREDENTIAL_OPTION_RANDOM_PASSWORD\x10\x02\x12+\n" + + "'CAPABILITY_DETAIL_CREDENTIAL_OPTION_SSO\x10\x03\x12:\n" + + "6CAPABILITY_DETAIL_CREDENTIAL_OPTION_ENCRYPTED_PASSWORD\x10\x042\xeb\x02\n" + + "\x10ConnectorService\x12x\n" + + "\vGetMetadata\x123.c1.connector.v2.ConnectorServiceGetMetadataRequest\x1a4.c1.connector.v2.ConnectorServiceGetMetadataResponse\x12o\n" + + "\bValidate\x120.c1.connector.v2.ConnectorServiceValidateRequest\x1a1.c1.connector.v2.ConnectorServiceValidateResponse\x12l\n" + + "\aCleanup\x12/.c1.connector.v2.ConnectorServiceCleanupRequest\x1a0.c1.connector.v2.ConnectorServiceCleanupResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_connector_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_c1_connector_v2_connector_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_c1_connector_v2_connector_proto_goTypes = []any{ + (Capability)(0), // 0: c1.connector.v2.Capability + (CapabilityDetailCredentialOption)(0), // 1: c1.connector.v2.CapabilityDetailCredentialOption + (*ConnectorServiceCleanupRequest)(nil), // 2: c1.connector.v2.ConnectorServiceCleanupRequest + (*ConnectorServiceCleanupResponse)(nil), // 3: c1.connector.v2.ConnectorServiceCleanupResponse + (*ConnectorMetadata)(nil), // 4: c1.connector.v2.ConnectorMetadata + (*CredentialDetails)(nil), // 5: c1.connector.v2.CredentialDetails + (*CredentialDetailsAccountProvisioning)(nil), // 6: c1.connector.v2.CredentialDetailsAccountProvisioning + (*CredentialDetailsCredentialRotation)(nil), // 7: c1.connector.v2.CredentialDetailsCredentialRotation + (*ConnectorCapabilities)(nil), // 8: c1.connector.v2.ConnectorCapabilities + (*CapabilityPermission)(nil), // 9: c1.connector.v2.CapabilityPermission + (*CapabilityPermissions)(nil), // 10: c1.connector.v2.CapabilityPermissions + (*ResourceTypeCapability)(nil), // 11: c1.connector.v2.ResourceTypeCapability + (*ConnectorServiceGetMetadataRequest)(nil), // 12: c1.connector.v2.ConnectorServiceGetMetadataRequest + (*ConnectorServiceGetMetadataResponse)(nil), // 13: c1.connector.v2.ConnectorServiceGetMetadataResponse + (*ConnectorServiceValidateRequest)(nil), // 14: c1.connector.v2.ConnectorServiceValidateRequest + (*ConnectorServiceValidateResponse)(nil), // 15: c1.connector.v2.ConnectorServiceValidateResponse + (*ConnectorAccountCreationSchema)(nil), // 16: c1.connector.v2.ConnectorAccountCreationSchema + nil, // 17: c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry + (*ConnectorAccountCreationSchema_Field)(nil), // 18: c1.connector.v2.ConnectorAccountCreationSchema.Field + (*ConnectorAccountCreationSchema_StringField)(nil), // 19: c1.connector.v2.ConnectorAccountCreationSchema.StringField + (*ConnectorAccountCreationSchema_BoolField)(nil), // 20: c1.connector.v2.ConnectorAccountCreationSchema.BoolField + (*ConnectorAccountCreationSchema_StringListField)(nil), // 21: c1.connector.v2.ConnectorAccountCreationSchema.StringListField + (*ConnectorAccountCreationSchema_IntField)(nil), // 22: c1.connector.v2.ConnectorAccountCreationSchema.IntField + 
(*ConnectorAccountCreationSchema_MapField)(nil), // 23: c1.connector.v2.ConnectorAccountCreationSchema.MapField + nil, // 24: c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry + (*anypb.Any)(nil), // 25: google.protobuf.Any + (*AssetRef)(nil), // 26: c1.connector.v2.AssetRef + (*structpb.Struct)(nil), // 27: google.protobuf.Struct + (*ResourceType)(nil), // 28: c1.connector.v2.ResourceType +} +var file_c1_connector_v2_connector_proto_depIdxs = []int32{ + 25, // 0: c1.connector.v2.ConnectorServiceCleanupRequest.annotations:type_name -> google.protobuf.Any + 25, // 1: c1.connector.v2.ConnectorServiceCleanupResponse.annotations:type_name -> google.protobuf.Any + 26, // 2: c1.connector.v2.ConnectorMetadata.icon:type_name -> c1.connector.v2.AssetRef + 26, // 3: c1.connector.v2.ConnectorMetadata.logo:type_name -> c1.connector.v2.AssetRef + 27, // 4: c1.connector.v2.ConnectorMetadata.profile:type_name -> google.protobuf.Struct + 25, // 5: c1.connector.v2.ConnectorMetadata.annotations:type_name -> google.protobuf.Any + 8, // 6: c1.connector.v2.ConnectorMetadata.capabilities:type_name -> c1.connector.v2.ConnectorCapabilities + 16, // 7: c1.connector.v2.ConnectorMetadata.account_creation_schema:type_name -> c1.connector.v2.ConnectorAccountCreationSchema + 6, // 8: c1.connector.v2.CredentialDetails.capability_account_provisioning:type_name -> c1.connector.v2.CredentialDetailsAccountProvisioning + 7, // 9: c1.connector.v2.CredentialDetails.capability_credential_rotation:type_name -> c1.connector.v2.CredentialDetailsCredentialRotation + 1, // 10: c1.connector.v2.CredentialDetailsAccountProvisioning.supported_credential_options:type_name -> c1.connector.v2.CapabilityDetailCredentialOption + 1, // 11: c1.connector.v2.CredentialDetailsAccountProvisioning.preferred_credential_option:type_name -> c1.connector.v2.CapabilityDetailCredentialOption + 1, // 12: c1.connector.v2.CredentialDetailsCredentialRotation.supported_credential_options:type_name -> c1.connector.v2.CapabilityDetailCredentialOption + 1, // 13: c1.connector.v2.CredentialDetailsCredentialRotation.preferred_credential_option:type_name -> c1.connector.v2.CapabilityDetailCredentialOption + 11, // 14: c1.connector.v2.ConnectorCapabilities.resource_type_capabilities:type_name -> c1.connector.v2.ResourceTypeCapability + 0, // 15: c1.connector.v2.ConnectorCapabilities.connector_capabilities:type_name -> c1.connector.v2.Capability + 5, // 16: c1.connector.v2.ConnectorCapabilities.credential_details:type_name -> c1.connector.v2.CredentialDetails + 9, // 17: c1.connector.v2.CapabilityPermissions.permissions:type_name -> c1.connector.v2.CapabilityPermission + 28, // 18: c1.connector.v2.ResourceTypeCapability.resource_type:type_name -> c1.connector.v2.ResourceType + 0, // 19: c1.connector.v2.ResourceTypeCapability.capabilities:type_name -> c1.connector.v2.Capability + 10, // 20: c1.connector.v2.ResourceTypeCapability.permissions:type_name -> c1.connector.v2.CapabilityPermissions + 4, // 21: c1.connector.v2.ConnectorServiceGetMetadataResponse.metadata:type_name -> c1.connector.v2.ConnectorMetadata + 25, // 22: c1.connector.v2.ConnectorServiceValidateResponse.annotations:type_name -> google.protobuf.Any + 17, // 23: c1.connector.v2.ConnectorAccountCreationSchema.field_map:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry + 18, // 24: c1.connector.v2.ConnectorAccountCreationSchema.FieldMapEntry.value:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.Field + 19, // 25: 
c1.connector.v2.ConnectorAccountCreationSchema.Field.string_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.StringField + 20, // 26: c1.connector.v2.ConnectorAccountCreationSchema.Field.bool_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.BoolField + 21, // 27: c1.connector.v2.ConnectorAccountCreationSchema.Field.string_list_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.StringListField + 22, // 28: c1.connector.v2.ConnectorAccountCreationSchema.Field.int_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.IntField + 23, // 29: c1.connector.v2.ConnectorAccountCreationSchema.Field.map_field:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.MapField + 24, // 30: c1.connector.v2.ConnectorAccountCreationSchema.MapField.default_value:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry + 18, // 31: c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntry.value:type_name -> c1.connector.v2.ConnectorAccountCreationSchema.Field + 12, // 32: c1.connector.v2.ConnectorService.GetMetadata:input_type -> c1.connector.v2.ConnectorServiceGetMetadataRequest + 14, // 33: c1.connector.v2.ConnectorService.Validate:input_type -> c1.connector.v2.ConnectorServiceValidateRequest + 2, // 34: c1.connector.v2.ConnectorService.Cleanup:input_type -> c1.connector.v2.ConnectorServiceCleanupRequest + 13, // 35: c1.connector.v2.ConnectorService.GetMetadata:output_type -> c1.connector.v2.ConnectorServiceGetMetadataResponse + 15, // 36: c1.connector.v2.ConnectorService.Validate:output_type -> c1.connector.v2.ConnectorServiceValidateResponse + 3, // 37: c1.connector.v2.ConnectorService.Cleanup:output_type -> c1.connector.v2.ConnectorServiceCleanupResponse + 35, // [35:38] is the sub-list for method output_type + 32, // [32:35] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_connector_proto_init() } +func file_c1_connector_v2_connector_proto_init() { + if File_c1_connector_v2_connector_proto != nil { + return + } + file_c1_connector_v2_asset_proto_init() + file_c1_connector_v2_resource_proto_init() + file_c1_connector_v2_connector_proto_msgTypes[16].OneofWrappers = []any{ + (*connectorAccountCreationSchema_Field_StringField)(nil), + (*connectorAccountCreationSchema_Field_BoolField)(nil), + (*connectorAccountCreationSchema_Field_StringListField)(nil), + (*connectorAccountCreationSchema_Field_IntField)(nil), + (*connectorAccountCreationSchema_Field_MapField)(nil), + } + file_c1_connector_v2_connector_proto_msgTypes[17].OneofWrappers = []any{} + file_c1_connector_v2_connector_proto_msgTypes[18].OneofWrappers = []any{} + file_c1_connector_v2_connector_proto_msgTypes[20].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_connector_proto_rawDesc), len(file_c1_connector_v2_connector_proto_rawDesc)), + NumEnums: 2, + NumMessages: 23, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connector_v2_connector_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_connector_proto_depIdxs, + EnumInfos: file_c1_connector_v2_connector_proto_enumTypes, + MessageInfos: file_c1_connector_v2_connector_proto_msgTypes, + }.Build() + 
File_c1_connector_v2_connector_proto = out.File + file_c1_connector_v2_connector_proto_goTypes = nil + file_c1_connector_v2_connector_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.go index ec6c1d0e..fca3e908 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/entitlement.proto +//go:build !protoopaque + package v2 import ( @@ -12,7 +14,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -67,13 +68,8 @@ func (x Entitlement_PurposeValue) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use Entitlement_PurposeValue.Descriptor instead. -func (Entitlement_PurposeValue) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_entitlement_proto_rawDescGZIP(), []int{0, 0} -} - type Entitlement struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` @@ -111,11 +107,6 @@ func (x *Entitlement) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Entitlement.ProtoReflect.Descriptor instead. -func (*Entitlement) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_entitlement_proto_rawDescGZIP(), []int{0} -} - func (x *Entitlement) GetResource() *Resource { if x != nil { return x.Resource @@ -172,12 +163,84 @@ func (x *Entitlement) GetSlug() string { return "" } +func (x *Entitlement) SetResource(v *Resource) { + x.Resource = v +} + +func (x *Entitlement) SetId(v string) { + x.Id = v +} + +func (x *Entitlement) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *Entitlement) SetDescription(v string) { + x.Description = v +} + +func (x *Entitlement) SetGrantableTo(v []*ResourceType) { + x.GrantableTo = v +} + +func (x *Entitlement) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Entitlement) SetPurpose(v Entitlement_PurposeValue) { + x.Purpose = v +} + +func (x *Entitlement) SetSlug(v string) { + x.Slug = v +} + +func (x *Entitlement) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *Entitlement) ClearResource() { + x.Resource = nil +} + +type Entitlement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + Id string + DisplayName string + Description string + GrantableTo []*ResourceType + Annotations []*anypb.Any + Purpose Entitlement_PurposeValue + Slug string +} + +func (b0 Entitlement_builder) Build() *Entitlement { + m0 := &Entitlement{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + x.Id = b.Id + x.DisplayName = b.DisplayName + x.Description = b.Description + x.GrantableTo = b.GrantableTo + x.Annotations = b.Annotations + x.Purpose = b.Purpose + x.Slug = b.Slug + return m0 +} + type EntitlementsServiceListEntitlementsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` + ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -207,11 +270,6 @@ func (x *EntitlementsServiceListEntitlementsRequest) ProtoReflect() protoreflect return mi.MessageOf(x) } -// Deprecated: Use EntitlementsServiceListEntitlementsRequest.ProtoReflect.Descriptor instead. -func (*EntitlementsServiceListEntitlementsRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_entitlement_proto_rawDescGZIP(), []int{1} -} - func (x *EntitlementsServiceListEntitlementsRequest) GetResource() *Resource { if x != nil { return x.Resource @@ -240,8 +298,68 @@ func (x *EntitlementsServiceListEntitlementsRequest) GetAnnotations() []*anypb.A return nil } +func (x *EntitlementsServiceListEntitlementsRequest) GetActiveSyncId() string { + if x != nil { + return x.ActiveSyncId + } + return "" +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetResource(v *Resource) { + x.Resource = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetActiveSyncId(v string) { + x.ActiveSyncId = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *EntitlementsServiceListEntitlementsRequest) ClearResource() { + x.Resource = nil +} + +type EntitlementsServiceListEntitlementsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 EntitlementsServiceListEntitlementsRequest_builder) Build() *EntitlementsServiceListEntitlementsRequest { + m0 := &EntitlementsServiceListEntitlementsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + x.ActiveSyncId = b.ActiveSyncId + return m0 +} + type EntitlementsServiceListEntitlementsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` List []*Entitlement `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -274,11 +392,6 @@ func (x *EntitlementsServiceListEntitlementsResponse) ProtoReflect() protoreflec return mi.MessageOf(x) } -// Deprecated: Use EntitlementsServiceListEntitlementsResponse.ProtoReflect.Descriptor instead. -func (*EntitlementsServiceListEntitlementsResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_entitlement_proto_rawDescGZIP(), []int{2} -} - func (x *EntitlementsServiceListEntitlementsResponse) GetList() []*Entitlement { if x != nil { return x.List @@ -300,135 +413,320 @@ func (x *EntitlementsServiceListEntitlementsResponse) GetAnnotations() []*anypb. return nil } -var File_c1_connector_v2_entitlement_proto protoreflect.FileDescriptor +func (x *EntitlementsServiceListEntitlementsResponse) SetList(v []*Entitlement) { + x.List = v +} -var file_c1_connector_v2_entitlement_proto_rawDesc = string([]byte{ - 0x0a, 0x21, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x04, 0x0a, 0x0b, 0x45, 0x6e, 0x74, - 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, - 0x08, 0x52, 0x02, 0x69, 0x64, 0x12, 0x30, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, - 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x64, 
0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, - 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0c, 0x67, 0x72, 0x61, 0x6e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x67, - 0x72, 0x61, 0x6e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x6f, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6c, 0x75, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x73, 0x6c, 0x75, 0x67, 0x22, 0x69, 0x0a, 0x0c, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x52, 0x50, 0x4f, 0x53, 0x45, - 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x55, 0x52, 0x50, 0x4f, 0x53, 0x45, 0x5f, - 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x4d, 0x45, 0x4e, 0x54, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x55, 0x52, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x56, 0x41, - 0x4c, 0x55, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x02, - 0x22, 0xf3, 0x01, 0x0a, 0x2a, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, - 0x18, 0xfa, 0x01, 0x40, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x2d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x72, 0x09, 0x20, 0x01, 0x28, 0x80, 0x80, 0x40, - 0xd0, 0x01, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, - 0x0a, 
0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x2b, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, - 0x73, 0x74, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x72, 0x09, 0x20, 0x01, 0x28, 0x80, 0x80, 0x40, 0xd0, 0x01, - 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0xa5, 0x01, 0x0a, 0x13, 0x45, 0x6e, 0x74, - 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x8d, 0x01, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x45, - 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x3c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, - 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, - 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +func (x *EntitlementsServiceListEntitlementsResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} -var ( - file_c1_connector_v2_entitlement_proto_rawDescOnce sync.Once - file_c1_connector_v2_entitlement_proto_rawDescData []byte -) +func (x *EntitlementsServiceListEntitlementsResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} -func file_c1_connector_v2_entitlement_proto_rawDescGZIP() []byte { - file_c1_connector_v2_entitlement_proto_rawDescOnce.Do(func() { - 
file_c1_connector_v2_entitlement_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_entitlement_proto_rawDesc), len(file_c1_connector_v2_entitlement_proto_rawDesc))) - }) - return file_c1_connector_v2_entitlement_proto_rawDescData +type EntitlementsServiceListEntitlementsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*Entitlement + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 EntitlementsServiceListEntitlementsResponse_builder) Build() *EntitlementsServiceListEntitlementsResponse { + m0 := &EntitlementsServiceListEntitlementsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.NextPageToken = b.NextPageToken + x.Annotations = b.Annotations + return m0 } +type EntitlementsServiceListStaticEntitlementsRequest struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` + PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` + ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) Reset() { + *x = EntitlementsServiceListStaticEntitlementsRequest{} + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementsServiceListStaticEntitlementsRequest) ProtoMessage() {} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetPageSize() uint32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetAnnotations() []*anypb.Any { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetActiveSyncId() string { + if x != nil { + return x.ActiveSyncId + } + return "" +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetAnnotations(v 
[]*anypb.Any) { + x.Annotations = v +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetActiveSyncId(v string) { + x.ActiveSyncId = v +} + +type EntitlementsServiceListStaticEntitlementsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceTypeId string + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 EntitlementsServiceListStaticEntitlementsRequest_builder) Build() *EntitlementsServiceListStaticEntitlementsRequest { + m0 := &EntitlementsServiceListStaticEntitlementsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceTypeId = b.ResourceTypeId + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + x.ActiveSyncId = b.ActiveSyncId + return m0 +} + +type EntitlementsServiceListStaticEntitlementsResponse struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + List []*Entitlement `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) Reset() { + *x = EntitlementsServiceListStaticEntitlementsResponse{} + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementsServiceListStaticEntitlementsResponse) ProtoMessage() {} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) GetList() []*Entitlement { + if x != nil { + return x.List + } + return nil +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) GetAnnotations() []*anypb.Any { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) SetList(v []*Entitlement) { + x.List = v +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type EntitlementsServiceListStaticEntitlementsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + List []*Entitlement + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 EntitlementsServiceListStaticEntitlementsResponse_builder) Build() *EntitlementsServiceListStaticEntitlementsResponse { + m0 := &EntitlementsServiceListStaticEntitlementsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.NextPageToken = b.NextPageToken + x.Annotations = b.Annotations + return m0 +} + +var File_c1_connector_v2_entitlement_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_entitlement_proto_rawDesc = "" + + "\n" + + "!c1/connector/v2/entitlement.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x95\x04\n" + + "\vEntitlement\x12?\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\bresource\x12\x1a\n" + + "\x02id\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x120\n" + + "\fdisplay_name\x18\x03 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\vdisplayName\x12/\n" + + "\vdescription\x18\x04 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\vdescription\x12@\n" + + "\fgrantable_to\x18\x05 \x03(\v2\x1d.c1.connector.v2.ResourceTypeR\vgrantableTo\x126\n" + + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12M\n" + + "\apurpose\x18\a \x01(\x0e2).c1.connector.v2.Entitlement.PurposeValueB\b\xfaB\x05\x82\x01\x02\x10\x01R\apurpose\x12\x12\n" + + "\x04slug\x18\b \x01(\tR\x04slug\"i\n" + + "\fPurposeValue\x12\x1d\n" + + "\x19PURPOSE_VALUE_UNSPECIFIED\x10\x00\x12\x1c\n" + + "\x18PURPOSE_VALUE_ASSIGNMENT\x10\x01\x12\x1c\n" + + "\x18PURPOSE_VALUE_PERMISSION\x10\x02\"\xa8\x02\n" + + "*EntitlementsServiceListEntitlementsRequest\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xcf\x01\n" + + "+EntitlementsServiceListEntitlementsResponse\x120\n" + + "\x04list\x18\x01 \x03(\v2\x1c.c1.connector.v2.EntitlementR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xad\x02\n" + + "0EntitlementsServiceListStaticEntitlementsRequest\x124\n" + + "\x10resource_type_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x0eresourceTypeId\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xd5\x01\n" + + "1EntitlementsServiceListStaticEntitlementsResponse\x120\n" + + "\x04list\x18\x01 \x03(\v2\x1c.c1.connector.v2.EntitlementR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations2\xc7\x02\n" + + "\x13EntitlementsService\x12\x8d\x01\n" + + 
"\x10ListEntitlements\x12;.c1.connector.v2.EntitlementsServiceListEntitlementsRequest\x1a<.c1.connector.v2.EntitlementsServiceListEntitlementsResponse\x12\x9f\x01\n" + + "\x16ListStaticEntitlements\x12A.c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest\x1aB.c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_entitlement_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_c1_connector_v2_entitlement_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_c1_connector_v2_entitlement_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_c1_connector_v2_entitlement_proto_goTypes = []any{ - (Entitlement_PurposeValue)(0), // 0: c1.connector.v2.Entitlement.PurposeValue - (*Entitlement)(nil), // 1: c1.connector.v2.Entitlement - (*EntitlementsServiceListEntitlementsRequest)(nil), // 2: c1.connector.v2.EntitlementsServiceListEntitlementsRequest - (*EntitlementsServiceListEntitlementsResponse)(nil), // 3: c1.connector.v2.EntitlementsServiceListEntitlementsResponse - (*Resource)(nil), // 4: c1.connector.v2.Resource - (*ResourceType)(nil), // 5: c1.connector.v2.ResourceType - (*anypb.Any)(nil), // 6: google.protobuf.Any + (Entitlement_PurposeValue)(0), // 0: c1.connector.v2.Entitlement.PurposeValue + (*Entitlement)(nil), // 1: c1.connector.v2.Entitlement + (*EntitlementsServiceListEntitlementsRequest)(nil), // 2: c1.connector.v2.EntitlementsServiceListEntitlementsRequest + (*EntitlementsServiceListEntitlementsResponse)(nil), // 3: c1.connector.v2.EntitlementsServiceListEntitlementsResponse + (*EntitlementsServiceListStaticEntitlementsRequest)(nil), // 4: c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest + (*EntitlementsServiceListStaticEntitlementsResponse)(nil), // 5: c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponse + (*Resource)(nil), // 6: c1.connector.v2.Resource + (*ResourceType)(nil), // 7: c1.connector.v2.ResourceType + (*anypb.Any)(nil), // 8: google.protobuf.Any } var file_c1_connector_v2_entitlement_proto_depIdxs = []int32{ - 4, // 0: c1.connector.v2.Entitlement.resource:type_name -> c1.connector.v2.Resource - 5, // 1: c1.connector.v2.Entitlement.grantable_to:type_name -> c1.connector.v2.ResourceType - 6, // 2: c1.connector.v2.Entitlement.annotations:type_name -> google.protobuf.Any - 0, // 3: c1.connector.v2.Entitlement.purpose:type_name -> c1.connector.v2.Entitlement.PurposeValue - 4, // 4: c1.connector.v2.EntitlementsServiceListEntitlementsRequest.resource:type_name -> c1.connector.v2.Resource - 6, // 5: c1.connector.v2.EntitlementsServiceListEntitlementsRequest.annotations:type_name -> google.protobuf.Any - 1, // 6: c1.connector.v2.EntitlementsServiceListEntitlementsResponse.list:type_name -> c1.connector.v2.Entitlement - 6, // 7: c1.connector.v2.EntitlementsServiceListEntitlementsResponse.annotations:type_name -> google.protobuf.Any - 2, // 8: c1.connector.v2.EntitlementsService.ListEntitlements:input_type -> c1.connector.v2.EntitlementsServiceListEntitlementsRequest - 3, // 9: c1.connector.v2.EntitlementsService.ListEntitlements:output_type -> c1.connector.v2.EntitlementsServiceListEntitlementsResponse - 9, // [9:10] is the sub-list for method output_type - 8, // [8:9] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name + 6, // 0: c1.connector.v2.Entitlement.resource:type_name 
-> c1.connector.v2.Resource + 7, // 1: c1.connector.v2.Entitlement.grantable_to:type_name -> c1.connector.v2.ResourceType + 8, // 2: c1.connector.v2.Entitlement.annotations:type_name -> google.protobuf.Any + 0, // 3: c1.connector.v2.Entitlement.purpose:type_name -> c1.connector.v2.Entitlement.PurposeValue + 6, // 4: c1.connector.v2.EntitlementsServiceListEntitlementsRequest.resource:type_name -> c1.connector.v2.Resource + 8, // 5: c1.connector.v2.EntitlementsServiceListEntitlementsRequest.annotations:type_name -> google.protobuf.Any + 1, // 6: c1.connector.v2.EntitlementsServiceListEntitlementsResponse.list:type_name -> c1.connector.v2.Entitlement + 8, // 7: c1.connector.v2.EntitlementsServiceListEntitlementsResponse.annotations:type_name -> google.protobuf.Any + 8, // 8: c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest.annotations:type_name -> google.protobuf.Any + 1, // 9: c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponse.list:type_name -> c1.connector.v2.Entitlement + 8, // 10: c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponse.annotations:type_name -> google.protobuf.Any + 2, // 11: c1.connector.v2.EntitlementsService.ListEntitlements:input_type -> c1.connector.v2.EntitlementsServiceListEntitlementsRequest + 4, // 12: c1.connector.v2.EntitlementsService.ListStaticEntitlements:input_type -> c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest + 3, // 13: c1.connector.v2.EntitlementsService.ListEntitlements:output_type -> c1.connector.v2.EntitlementsServiceListEntitlementsResponse + 5, // 14: c1.connector.v2.EntitlementsService.ListStaticEntitlements:output_type -> c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponse + 13, // [13:15] is the sub-list for method output_type + 11, // [11:13] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name } func init() { file_c1_connector_v2_entitlement_proto_init() } @@ -443,7 +741,7 @@ func file_c1_connector_v2_entitlement_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_entitlement_proto_rawDesc), len(file_c1_connector_v2_entitlement_proto_rawDesc)), NumEnums: 1, - NumMessages: 3, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.validate.go index 4f979076..cf8fb4e8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.validate.go @@ -413,6 +413,21 @@ func (m *EntitlementsServiceListEntitlementsRequest) validate(all bool) error { } + if m.GetActiveSyncId() != "" { + + if l := len(m.GetActiveSyncId()); l < 1 || l > 1024 { + err := EntitlementsServiceListEntitlementsRequestValidationError{ + field: "ActiveSyncId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return EntitlementsServiceListEntitlementsRequestMultiError(errors) } @@ -683,3 +698,402 @@ var _ interface { Cause() error ErrorName() string } = EntitlementsServiceListEntitlementsResponseValidationError{} + +// Validate checks the field values on +// 
EntitlementsServiceListStaticEntitlementsRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EntitlementsServiceListStaticEntitlementsRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// EntitlementsServiceListStaticEntitlementsRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// EntitlementsServiceListStaticEntitlementsRequestMultiError, or nil if none found. +func (m *EntitlementsServiceListStaticEntitlementsRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *EntitlementsServiceListStaticEntitlementsRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if l := len(m.GetResourceTypeId()); l < 1 || l > 1024 { + err := EntitlementsServiceListStaticEntitlementsRequestValidationError{ + field: "ResourceTypeId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if m.GetPageSize() != 0 { + + if m.GetPageSize() > 250 { + err := EntitlementsServiceListStaticEntitlementsRequestValidationError{ + field: "PageSize", + reason: "value must be less than or equal to 250", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if m.GetPageToken() != "" { + + if l := len(m.GetPageToken()); l < 1 || l > 1048576 { + err := EntitlementsServiceListStaticEntitlementsRequestValidationError{ + field: "PageToken", + reason: "value length must be between 1 and 1048576 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetAnnotations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EntitlementsServiceListStaticEntitlementsRequestValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EntitlementsServiceListStaticEntitlementsRequestValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EntitlementsServiceListStaticEntitlementsRequestValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if m.GetActiveSyncId() != "" { + + if l := len(m.GetActiveSyncId()); l < 1 || l > 1024 { + err := EntitlementsServiceListStaticEntitlementsRequestValidationError{ + field: "ActiveSyncId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if len(errors) > 0 { + return EntitlementsServiceListStaticEntitlementsRequestMultiError(errors) + } + + return nil +} + +// EntitlementsServiceListStaticEntitlementsRequestMultiError is an error +// wrapping multiple validation errors returned by +// EntitlementsServiceListStaticEntitlementsRequest.ValidateAll() if the +// designated constraints aren't 
met. +type EntitlementsServiceListStaticEntitlementsRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EntitlementsServiceListStaticEntitlementsRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EntitlementsServiceListStaticEntitlementsRequestMultiError) AllErrors() []error { return m } + +// EntitlementsServiceListStaticEntitlementsRequestValidationError is the +// validation error returned by +// EntitlementsServiceListStaticEntitlementsRequest.Validate if the designated +// constraints aren't met. +type EntitlementsServiceListStaticEntitlementsRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EntitlementsServiceListStaticEntitlementsRequestValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e EntitlementsServiceListStaticEntitlementsRequestValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e EntitlementsServiceListStaticEntitlementsRequestValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e EntitlementsServiceListStaticEntitlementsRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EntitlementsServiceListStaticEntitlementsRequestValidationError) ErrorName() string { + return "EntitlementsServiceListStaticEntitlementsRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e EntitlementsServiceListStaticEntitlementsRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEntitlementsServiceListStaticEntitlementsRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EntitlementsServiceListStaticEntitlementsRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EntitlementsServiceListStaticEntitlementsRequestValidationError{} + +// Validate checks the field values on +// EntitlementsServiceListStaticEntitlementsResponse with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EntitlementsServiceListStaticEntitlementsResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// EntitlementsServiceListStaticEntitlementsResponse with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in +// EntitlementsServiceListStaticEntitlementsResponseMultiError, or nil if none found. 
+func (m *EntitlementsServiceListStaticEntitlementsResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *EntitlementsServiceListStaticEntitlementsResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetList() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EntitlementsServiceListStaticEntitlementsResponseValidationError{ + field: fmt.Sprintf("List[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EntitlementsServiceListStaticEntitlementsResponseValidationError{ + field: fmt.Sprintf("List[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EntitlementsServiceListStaticEntitlementsResponseValidationError{ + field: fmt.Sprintf("List[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if m.GetNextPageToken() != "" { + + if l := len(m.GetNextPageToken()); l < 1 || l > 1048576 { + err := EntitlementsServiceListStaticEntitlementsResponseValidationError{ + field: "NextPageToken", + reason: "value length must be between 1 and 1048576 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + for idx, item := range m.GetAnnotations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EntitlementsServiceListStaticEntitlementsResponseValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EntitlementsServiceListStaticEntitlementsResponseValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EntitlementsServiceListStaticEntitlementsResponseValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EntitlementsServiceListStaticEntitlementsResponseMultiError(errors) + } + + return nil +} + +// EntitlementsServiceListStaticEntitlementsResponseMultiError is an error +// wrapping multiple validation errors returned by +// EntitlementsServiceListStaticEntitlementsResponse.ValidateAll() if the +// designated constraints aren't met. +type EntitlementsServiceListStaticEntitlementsResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EntitlementsServiceListStaticEntitlementsResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
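The generated Validate / ValidateAll pair follows the usual protoc-gen-validate contract: Validate stops at the first violated rule, while ValidateAll collects every violation into the typed MultiError slice. A minimal sketch of how a caller might drive this for the new ListStaticEntitlements request, assuming the generated package is imported as v2 from github.com/conductorone/baton-sdk/pb/c1/connector/v2; the checkRequest helper and the "team" / page-size values are illustrative only, not part of the SDK:

package main

import (
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// checkRequest is an illustrative helper: it validates a request and prints
// every violation instead of stopping at the first one.
func checkRequest(req *v2.EntitlementsServiceListStaticEntitlementsRequest) error {
	if err := req.ValidateAll(); err != nil {
		// ValidateAll returns the concrete MultiError type when any rule fails.
		if multi, ok := err.(v2.EntitlementsServiceListStaticEntitlementsRequestMultiError); ok {
			for _, verr := range multi.AllErrors() {
				fmt.Println("validation violation:", verr)
			}
		}
		return err
	}
	return nil
}

func main() {
	// Keyed builder literal; "team" and 50 are example values that satisfy
	// the length and page-size rules enforced above.
	req := v2.EntitlementsServiceListStaticEntitlementsRequest_builder{
		ResourceTypeId: "team",
		PageSize:       50,
	}.Build()
	if err := checkRequest(req); err != nil {
		fmt.Println("request rejected:", err)
	}
}
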
+func (m EntitlementsServiceListStaticEntitlementsResponseMultiError) AllErrors() []error { return m } + +// EntitlementsServiceListStaticEntitlementsResponseValidationError is the +// validation error returned by +// EntitlementsServiceListStaticEntitlementsResponse.Validate if the +// designated constraints aren't met. +type EntitlementsServiceListStaticEntitlementsResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EntitlementsServiceListStaticEntitlementsResponseValidationError) Field() string { + return e.field +} + +// Reason function returns reason value. +func (e EntitlementsServiceListStaticEntitlementsResponseValidationError) Reason() string { + return e.reason +} + +// Cause function returns cause value. +func (e EntitlementsServiceListStaticEntitlementsResponseValidationError) Cause() error { + return e.cause +} + +// Key function returns key value. +func (e EntitlementsServiceListStaticEntitlementsResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EntitlementsServiceListStaticEntitlementsResponseValidationError) ErrorName() string { + return "EntitlementsServiceListStaticEntitlementsResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e EntitlementsServiceListStaticEntitlementsResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEntitlementsServiceListStaticEntitlementsResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EntitlementsServiceListStaticEntitlementsResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EntitlementsServiceListStaticEntitlementsResponseValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_grpc.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_grpc.pb.go index 821161d3..6286b134 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_grpc.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_grpc.pb.go @@ -19,7 +19,8 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - EntitlementsService_ListEntitlements_FullMethodName = "/c1.connector.v2.EntitlementsService/ListEntitlements" + EntitlementsService_ListEntitlements_FullMethodName = "/c1.connector.v2.EntitlementsService/ListEntitlements" + EntitlementsService_ListStaticEntitlements_FullMethodName = "/c1.connector.v2.EntitlementsService/ListStaticEntitlements" ) // EntitlementsServiceClient is the client API for EntitlementsService service. @@ -27,6 +28,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type EntitlementsServiceClient interface { ListEntitlements(ctx context.Context, in *EntitlementsServiceListEntitlementsRequest, opts ...grpc.CallOption) (*EntitlementsServiceListEntitlementsResponse, error) + ListStaticEntitlements(ctx context.Context, in *EntitlementsServiceListStaticEntitlementsRequest, opts ...grpc.CallOption) (*EntitlementsServiceListStaticEntitlementsResponse, error) } type entitlementsServiceClient struct { @@ -47,11 +49,22 @@ func (c *entitlementsServiceClient) ListEntitlements(ctx context.Context, in *En return out, nil } +func (c *entitlementsServiceClient) ListStaticEntitlements(ctx context.Context, in *EntitlementsServiceListStaticEntitlementsRequest, opts ...grpc.CallOption) (*EntitlementsServiceListStaticEntitlementsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(EntitlementsServiceListStaticEntitlementsResponse) + err := c.cc.Invoke(ctx, EntitlementsService_ListStaticEntitlements_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // EntitlementsServiceServer is the server API for EntitlementsService service. // All implementations should embed UnimplementedEntitlementsServiceServer // for forward compatibility. type EntitlementsServiceServer interface { ListEntitlements(context.Context, *EntitlementsServiceListEntitlementsRequest) (*EntitlementsServiceListEntitlementsResponse, error) + ListStaticEntitlements(context.Context, *EntitlementsServiceListStaticEntitlementsRequest) (*EntitlementsServiceListStaticEntitlementsResponse, error) } // UnimplementedEntitlementsServiceServer should be embedded to have @@ -64,6 +77,9 @@ type UnimplementedEntitlementsServiceServer struct{} func (UnimplementedEntitlementsServiceServer) ListEntitlements(context.Context, *EntitlementsServiceListEntitlementsRequest) (*EntitlementsServiceListEntitlementsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListEntitlements not implemented") } +func (UnimplementedEntitlementsServiceServer) ListStaticEntitlements(context.Context, *EntitlementsServiceListStaticEntitlementsRequest) (*EntitlementsServiceListStaticEntitlementsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListStaticEntitlements not implemented") +} func (UnimplementedEntitlementsServiceServer) testEmbeddedByValue() {} // UnsafeEntitlementsServiceServer may be embedded to opt out of forward compatibility for this service. @@ -102,6 +118,24 @@ func _EntitlementsService_ListEntitlements_Handler(srv interface{}, ctx context. return interceptor(ctx, in, info, handler) } +func _EntitlementsService_ListStaticEntitlements_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EntitlementsServiceListStaticEntitlementsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntitlementsServiceServer).ListStaticEntitlements(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: EntitlementsService_ListStaticEntitlements_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntitlementsServiceServer).ListStaticEntitlements(ctx, req.(*EntitlementsServiceListStaticEntitlementsRequest)) + } + return interceptor(ctx, in, info, handler) +} + // EntitlementsService_ServiceDesc is the grpc.ServiceDesc for EntitlementsService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -113,6 +147,10 @@ var EntitlementsService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ListEntitlements", Handler: _EntitlementsService_ListEntitlements_Handler, }, + { + MethodName: "ListStaticEntitlements", + Handler: _EntitlementsService_ListStaticEntitlements_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "c1/connector/v2/entitlement.proto", diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_protoopaque.pb.go new file mode 100644 index 00000000..88a1142b --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_protoopaque.pb.go @@ -0,0 +1,772 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/entitlement.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Entitlement_PurposeValue int32 + +const ( + Entitlement_PURPOSE_VALUE_UNSPECIFIED Entitlement_PurposeValue = 0 + Entitlement_PURPOSE_VALUE_ASSIGNMENT Entitlement_PurposeValue = 1 + Entitlement_PURPOSE_VALUE_PERMISSION Entitlement_PurposeValue = 2 +) + +// Enum value maps for Entitlement_PurposeValue. 
+var ( + Entitlement_PurposeValue_name = map[int32]string{ + 0: "PURPOSE_VALUE_UNSPECIFIED", + 1: "PURPOSE_VALUE_ASSIGNMENT", + 2: "PURPOSE_VALUE_PERMISSION", + } + Entitlement_PurposeValue_value = map[string]int32{ + "PURPOSE_VALUE_UNSPECIFIED": 0, + "PURPOSE_VALUE_ASSIGNMENT": 1, + "PURPOSE_VALUE_PERMISSION": 2, + } +) + +func (x Entitlement_PurposeValue) Enum() *Entitlement_PurposeValue { + p := new(Entitlement_PurposeValue) + *p = x + return p +} + +func (x Entitlement_PurposeValue) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Entitlement_PurposeValue) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_entitlement_proto_enumTypes[0].Descriptor() +} + +func (Entitlement_PurposeValue) Type() protoreflect.EnumType { + return &file_c1_connector_v2_entitlement_proto_enumTypes[0] +} + +func (x Entitlement_PurposeValue) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type Entitlement struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_Id string `protobuf:"bytes,2,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Description string `protobuf:"bytes,4,opt,name=description,proto3"` + xxx_hidden_GrantableTo *[]*ResourceType `protobuf:"bytes,5,rep,name=grantable_to,json=grantableTo,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,6,rep,name=annotations,proto3"` + xxx_hidden_Purpose Entitlement_PurposeValue `protobuf:"varint,7,opt,name=purpose,proto3,enum=c1.connector.v2.Entitlement_PurposeValue"` + xxx_hidden_Slug string `protobuf:"bytes,8,opt,name=slug,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Entitlement) Reset() { + *x = Entitlement{} + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Entitlement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entitlement) ProtoMessage() {} + +func (x *Entitlement) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Entitlement) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *Entitlement) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *Entitlement) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *Entitlement) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Entitlement) GetGrantableTo() []*ResourceType { + if x != nil { + if x.xxx_hidden_GrantableTo != nil { + return *x.xxx_hidden_GrantableTo + } + } + return nil +} + +func (x *Entitlement) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Entitlement) GetPurpose() Entitlement_PurposeValue { + if x != nil { + return x.xxx_hidden_Purpose + } + return Entitlement_PURPOSE_VALUE_UNSPECIFIED +} + +func (x *Entitlement) GetSlug() string { + if x != nil { + return 
x.xxx_hidden_Slug + } + return "" +} + +func (x *Entitlement) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *Entitlement) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *Entitlement) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *Entitlement) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Entitlement) SetGrantableTo(v []*ResourceType) { + x.xxx_hidden_GrantableTo = &v +} + +func (x *Entitlement) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Entitlement) SetPurpose(v Entitlement_PurposeValue) { + x.xxx_hidden_Purpose = v +} + +func (x *Entitlement) SetSlug(v string) { + x.xxx_hidden_Slug = v +} + +func (x *Entitlement) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *Entitlement) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type Entitlement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *Resource + Id string + DisplayName string + Description string + GrantableTo []*ResourceType + Annotations []*anypb.Any + Purpose Entitlement_PurposeValue + Slug string +} + +func (b0 Entitlement_builder) Build() *Entitlement { + m0 := &Entitlement{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Description = b.Description + x.xxx_hidden_GrantableTo = &b.GrantableTo + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Purpose = b.Purpose + x.xxx_hidden_Slug = b.Slug + return m0 +} + +type EntitlementsServiceListEntitlementsRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementsServiceListEntitlementsRequest) Reset() { + *x = EntitlementsServiceListEntitlementsRequest{} + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementsServiceListEntitlementsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementsServiceListEntitlementsRequest) ProtoMessage() {} + +func (x *EntitlementsServiceListEntitlementsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementsServiceListEntitlementsRequest) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *EntitlementsServiceListEntitlementsRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *EntitlementsServiceListEntitlementsRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x 
*EntitlementsServiceListEntitlementsRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *EntitlementsServiceListEntitlementsRequest) GetActiveSyncId() string { + if x != nil { + return x.xxx_hidden_ActiveSyncId + } + return "" +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *EntitlementsServiceListEntitlementsRequest) SetActiveSyncId(v string) { + x.xxx_hidden_ActiveSyncId = v +} + +func (x *EntitlementsServiceListEntitlementsRequest) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *EntitlementsServiceListEntitlementsRequest) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type EntitlementsServiceListEntitlementsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *Resource + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 EntitlementsServiceListEntitlementsRequest_builder) Build() *EntitlementsServiceListEntitlementsRequest { + m0 := &EntitlementsServiceListEntitlementsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ActiveSyncId = b.ActiveSyncId + return m0 +} + +type EntitlementsServiceListEntitlementsResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*Entitlement `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementsServiceListEntitlementsResponse) Reset() { + *x = EntitlementsServiceListEntitlementsResponse{} + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementsServiceListEntitlementsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementsServiceListEntitlementsResponse) ProtoMessage() {} + +func (x *EntitlementsServiceListEntitlementsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementsServiceListEntitlementsResponse) GetList() []*Entitlement { + if x != nil { + if x.xxx_hidden_List != nil { + return *x.xxx_hidden_List + } + } + return nil +} + +func (x *EntitlementsServiceListEntitlementsResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *EntitlementsServiceListEntitlementsResponse) GetAnnotations() []*anypb.Any { 
+ if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *EntitlementsServiceListEntitlementsResponse) SetList(v []*Entitlement) { + x.xxx_hidden_List = &v +} + +func (x *EntitlementsServiceListEntitlementsResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +func (x *EntitlementsServiceListEntitlementsResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type EntitlementsServiceListEntitlementsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*Entitlement + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 EntitlementsServiceListEntitlementsResponse_builder) Build() *EntitlementsServiceListEntitlementsResponse { + m0 := &EntitlementsServiceListEntitlementsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_NextPageToken = b.NextPageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type EntitlementsServiceListStaticEntitlementsRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) Reset() { + *x = EntitlementsServiceListStaticEntitlementsRequest{} + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementsServiceListStaticEntitlementsRequest) ProtoMessage() {} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) GetActiveSyncId() string { + if x != nil { + return x.xxx_hidden_ActiveSyncId + } + return "" +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +func (x 
*EntitlementsServiceListStaticEntitlementsRequest) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *EntitlementsServiceListStaticEntitlementsRequest) SetActiveSyncId(v string) { + x.xxx_hidden_ActiveSyncId = v +} + +type EntitlementsServiceListStaticEntitlementsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceTypeId string + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 EntitlementsServiceListStaticEntitlementsRequest_builder) Build() *EntitlementsServiceListStaticEntitlementsRequest { + m0 := &EntitlementsServiceListStaticEntitlementsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ActiveSyncId = b.ActiveSyncId + return m0 +} + +type EntitlementsServiceListStaticEntitlementsResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*Entitlement `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) Reset() { + *x = EntitlementsServiceListStaticEntitlementsResponse{} + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementsServiceListStaticEntitlementsResponse) ProtoMessage() {} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_entitlement_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) GetList() []*Entitlement { + if x != nil { + if x.xxx_hidden_List != nil { + return *x.xxx_hidden_List + } + } + return nil +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) SetList(v []*Entitlement) { + x.xxx_hidden_List = &v +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +func (x *EntitlementsServiceListStaticEntitlementsResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type EntitlementsServiceListStaticEntitlementsResponse_builder struct { + _ [0]func() // Prevents comparability and 
use of unkeyed literals for the builder. + + List []*Entitlement + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 EntitlementsServiceListStaticEntitlementsResponse_builder) Build() *EntitlementsServiceListStaticEntitlementsResponse { + m0 := &EntitlementsServiceListStaticEntitlementsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_NextPageToken = b.NextPageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +var File_c1_connector_v2_entitlement_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_entitlement_proto_rawDesc = "" + + "\n" + + "!c1/connector/v2/entitlement.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x95\x04\n" + + "\vEntitlement\x12?\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\bresource\x12\x1a\n" + + "\x02id\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x120\n" + + "\fdisplay_name\x18\x03 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\vdisplayName\x12/\n" + + "\vdescription\x18\x04 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\vdescription\x12@\n" + + "\fgrantable_to\x18\x05 \x03(\v2\x1d.c1.connector.v2.ResourceTypeR\vgrantableTo\x126\n" + + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12M\n" + + "\apurpose\x18\a \x01(\x0e2).c1.connector.v2.Entitlement.PurposeValueB\b\xfaB\x05\x82\x01\x02\x10\x01R\apurpose\x12\x12\n" + + "\x04slug\x18\b \x01(\tR\x04slug\"i\n" + + "\fPurposeValue\x12\x1d\n" + + "\x19PURPOSE_VALUE_UNSPECIFIED\x10\x00\x12\x1c\n" + + "\x18PURPOSE_VALUE_ASSIGNMENT\x10\x01\x12\x1c\n" + + "\x18PURPOSE_VALUE_PERMISSION\x10\x02\"\xa8\x02\n" + + "*EntitlementsServiceListEntitlementsRequest\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xcf\x01\n" + + "+EntitlementsServiceListEntitlementsResponse\x120\n" + + "\x04list\x18\x01 \x03(\v2\x1c.c1.connector.v2.EntitlementR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xad\x02\n" + + "0EntitlementsServiceListStaticEntitlementsRequest\x124\n" + + "\x10resource_type_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x0eresourceTypeId\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xd5\x01\n" + + "1EntitlementsServiceListStaticEntitlementsResponse\x120\n" + + "\x04list\x18\x01 \x03(\v2\x1c.c1.connector.v2.EntitlementR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations2\xc7\x02\n" + + 
"\x13EntitlementsService\x12\x8d\x01\n" + + "\x10ListEntitlements\x12;.c1.connector.v2.EntitlementsServiceListEntitlementsRequest\x1a<.c1.connector.v2.EntitlementsServiceListEntitlementsResponse\x12\x9f\x01\n" + + "\x16ListStaticEntitlements\x12A.c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest\x1aB.c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_entitlement_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_c1_connector_v2_entitlement_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_c1_connector_v2_entitlement_proto_goTypes = []any{ + (Entitlement_PurposeValue)(0), // 0: c1.connector.v2.Entitlement.PurposeValue + (*Entitlement)(nil), // 1: c1.connector.v2.Entitlement + (*EntitlementsServiceListEntitlementsRequest)(nil), // 2: c1.connector.v2.EntitlementsServiceListEntitlementsRequest + (*EntitlementsServiceListEntitlementsResponse)(nil), // 3: c1.connector.v2.EntitlementsServiceListEntitlementsResponse + (*EntitlementsServiceListStaticEntitlementsRequest)(nil), // 4: c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest + (*EntitlementsServiceListStaticEntitlementsResponse)(nil), // 5: c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponse + (*Resource)(nil), // 6: c1.connector.v2.Resource + (*ResourceType)(nil), // 7: c1.connector.v2.ResourceType + (*anypb.Any)(nil), // 8: google.protobuf.Any +} +var file_c1_connector_v2_entitlement_proto_depIdxs = []int32{ + 6, // 0: c1.connector.v2.Entitlement.resource:type_name -> c1.connector.v2.Resource + 7, // 1: c1.connector.v2.Entitlement.grantable_to:type_name -> c1.connector.v2.ResourceType + 8, // 2: c1.connector.v2.Entitlement.annotations:type_name -> google.protobuf.Any + 0, // 3: c1.connector.v2.Entitlement.purpose:type_name -> c1.connector.v2.Entitlement.PurposeValue + 6, // 4: c1.connector.v2.EntitlementsServiceListEntitlementsRequest.resource:type_name -> c1.connector.v2.Resource + 8, // 5: c1.connector.v2.EntitlementsServiceListEntitlementsRequest.annotations:type_name -> google.protobuf.Any + 1, // 6: c1.connector.v2.EntitlementsServiceListEntitlementsResponse.list:type_name -> c1.connector.v2.Entitlement + 8, // 7: c1.connector.v2.EntitlementsServiceListEntitlementsResponse.annotations:type_name -> google.protobuf.Any + 8, // 8: c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest.annotations:type_name -> google.protobuf.Any + 1, // 9: c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponse.list:type_name -> c1.connector.v2.Entitlement + 8, // 10: c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponse.annotations:type_name -> google.protobuf.Any + 2, // 11: c1.connector.v2.EntitlementsService.ListEntitlements:input_type -> c1.connector.v2.EntitlementsServiceListEntitlementsRequest + 4, // 12: c1.connector.v2.EntitlementsService.ListStaticEntitlements:input_type -> c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest + 3, // 13: c1.connector.v2.EntitlementsService.ListEntitlements:output_type -> c1.connector.v2.EntitlementsServiceListEntitlementsResponse + 5, // 14: c1.connector.v2.EntitlementsService.ListStaticEntitlements:output_type -> c1.connector.v2.EntitlementsServiceListStaticEntitlementsResponse + 13, // [13:15] is the sub-list for method output_type + 11, // [11:13] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 
0, // [0:11] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_entitlement_proto_init() } +func file_c1_connector_v2_entitlement_proto_init() { + if File_c1_connector_v2_entitlement_proto != nil { + return + } + file_c1_connector_v2_resource_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_entitlement_proto_rawDesc), len(file_c1_connector_v2_entitlement_proto_rawDesc)), + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connector_v2_entitlement_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_entitlement_proto_depIdxs, + EnumInfos: file_c1_connector_v2_entitlement_proto_enumTypes, + MessageInfos: file_c1_connector_v2_entitlement_proto_msgTypes, + }.Build() + File_c1_connector_v2_entitlement_proto = out.File + file_c1_connector_v2_entitlement_proto_goTypes = nil + file_c1_connector_v2_entitlement_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.go index 133fbcba..0b4f4556 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/event_feed.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( anypb "google.golang.org/protobuf/types/known/anypb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -74,13 +75,8 @@ func (x EventType) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use EventType.Descriptor instead. -func (EventType) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{0} -} - type ListEventsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Can function like a page token but also can be arbitrary to resume a feed at any point Cursor string `protobuf:"bytes,1,opt,name=cursor,proto3" json:"cursor,omitempty"` StartAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_at,json=startAt,proto3" json:"start_at,omitempty"` @@ -118,11 +114,6 @@ func (x *ListEventsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListEventsRequest.ProtoReflect.Descriptor instead. 
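Under the protoopaque build tag the message fields become unexported (xxx_hidden_*), so code that must compile in both the hybrid and opaque modes goes through the generated getters, setters, and builders rather than struct literals. A small sketch, assuming the package is imported as v2; the id, display name, and slug values are illustrative only:

package main

import (
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

func main() {
	// Builders accept keyed fields only; the zero-width func field in the
	// builder struct blocks unkeyed literals and comparisons.
	ent := v2.Entitlement_builder{
		Id:          "team:1234:member",
		DisplayName: "Team member",
		Purpose:     v2.Entitlement_PURPOSE_VALUE_ASSIGNMENT,
	}.Build()

	// Getters and setters behave identically with and without protoopaque.
	ent.SetSlug("member")
	fmt.Println(ent.GetId(), ent.GetSlug(), ent.GetPurpose())
}
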
-func (*ListEventsRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{0} -} - func (x *ListEventsRequest) GetCursor() string { if x != nil { return x.Cursor @@ -158,8 +149,64 @@ func (x *ListEventsRequest) GetEventFeedId() string { return "" } +func (x *ListEventsRequest) SetCursor(v string) { + x.Cursor = v +} + +func (x *ListEventsRequest) SetStartAt(v *timestamppb.Timestamp) { + x.StartAt = v +} + +func (x *ListEventsRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *ListEventsRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ListEventsRequest) SetEventFeedId(v string) { + x.EventFeedId = v +} + +func (x *ListEventsRequest) HasStartAt() bool { + if x == nil { + return false + } + return x.StartAt != nil +} + +func (x *ListEventsRequest) ClearStartAt() { + x.StartAt = nil +} + +type ListEventsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Can function like a page token but also can be arbitrary to resume a feed at any point + Cursor string + StartAt *timestamppb.Timestamp + PageSize uint32 + Annotations []*anypb.Any + // Used to specify a specific event feed to list events from. + // If not provided, the connector will use the old event feed. + EventFeedId string +} + +func (b0 ListEventsRequest_builder) Build() *ListEventsRequest { + m0 := &ListEventsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Cursor = b.Cursor + x.StartAt = b.StartAt + x.PageSize = b.PageSize + x.Annotations = b.Annotations + x.EventFeedId = b.EventFeedId + return m0 +} + type ListEventsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Events []*Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` Cursor string `protobuf:"bytes,2,opt,name=cursor,proto3" json:"cursor,omitempty"` HasMore bool `protobuf:"varint,3,opt,name=has_more,json=hasMore,proto3" json:"has_more,omitempty"` @@ -193,11 +240,6 @@ func (x *ListEventsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListEventsResponse.ProtoReflect.Descriptor instead. -func (*ListEventsResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{1} -} - func (x *ListEventsResponse) GetEvents() []*Event { if x != nil { return x.Events @@ -226,8 +268,44 @@ func (x *ListEventsResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *ListEventsResponse) SetEvents(v []*Event) { + x.Events = v +} + +func (x *ListEventsResponse) SetCursor(v string) { + x.Cursor = v +} + +func (x *ListEventsResponse) SetHasMore(v bool) { + x.HasMore = v +} + +func (x *ListEventsResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type ListEventsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Events []*Event + Cursor string + HasMore bool + Annotations []*anypb.Any +} + +func (b0 ListEventsResponse_builder) Build() *ListEventsResponse { + m0 := &ListEventsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Events = b.Events + x.Cursor = b.Cursor + x.HasMore = b.HasMore + x.Annotations = b.Annotations + return m0 +} + type ListEventFeedsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -258,11 +336,6 @@ func (x *ListEventFeedsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListEventFeedsRequest.ProtoReflect.Descriptor instead. -func (*ListEventFeedsRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{2} -} - func (x *ListEventFeedsRequest) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -270,8 +343,26 @@ func (x *ListEventFeedsRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *ListEventFeedsRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type ListEventFeedsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 ListEventFeedsRequest_builder) Build() *ListEventFeedsRequest { + m0 := &ListEventFeedsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type ListEventFeedsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` List []*EventFeedMetadata `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -303,11 +394,6 @@ func (x *ListEventFeedsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListEventFeedsResponse.ProtoReflect.Descriptor instead. -func (*ListEventFeedsResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{3} -} - func (x *ListEventFeedsResponse) GetList() []*EventFeedMetadata { if x != nil { return x.List @@ -322,8 +408,32 @@ func (x *ListEventFeedsResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *ListEventFeedsResponse) SetList(v []*EventFeedMetadata) { + x.List = v +} + +func (x *ListEventFeedsResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type ListEventFeedsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*EventFeedMetadata + Annotations []*anypb.Any +} + +func (b0 ListEventFeedsResponse_builder) Build() *ListEventFeedsResponse { + m0 := &ListEventFeedsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.Annotations = b.Annotations + return m0 +} + type Event struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` OccurredAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=occurred_at,json=occurredAt,proto3" json:"occurred_at,omitempty"` // Each event has structs that will usually be mostly empty. 
@@ -369,11 +479,6 @@ func (x *Event) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Event.ProtoReflect.Descriptor instead. -func (*Event) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{4} -} - func (x *Event) GetId() string { if x != nil { return x.Id @@ -456,6 +561,260 @@ func (x *Event) GetAnnotations() []*anypb.Any { return nil } +func (x *Event) SetId(v string) { + x.Id = v +} + +func (x *Event) SetOccurredAt(v *timestamppb.Timestamp) { + x.OccurredAt = v +} + +func (x *Event) SetUsageEvent(v *UsageEvent) { + if v == nil { + x.Event = nil + return + } + x.Event = &Event_UsageEvent{v} +} + +func (x *Event) SetGrantEvent(v *GrantEvent) { + if v == nil { + x.Event = nil + return + } + x.Event = &Event_GrantEvent{v} +} + +func (x *Event) SetRevokeEvent(v *RevokeEvent) { + if v == nil { + x.Event = nil + return + } + x.Event = &Event_RevokeEvent{v} +} + +func (x *Event) SetResourceChangeEvent(v *ResourceChangeEvent) { + if v == nil { + x.Event = nil + return + } + x.Event = &Event_ResourceChangeEvent{v} +} + +func (x *Event) SetCreateGrantEvent(v *CreateGrantEvent) { + if v == nil { + x.Event = nil + return + } + x.Event = &Event_CreateGrantEvent{v} +} + +func (x *Event) SetCreateRevokeEvent(v *CreateRevokeEvent) { + if v == nil { + x.Event = nil + return + } + x.Event = &Event_CreateRevokeEvent{v} +} + +func (x *Event) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Event) HasOccurredAt() bool { + if x == nil { + return false + } + return x.OccurredAt != nil +} + +func (x *Event) HasEvent() bool { + if x == nil { + return false + } + return x.Event != nil +} + +func (x *Event) HasUsageEvent() bool { + if x == nil { + return false + } + _, ok := x.Event.(*Event_UsageEvent) + return ok +} + +func (x *Event) HasGrantEvent() bool { + if x == nil { + return false + } + _, ok := x.Event.(*Event_GrantEvent) + return ok +} + +func (x *Event) HasRevokeEvent() bool { + if x == nil { + return false + } + _, ok := x.Event.(*Event_RevokeEvent) + return ok +} + +func (x *Event) HasResourceChangeEvent() bool { + if x == nil { + return false + } + _, ok := x.Event.(*Event_ResourceChangeEvent) + return ok +} + +func (x *Event) HasCreateGrantEvent() bool { + if x == nil { + return false + } + _, ok := x.Event.(*Event_CreateGrantEvent) + return ok +} + +func (x *Event) HasCreateRevokeEvent() bool { + if x == nil { + return false + } + _, ok := x.Event.(*Event_CreateRevokeEvent) + return ok +} + +func (x *Event) ClearOccurredAt() { + x.OccurredAt = nil +} + +func (x *Event) ClearEvent() { + x.Event = nil +} + +func (x *Event) ClearUsageEvent() { + if _, ok := x.Event.(*Event_UsageEvent); ok { + x.Event = nil + } +} + +func (x *Event) ClearGrantEvent() { + if _, ok := x.Event.(*Event_GrantEvent); ok { + x.Event = nil + } +} + +func (x *Event) ClearRevokeEvent() { + if _, ok := x.Event.(*Event_RevokeEvent); ok { + x.Event = nil + } +} + +func (x *Event) ClearResourceChangeEvent() { + if _, ok := x.Event.(*Event_ResourceChangeEvent); ok { + x.Event = nil + } +} + +func (x *Event) ClearCreateGrantEvent() { + if _, ok := x.Event.(*Event_CreateGrantEvent); ok { + x.Event = nil + } +} + +func (x *Event) ClearCreateRevokeEvent() { + if _, ok := x.Event.(*Event_CreateRevokeEvent); ok { + x.Event = nil + } +} + +const Event_Event_not_set_case case_Event_Event = 0 +const Event_UsageEvent_case case_Event_Event = 100 +const Event_GrantEvent_case case_Event_Event = 101 +const Event_RevokeEvent_case case_Event_Event = 
102 +const Event_ResourceChangeEvent_case case_Event_Event = 103 +const Event_CreateGrantEvent_case case_Event_Event = 104 +const Event_CreateRevokeEvent_case case_Event_Event = 105 + +func (x *Event) WhichEvent() case_Event_Event { + if x == nil { + return Event_Event_not_set_case + } + switch x.Event.(type) { + case *Event_UsageEvent: + return Event_UsageEvent_case + case *Event_GrantEvent: + return Event_GrantEvent_case + case *Event_RevokeEvent: + return Event_RevokeEvent_case + case *Event_ResourceChangeEvent: + return Event_ResourceChangeEvent_case + case *Event_CreateGrantEvent: + return Event_CreateGrantEvent_case + case *Event_CreateRevokeEvent: + return Event_CreateRevokeEvent_case + default: + return Event_Event_not_set_case + } +} + +type Event_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + OccurredAt *timestamppb.Timestamp + // Each event has structs that will usually be mostly empty. + // Stream consumer must be defensive about what it's reading + + // Fields of oneof Event: + UsageEvent *UsageEvent + GrantEvent *GrantEvent + RevokeEvent *RevokeEvent + ResourceChangeEvent *ResourceChangeEvent + CreateGrantEvent *CreateGrantEvent + CreateRevokeEvent *CreateRevokeEvent + // -- end of Event + // May contain resources for targets, actor, or items referenced in events + Annotations []*anypb.Any +} + +func (b0 Event_builder) Build() *Event { + m0 := &Event{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.OccurredAt = b.OccurredAt + if b.UsageEvent != nil { + x.Event = &Event_UsageEvent{b.UsageEvent} + } + if b.GrantEvent != nil { + x.Event = &Event_GrantEvent{b.GrantEvent} + } + if b.RevokeEvent != nil { + x.Event = &Event_RevokeEvent{b.RevokeEvent} + } + if b.ResourceChangeEvent != nil { + x.Event = &Event_ResourceChangeEvent{b.ResourceChangeEvent} + } + if b.CreateGrantEvent != nil { + x.Event = &Event_CreateGrantEvent{b.CreateGrantEvent} + } + if b.CreateRevokeEvent != nil { + x.Event = &Event_CreateRevokeEvent{b.CreateRevokeEvent} + } + x.Annotations = b.Annotations + return m0 +} + +type case_Event_Event protoreflect.FieldNumber + +func (x case_Event_Event) String() string { + md := file_c1_connector_v2_event_feed_proto_msgTypes[4].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isEvent_Event interface { isEvent_Event() } @@ -497,7 +856,7 @@ func (*Event_CreateGrantEvent) isEvent_Event() {} func (*Event_CreateRevokeEvent) isEvent_Event() {} type UsageEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` TargetResource *Resource `protobuf:"bytes,1,opt,name=target_resource,json=targetResource,proto3" json:"target_resource,omitempty"` ActorResource *Resource `protobuf:"bytes,2,opt,name=actor_resource,json=actorResource,proto3" json:"actor_resource,omitempty"` unknownFields protoimpl.UnknownFields @@ -529,11 +888,6 @@ func (x *UsageEvent) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UsageEvent.ProtoReflect.Descriptor instead. 
-func (*UsageEvent) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{5} -} - func (x *UsageEvent) GetTargetResource() *Resource { if x != nil { return x.TargetResource @@ -548,9 +902,55 @@ func (x *UsageEvent) GetActorResource() *Resource { return nil } +func (x *UsageEvent) SetTargetResource(v *Resource) { + x.TargetResource = v +} + +func (x *UsageEvent) SetActorResource(v *Resource) { + x.ActorResource = v +} + +func (x *UsageEvent) HasTargetResource() bool { + if x == nil { + return false + } + return x.TargetResource != nil +} + +func (x *UsageEvent) HasActorResource() bool { + if x == nil { + return false + } + return x.ActorResource != nil +} + +func (x *UsageEvent) ClearTargetResource() { + x.TargetResource = nil +} + +func (x *UsageEvent) ClearActorResource() { + x.ActorResource = nil +} + +type UsageEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + TargetResource *Resource + ActorResource *Resource +} + +func (b0 UsageEvent_builder) Build() *UsageEvent { + m0 := &UsageEvent{} + b, x := &b0, m0 + _, _ = b, x + x.TargetResource = b.TargetResource + x.ActorResource = b.ActorResource + return m0 +} + // Unrelated to this effort, but valuable for other projects type GrantEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Grant *Grant `protobuf:"bytes,1,opt,name=grant,proto3" json:"grant,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -581,11 +981,6 @@ func (x *GrantEvent) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantEvent.ProtoReflect.Descriptor instead. -func (*GrantEvent) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{6} -} - func (x *GrantEvent) GetGrant() *Grant { if x != nil { return x.Grant @@ -593,8 +988,37 @@ func (x *GrantEvent) GetGrant() *Grant { return nil } +func (x *GrantEvent) SetGrant(v *Grant) { + x.Grant = v +} + +func (x *GrantEvent) HasGrant() bool { + if x == nil { + return false + } + return x.Grant != nil +} + +func (x *GrantEvent) ClearGrant() { + x.Grant = nil +} + +type GrantEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Grant *Grant +} + +func (b0 GrantEvent_builder) Build() *GrantEvent { + m0 := &GrantEvent{} + b, x := &b0, m0 + _, _ = b, x + x.Grant = b.Grant + return m0 +} + type CreateGrantEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3" json:"principal,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -627,11 +1051,6 @@ func (x *CreateGrantEvent) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateGrantEvent.ProtoReflect.Descriptor instead. 
-func (*CreateGrantEvent) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{7} -} - func (x *CreateGrantEvent) GetEntitlement() *Entitlement { if x != nil { return x.Entitlement @@ -653,8 +1072,60 @@ func (x *CreateGrantEvent) GetAnnotations() []*anypb.Any { return nil } +func (x *CreateGrantEvent) SetEntitlement(v *Entitlement) { + x.Entitlement = v +} + +func (x *CreateGrantEvent) SetPrincipal(v *Resource) { + x.Principal = v +} + +func (x *CreateGrantEvent) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *CreateGrantEvent) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} + +func (x *CreateGrantEvent) HasPrincipal() bool { + if x == nil { + return false + } + return x.Principal != nil +} + +func (x *CreateGrantEvent) ClearEntitlement() { + x.Entitlement = nil +} + +func (x *CreateGrantEvent) ClearPrincipal() { + x.Principal = nil +} + +type CreateGrantEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entitlement *Entitlement + Principal *Resource + Annotations []*anypb.Any +} + +func (b0 CreateGrantEvent_builder) Build() *CreateGrantEvent { + m0 := &CreateGrantEvent{} + b, x := &b0, m0 + _, _ = b, x + x.Entitlement = b.Entitlement + x.Principal = b.Principal + x.Annotations = b.Annotations + return m0 +} + type CreateRevokeEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3" json:"principal,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -687,11 +1158,6 @@ func (x *CreateRevokeEvent) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateRevokeEvent.ProtoReflect.Descriptor instead. -func (*CreateRevokeEvent) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{8} -} - func (x *CreateRevokeEvent) GetEntitlement() *Entitlement { if x != nil { return x.Entitlement @@ -713,8 +1179,60 @@ func (x *CreateRevokeEvent) GetAnnotations() []*anypb.Any { return nil } +func (x *CreateRevokeEvent) SetEntitlement(v *Entitlement) { + x.Entitlement = v +} + +func (x *CreateRevokeEvent) SetPrincipal(v *Resource) { + x.Principal = v +} + +func (x *CreateRevokeEvent) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *CreateRevokeEvent) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} + +func (x *CreateRevokeEvent) HasPrincipal() bool { + if x == nil { + return false + } + return x.Principal != nil +} + +func (x *CreateRevokeEvent) ClearEntitlement() { + x.Entitlement = nil +} + +func (x *CreateRevokeEvent) ClearPrincipal() { + x.Principal = nil +} + +type CreateRevokeEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entitlement *Entitlement + Principal *Resource + Annotations []*anypb.Any +} + +func (b0 CreateRevokeEvent_builder) Build() *CreateRevokeEvent { + m0 := &CreateRevokeEvent{} + b, x := &b0, m0 + _, _ = b, x + x.Entitlement = b.Entitlement + x.Principal = b.Principal + x.Annotations = b.Annotations + return m0 +} + type RevokeEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3" json:"principal,omitempty"` unknownFields protoimpl.UnknownFields @@ -746,11 +1264,6 @@ func (x *RevokeEvent) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RevokeEvent.ProtoReflect.Descriptor instead. -func (*RevokeEvent) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{9} -} - func (x *RevokeEvent) GetEntitlement() *Entitlement { if x != nil { return x.Entitlement @@ -765,9 +1278,55 @@ func (x *RevokeEvent) GetPrincipal() *Resource { return nil } +func (x *RevokeEvent) SetEntitlement(v *Entitlement) { + x.Entitlement = v +} + +func (x *RevokeEvent) SetPrincipal(v *Resource) { + x.Principal = v +} + +func (x *RevokeEvent) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} + +func (x *RevokeEvent) HasPrincipal() bool { + if x == nil { + return false + } + return x.Principal != nil +} + +func (x *RevokeEvent) ClearEntitlement() { + x.Entitlement = nil +} + +func (x *RevokeEvent) ClearPrincipal() { + x.Principal = nil +} + +type RevokeEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entitlement *Entitlement + Principal *Resource +} + +func (b0 RevokeEvent_builder) Build() *RevokeEvent { + m0 := &RevokeEvent{} + b, x := &b0, m0 + _, _ = b, x + x.Entitlement = b.Entitlement + x.Principal = b.Principal + return m0 +} + // generic light weight event indicating a resource was changed type ResourceChangeEvent struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` unknownFields protoimpl.UnknownFields @@ -799,11 +1358,6 @@ func (x *ResourceChangeEvent) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ResourceChangeEvent.ProtoReflect.Descriptor instead. 
-func (*ResourceChangeEvent) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{10} -} - func (x *ResourceChangeEvent) GetResourceId() *ResourceId { if x != nil { return x.ResourceId @@ -818,8 +1372,54 @@ func (x *ResourceChangeEvent) GetParentResourceId() *ResourceId { return nil } +func (x *ResourceChangeEvent) SetResourceId(v *ResourceId) { + x.ResourceId = v +} + +func (x *ResourceChangeEvent) SetParentResourceId(v *ResourceId) { + x.ParentResourceId = v +} + +func (x *ResourceChangeEvent) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *ResourceChangeEvent) HasParentResourceId() bool { + if x == nil { + return false + } + return x.ParentResourceId != nil +} + +func (x *ResourceChangeEvent) ClearResourceId() { + x.ResourceId = nil +} + +func (x *ResourceChangeEvent) ClearParentResourceId() { + x.ParentResourceId = nil +} + +type ResourceChangeEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + ParentResourceId *ResourceId +} + +func (b0 ResourceChangeEvent_builder) Build() *ResourceChangeEvent { + m0 := &ResourceChangeEvent{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.ParentResourceId = b.ParentResourceId + return m0 +} + type EventFeedMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // unique identifier for the event feed Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` SupportedEventTypes []EventType `protobuf:"varint,2,rep,packed,name=supported_event_types,json=supportedEventTypes,proto3,enum=c1.connector.v2.EventType" json:"supported_event_types,omitempty"` @@ -852,11 +1452,6 @@ func (x *EventFeedMetadata) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EventFeedMetadata.ProtoReflect.Descriptor instead. 
-func (*EventFeedMetadata) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{11} -} - func (x *EventFeedMetadata) GetId() string { if x != nil { return x.Id @@ -871,214 +1466,107 @@ func (x *EventFeedMetadata) GetSupportedEventTypes() []EventType { return nil } -var File_c1_connector_v2_event_feed_proto protoreflect.FileDescriptor +func (x *EventFeedMetadata) SetId(v string) { + x.Id = v +} -var file_c1_connector_v2_event_feed_proto_rawDesc = string([]byte{ - 0x0a, 0x20, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x65, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x1a, 0x21, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x02, 0x0a, 0x11, 0x4c, 0x69, 0x73, - 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, - 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, - 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x20, 0xd0, 0x01, 0x01, 0x52, 0x06, 0x63, - 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x35, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x74, 0x12, 0x27, 0x0a, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, - 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, 0x40, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x31, 0x0a, - 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x65, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x08, - 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64, 0x49, 0x64, - 
0x22, 0xbe, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, - 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, - 0x28, 0x80, 0x20, 0xd0, 0x01, 0x01, 0x52, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x19, - 0x0a, 0x08, 0x68, 0x61, 0x73, 0x5f, 0x6d, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x07, 0x68, 0x61, 0x73, 0x4d, 0x6f, 0x72, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x4f, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, - 0x65, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x46, 0x65, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, - 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x04, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xdd, 0x04, - 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x64, 0x41, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x73, 0x61, 0x67, - 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x18, 
0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x0c, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x76, - 0x6f, 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, 0x76, 0x6f, - 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x5a, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x13, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x67, 0x72, - 0x61, 0x6e, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x48, 0x00, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x54, 0x0a, 0x13, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x5f, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x69, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x76, 0x6f, - 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x92, 0x01, - 0x0a, 0x0a, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x42, 0x0a, 0x0f, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x40, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x0d, 0x61, 0x63, 0x74, 
0x6f, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x22, 0x3a, 0x0a, 0x0a, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x12, 0x2c, 0x0a, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x22, 0xd7, - 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, - 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, - 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, - 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd8, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x48, - 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, - 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, - 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 
0x74, 0x6c, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x22, 0xa8, 0x01, 0x0a, - 0x13, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, - 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x12, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x22, 0x90, 0x01, 0x0a, 0x11, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x46, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, - 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, 0x02, 0x69, 0x64, 0x12, 0x5f, 0x0a, 0x15, 0x73, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x92, 0x01, 0x09, 0x18, 0x01, 0x22, 0x05, - 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x2a, 0xa4, 0x01, 0x0a, 0x09, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x53, 0x41, 0x47, 0x45, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, - 0x45, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x04, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, - 0x47, 0x52, 0x41, 0x4e, 0x54, 0x10, 0x05, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x52, 0x45, 0x56, - 0x4f, 0x4b, 0x45, 0x10, 0x06, 0x22, 0x04, 0x08, 0x02, 0x10, 0x02, 0x22, 0x04, 0x08, 0x03, 0x10, - 0x03, 0x32, 0xc8, 0x01, 0x0a, 0x0c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x55, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 
0x12, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, 0x0e, 0x4c, 0x69, 0x73, - 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64, 0x73, 0x12, 0x26, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, - 0x65, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, - 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, - 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +func (x *EventFeedMetadata) SetSupportedEventTypes(v []EventType) { + x.SupportedEventTypes = v +} -var ( - file_c1_connector_v2_event_feed_proto_rawDescOnce sync.Once - file_c1_connector_v2_event_feed_proto_rawDescData []byte -) +type EventFeedMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-func file_c1_connector_v2_event_feed_proto_rawDescGZIP() []byte { - file_c1_connector_v2_event_feed_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_event_feed_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_event_feed_proto_rawDesc), len(file_c1_connector_v2_event_feed_proto_rawDesc))) - }) - return file_c1_connector_v2_event_feed_proto_rawDescData + // unique identifier for the event feed + Id string + SupportedEventTypes []EventType } +func (b0 EventFeedMetadata_builder) Build() *EventFeedMetadata { + m0 := &EventFeedMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.SupportedEventTypes = b.SupportedEventTypes + return m0 +} + +var File_c1_connector_v2_event_feed_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_event_feed_proto_rawDesc = "" + + "\n" + + " c1/connector/v2/event_feed.proto\x12\x0fc1.connector.v2\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\x85\x02\n" + + "\x11ListEventsRequest\x12%\n" + + "\x06cursor\x18\x01 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\x06cursor\x125\n" + + "\bstart_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x12'\n" + + "\tpage_size\x18\x03 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x121\n" + + "\revent_feed_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\veventFeedId\"\xbe\x01\n" + + "\x12ListEventsResponse\x12.\n" + + "\x06events\x18\x01 \x03(\v2\x16.c1.connector.v2.EventR\x06events\x12%\n" + + "\x06cursor\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\x06cursor\x12\x19\n" + + "\bhas_more\x18\x03 \x01(\bR\ahasMore\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"O\n" + + "\x15ListEventFeedsRequest\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x88\x01\n" + + "\x16ListEventFeedsResponse\x126\n" + + "\x04list\x18\x01 \x03(\v2\".c1.connector.v2.EventFeedMetadataR\x04list\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xdd\x04\n" + + "\x05Event\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12;\n" + + "\voccurred_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "occurredAt\x12>\n" + + "\vusage_event\x18d \x01(\v2\x1b.c1.connector.v2.UsageEventH\x00R\n" + + "usageEvent\x12>\n" + + "\vgrant_event\x18e \x01(\v2\x1b.c1.connector.v2.GrantEventH\x00R\n" + + "grantEvent\x12A\n" + + "\frevoke_event\x18f \x01(\v2\x1c.c1.connector.v2.RevokeEventH\x00R\vrevokeEvent\x12Z\n" + + "\x15resource_change_event\x18g \x01(\v2$.c1.connector.v2.ResourceChangeEventH\x00R\x13resourceChangeEvent\x12Q\n" + + "\x12create_grant_event\x18h \x01(\v2!.c1.connector.v2.CreateGrantEventH\x00R\x10createGrantEvent\x12T\n" + + "\x13create_revoke_event\x18i \x01(\v2\".c1.connector.v2.CreateRevokeEventH\x00R\x11createRevokeEvent\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotationsB\a\n" + + "\x05event\"\x92\x01\n" + + "\n" + + "UsageEvent\x12B\n" + + "\x0ftarget_resource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\x0etargetResource\x12@\n" + + "\x0eactor_resource\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceR\ractorResource\":\n" + + "\n" + + "GrantEvent\x12,\n" + + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantR\x05grant\"\xd7\x01\n" + + 
"\x10CreateGrantEvent\x12H\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12A\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tprincipal\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xd8\x01\n" + + "\x11CreateRevokeEvent\x12H\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12A\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tprincipal\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x86\x01\n" + + "\vRevokeEvent\x12>\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementR\ventitlement\x127\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceR\tprincipal\"\xa8\x01\n" + + "\x13ResourceChangeEvent\x12F\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x01R\n" + + "resourceId\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\"\x90\x01\n" + + "\x11EventFeedMetadata\x12\x1a\n" + + "\x02id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x12_\n" + + "\x15supported_event_types\x18\x02 \x03(\x0e2\x1a.c1.connector.v2.EventTypeB\x0f\xfaB\f\x92\x01\t\x18\x01\"\x05\x82\x01\x02\x10\x01R\x13supportedEventTypes*\xa4\x01\n" + + "\tEventType\x12\x1a\n" + + "\x16EVENT_TYPE_UNSPECIFIED\x10\x00\x12\x14\n" + + "\x10EVENT_TYPE_USAGE\x10\x01\x12\x1e\n" + + "\x1aEVENT_TYPE_RESOURCE_CHANGE\x10\x04\x12\x1b\n" + + "\x17EVENT_TYPE_CREATE_GRANT\x10\x05\x12\x1c\n" + + "\x18EVENT_TYPE_CREATE_REVOKE\x10\x06\"\x04\b\x02\x10\x02\"\x04\b\x03\x10\x032\xc8\x01\n" + + "\fEventService\x12U\n" + + "\n" + + "ListEvents\x12\".c1.connector.v2.ListEventsRequest\x1a#.c1.connector.v2.ListEventsResponse\x12a\n" + + "\x0eListEventFeeds\x12&.c1.connector.v2.ListEventFeedsRequest\x1a'.c1.connector.v2.ListEventFeedsResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_event_feed_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_c1_connector_v2_event_feed_proto_msgTypes = make([]protoimpl.MessageInfo, 12) var file_c1_connector_v2_event_feed_proto_goTypes = []any{ diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed_protoopaque.pb.go new file mode 100644 index 00000000..6433eddf --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed_protoopaque.pb.go @@ -0,0 +1,1663 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/event_feed.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EventType int32 + +const ( + EventType_EVENT_TYPE_UNSPECIFIED EventType = 0 + EventType_EVENT_TYPE_USAGE EventType = 1 + EventType_EVENT_TYPE_RESOURCE_CHANGE EventType = 4 + EventType_EVENT_TYPE_CREATE_GRANT EventType = 5 + EventType_EVENT_TYPE_CREATE_REVOKE EventType = 6 +) + +// Enum value maps for EventType. +var ( + EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNSPECIFIED", + 1: "EVENT_TYPE_USAGE", + 4: "EVENT_TYPE_RESOURCE_CHANGE", + 5: "EVENT_TYPE_CREATE_GRANT", + 6: "EVENT_TYPE_CREATE_REVOKE", + } + EventType_value = map[string]int32{ + "EVENT_TYPE_UNSPECIFIED": 0, + "EVENT_TYPE_USAGE": 1, + "EVENT_TYPE_RESOURCE_CHANGE": 4, + "EVENT_TYPE_CREATE_GRANT": 5, + "EVENT_TYPE_CREATE_REVOKE": 6, + } +) + +func (x EventType) Enum() *EventType { + p := new(EventType) + *p = x + return p +} + +func (x EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EventType) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_event_feed_proto_enumTypes[0].Descriptor() +} + +func (EventType) Type() protoreflect.EnumType { + return &file_c1_connector_v2_event_feed_proto_enumTypes[0] +} + +func (x EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type ListEventsRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Cursor string `protobuf:"bytes,1,opt,name=cursor,proto3"` + xxx_hidden_StartAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_at,json=startAt,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_EventFeedId string `protobuf:"bytes,5,opt,name=event_feed_id,json=eventFeedId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListEventsRequest) Reset() { + *x = ListEventsRequest{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListEventsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEventsRequest) ProtoMessage() {} + +func (x *ListEventsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ListEventsRequest) GetCursor() string { + if x != nil { + return x.xxx_hidden_Cursor + } + return "" +} + +func (x *ListEventsRequest) GetStartAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_StartAt + } + return nil +} + +func (x *ListEventsRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *ListEventsRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ListEventsRequest) GetEventFeedId() string { + if x != nil { + return x.xxx_hidden_EventFeedId + } + return "" +} + +func (x *ListEventsRequest) SetCursor(v string) { + x.xxx_hidden_Cursor = v +} + +func (x *ListEventsRequest) SetStartAt(v *timestamppb.Timestamp) { + x.xxx_hidden_StartAt = v +} + +func (x *ListEventsRequest) SetPageSize(v uint32) { + 
x.xxx_hidden_PageSize = v +} + +func (x *ListEventsRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ListEventsRequest) SetEventFeedId(v string) { + x.xxx_hidden_EventFeedId = v +} + +func (x *ListEventsRequest) HasStartAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_StartAt != nil +} + +func (x *ListEventsRequest) ClearStartAt() { + x.xxx_hidden_StartAt = nil +} + +type ListEventsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Can function like a page token but also can be arbitrary to resume a feed at any point + Cursor string + StartAt *timestamppb.Timestamp + PageSize uint32 + Annotations []*anypb.Any + // Used to specify a specific event feed to list events from. + // If not provided, the connector will use the old event feed. + EventFeedId string +} + +func (b0 ListEventsRequest_builder) Build() *ListEventsRequest { + m0 := &ListEventsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Cursor = b.Cursor + x.xxx_hidden_StartAt = b.StartAt + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_EventFeedId = b.EventFeedId + return m0 +} + +type ListEventsResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Events *[]*Event `protobuf:"bytes,1,rep,name=events,proto3"` + xxx_hidden_Cursor string `protobuf:"bytes,2,opt,name=cursor,proto3"` + xxx_hidden_HasMore bool `protobuf:"varint,3,opt,name=has_more,json=hasMore,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListEventsResponse) Reset() { + *x = ListEventsResponse{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListEventsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEventsResponse) ProtoMessage() {} + +func (x *ListEventsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ListEventsResponse) GetEvents() []*Event { + if x != nil { + if x.xxx_hidden_Events != nil { + return *x.xxx_hidden_Events + } + } + return nil +} + +func (x *ListEventsResponse) GetCursor() string { + if x != nil { + return x.xxx_hidden_Cursor + } + return "" +} + +func (x *ListEventsResponse) GetHasMore() bool { + if x != nil { + return x.xxx_hidden_HasMore + } + return false +} + +func (x *ListEventsResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ListEventsResponse) SetEvents(v []*Event) { + x.xxx_hidden_Events = &v +} + +func (x *ListEventsResponse) SetCursor(v string) { + x.xxx_hidden_Cursor = v +} + +func (x *ListEventsResponse) SetHasMore(v bool) { + x.xxx_hidden_HasMore = v +} + +func (x *ListEventsResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type ListEventsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Events []*Event + Cursor string + HasMore bool + Annotations []*anypb.Any +} + +func (b0 ListEventsResponse_builder) Build() *ListEventsResponse { + m0 := &ListEventsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Events = &b.Events + x.xxx_hidden_Cursor = b.Cursor + x.xxx_hidden_HasMore = b.HasMore + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ListEventFeedsRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListEventFeedsRequest) Reset() { + *x = ListEventFeedsRequest{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListEventFeedsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEventFeedsRequest) ProtoMessage() {} + +func (x *ListEventFeedsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ListEventFeedsRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ListEventFeedsRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type ListEventFeedsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 ListEventFeedsRequest_builder) Build() *ListEventFeedsRequest { + m0 := &ListEventFeedsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ListEventFeedsResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*EventFeedMetadata `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListEventFeedsResponse) Reset() { + *x = ListEventFeedsResponse{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListEventFeedsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEventFeedsResponse) ProtoMessage() {} + +func (x *ListEventFeedsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ListEventFeedsResponse) GetList() []*EventFeedMetadata { + if x != nil { + if x.xxx_hidden_List != nil { + return *x.xxx_hidden_List + } + } + return nil +} + +func (x *ListEventFeedsResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ListEventFeedsResponse) SetList(v []*EventFeedMetadata) { + x.xxx_hidden_List = &v +} + +func (x *ListEventFeedsResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type 
ListEventFeedsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*EventFeedMetadata + Annotations []*anypb.Any +} + +func (b0 ListEventFeedsResponse_builder) Build() *ListEventFeedsResponse { + m0 := &ListEventFeedsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Event struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_OccurredAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=occurred_at,json=occurredAt,proto3"` + xxx_hidden_Event isEvent_Event `protobuf_oneof:"event"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Event) Reset() { + *x = Event{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Event) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *Event) GetOccurredAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_OccurredAt + } + return nil +} + +func (x *Event) GetUsageEvent() *UsageEvent { + if x != nil { + if x, ok := x.xxx_hidden_Event.(*event_UsageEvent); ok { + return x.UsageEvent + } + } + return nil +} + +func (x *Event) GetGrantEvent() *GrantEvent { + if x != nil { + if x, ok := x.xxx_hidden_Event.(*event_GrantEvent); ok { + return x.GrantEvent + } + } + return nil +} + +func (x *Event) GetRevokeEvent() *RevokeEvent { + if x != nil { + if x, ok := x.xxx_hidden_Event.(*event_RevokeEvent); ok { + return x.RevokeEvent + } + } + return nil +} + +func (x *Event) GetResourceChangeEvent() *ResourceChangeEvent { + if x != nil { + if x, ok := x.xxx_hidden_Event.(*event_ResourceChangeEvent); ok { + return x.ResourceChangeEvent + } + } + return nil +} + +func (x *Event) GetCreateGrantEvent() *CreateGrantEvent { + if x != nil { + if x, ok := x.xxx_hidden_Event.(*event_CreateGrantEvent); ok { + return x.CreateGrantEvent + } + } + return nil +} + +func (x *Event) GetCreateRevokeEvent() *CreateRevokeEvent { + if x != nil { + if x, ok := x.xxx_hidden_Event.(*event_CreateRevokeEvent); ok { + return x.CreateRevokeEvent + } + } + return nil +} + +func (x *Event) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Event) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *Event) SetOccurredAt(v *timestamppb.Timestamp) { + x.xxx_hidden_OccurredAt = v +} + +func (x *Event) SetUsageEvent(v *UsageEvent) { + if v == nil { + x.xxx_hidden_Event = nil + return + } + x.xxx_hidden_Event = &event_UsageEvent{v} +} + +func (x *Event) SetGrantEvent(v *GrantEvent) { + if v == nil { + x.xxx_hidden_Event = nil + return + } + x.xxx_hidden_Event = &event_GrantEvent{v} +} + +func (x *Event) SetRevokeEvent(v *RevokeEvent) { + if v == nil { + x.xxx_hidden_Event = nil + 
return + } + x.xxx_hidden_Event = &event_RevokeEvent{v} +} + +func (x *Event) SetResourceChangeEvent(v *ResourceChangeEvent) { + if v == nil { + x.xxx_hidden_Event = nil + return + } + x.xxx_hidden_Event = &event_ResourceChangeEvent{v} +} + +func (x *Event) SetCreateGrantEvent(v *CreateGrantEvent) { + if v == nil { + x.xxx_hidden_Event = nil + return + } + x.xxx_hidden_Event = &event_CreateGrantEvent{v} +} + +func (x *Event) SetCreateRevokeEvent(v *CreateRevokeEvent) { + if v == nil { + x.xxx_hidden_Event = nil + return + } + x.xxx_hidden_Event = &event_CreateRevokeEvent{v} +} + +func (x *Event) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Event) HasOccurredAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_OccurredAt != nil +} + +func (x *Event) HasEvent() bool { + if x == nil { + return false + } + return x.xxx_hidden_Event != nil +} + +func (x *Event) HasUsageEvent() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Event.(*event_UsageEvent) + return ok +} + +func (x *Event) HasGrantEvent() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Event.(*event_GrantEvent) + return ok +} + +func (x *Event) HasRevokeEvent() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Event.(*event_RevokeEvent) + return ok +} + +func (x *Event) HasResourceChangeEvent() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Event.(*event_ResourceChangeEvent) + return ok +} + +func (x *Event) HasCreateGrantEvent() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Event.(*event_CreateGrantEvent) + return ok +} + +func (x *Event) HasCreateRevokeEvent() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Event.(*event_CreateRevokeEvent) + return ok +} + +func (x *Event) ClearOccurredAt() { + x.xxx_hidden_OccurredAt = nil +} + +func (x *Event) ClearEvent() { + x.xxx_hidden_Event = nil +} + +func (x *Event) ClearUsageEvent() { + if _, ok := x.xxx_hidden_Event.(*event_UsageEvent); ok { + x.xxx_hidden_Event = nil + } +} + +func (x *Event) ClearGrantEvent() { + if _, ok := x.xxx_hidden_Event.(*event_GrantEvent); ok { + x.xxx_hidden_Event = nil + } +} + +func (x *Event) ClearRevokeEvent() { + if _, ok := x.xxx_hidden_Event.(*event_RevokeEvent); ok { + x.xxx_hidden_Event = nil + } +} + +func (x *Event) ClearResourceChangeEvent() { + if _, ok := x.xxx_hidden_Event.(*event_ResourceChangeEvent); ok { + x.xxx_hidden_Event = nil + } +} + +func (x *Event) ClearCreateGrantEvent() { + if _, ok := x.xxx_hidden_Event.(*event_CreateGrantEvent); ok { + x.xxx_hidden_Event = nil + } +} + +func (x *Event) ClearCreateRevokeEvent() { + if _, ok := x.xxx_hidden_Event.(*event_CreateRevokeEvent); ok { + x.xxx_hidden_Event = nil + } +} + +const Event_Event_not_set_case case_Event_Event = 0 +const Event_UsageEvent_case case_Event_Event = 100 +const Event_GrantEvent_case case_Event_Event = 101 +const Event_RevokeEvent_case case_Event_Event = 102 +const Event_ResourceChangeEvent_case case_Event_Event = 103 +const Event_CreateGrantEvent_case case_Event_Event = 104 +const Event_CreateRevokeEvent_case case_Event_Event = 105 + +func (x *Event) WhichEvent() case_Event_Event { + if x == nil { + return Event_Event_not_set_case + } + switch x.xxx_hidden_Event.(type) { + case *event_UsageEvent: + return Event_UsageEvent_case + case *event_GrantEvent: + return Event_GrantEvent_case + case *event_RevokeEvent: + return Event_RevokeEvent_case + case *event_ResourceChangeEvent: + return Event_ResourceChangeEvent_case 
+ case *event_CreateGrantEvent: + return Event_CreateGrantEvent_case + case *event_CreateRevokeEvent: + return Event_CreateRevokeEvent_case + default: + return Event_Event_not_set_case + } +} + +type Event_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + OccurredAt *timestamppb.Timestamp + // Each event has structs that will usually be mostly empty. + // Stream consumer must be defensive about what it's reading + + // Fields of oneof xxx_hidden_Event: + UsageEvent *UsageEvent + GrantEvent *GrantEvent + RevokeEvent *RevokeEvent + ResourceChangeEvent *ResourceChangeEvent + CreateGrantEvent *CreateGrantEvent + CreateRevokeEvent *CreateRevokeEvent + // -- end of xxx_hidden_Event + // May contain resources for targets, actor, or items referenced in events + Annotations []*anypb.Any +} + +func (b0 Event_builder) Build() *Event { + m0 := &Event{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_OccurredAt = b.OccurredAt + if b.UsageEvent != nil { + x.xxx_hidden_Event = &event_UsageEvent{b.UsageEvent} + } + if b.GrantEvent != nil { + x.xxx_hidden_Event = &event_GrantEvent{b.GrantEvent} + } + if b.RevokeEvent != nil { + x.xxx_hidden_Event = &event_RevokeEvent{b.RevokeEvent} + } + if b.ResourceChangeEvent != nil { + x.xxx_hidden_Event = &event_ResourceChangeEvent{b.ResourceChangeEvent} + } + if b.CreateGrantEvent != nil { + x.xxx_hidden_Event = &event_CreateGrantEvent{b.CreateGrantEvent} + } + if b.CreateRevokeEvent != nil { + x.xxx_hidden_Event = &event_CreateRevokeEvent{b.CreateRevokeEvent} + } + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type case_Event_Event protoreflect.FieldNumber + +func (x case_Event_Event) String() string { + md := file_c1_connector_v2_event_feed_proto_msgTypes[4].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isEvent_Event interface { + isEvent_Event() +} + +type event_UsageEvent struct { + UsageEvent *UsageEvent `protobuf:"bytes,100,opt,name=usage_event,json=usageEvent,proto3,oneof"` +} + +type event_GrantEvent struct { + GrantEvent *GrantEvent `protobuf:"bytes,101,opt,name=grant_event,json=grantEvent,proto3,oneof"` +} + +type event_RevokeEvent struct { + RevokeEvent *RevokeEvent `protobuf:"bytes,102,opt,name=revoke_event,json=revokeEvent,proto3,oneof"` +} + +type event_ResourceChangeEvent struct { + ResourceChangeEvent *ResourceChangeEvent `protobuf:"bytes,103,opt,name=resource_change_event,json=resourceChangeEvent,proto3,oneof"` +} + +type event_CreateGrantEvent struct { + CreateGrantEvent *CreateGrantEvent `protobuf:"bytes,104,opt,name=create_grant_event,json=createGrantEvent,proto3,oneof"` +} + +type event_CreateRevokeEvent struct { + CreateRevokeEvent *CreateRevokeEvent `protobuf:"bytes,105,opt,name=create_revoke_event,json=createRevokeEvent,proto3,oneof"` +} + +func (*event_UsageEvent) isEvent_Event() {} + +func (*event_GrantEvent) isEvent_Event() {} + +func (*event_RevokeEvent) isEvent_Event() {} + +func (*event_ResourceChangeEvent) isEvent_Event() {} + +func (*event_CreateGrantEvent) isEvent_Event() {} + +func (*event_CreateRevokeEvent) isEvent_Event() {} + +type UsageEvent struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_TargetResource *Resource `protobuf:"bytes,1,opt,name=target_resource,json=targetResource,proto3"` + xxx_hidden_ActorResource *Resource `protobuf:"bytes,2,opt,name=actor_resource,json=actorResource,proto3"` + unknownFields 
protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UsageEvent) Reset() { + *x = UsageEvent{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UsageEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UsageEvent) ProtoMessage() {} + +func (x *UsageEvent) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *UsageEvent) GetTargetResource() *Resource { + if x != nil { + return x.xxx_hidden_TargetResource + } + return nil +} + +func (x *UsageEvent) GetActorResource() *Resource { + if x != nil { + return x.xxx_hidden_ActorResource + } + return nil +} + +func (x *UsageEvent) SetTargetResource(v *Resource) { + x.xxx_hidden_TargetResource = v +} + +func (x *UsageEvent) SetActorResource(v *Resource) { + x.xxx_hidden_ActorResource = v +} + +func (x *UsageEvent) HasTargetResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_TargetResource != nil +} + +func (x *UsageEvent) HasActorResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_ActorResource != nil +} + +func (x *UsageEvent) ClearTargetResource() { + x.xxx_hidden_TargetResource = nil +} + +func (x *UsageEvent) ClearActorResource() { + x.xxx_hidden_ActorResource = nil +} + +type UsageEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + TargetResource *Resource + ActorResource *Resource +} + +func (b0 UsageEvent_builder) Build() *UsageEvent { + m0 := &UsageEvent{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_TargetResource = b.TargetResource + x.xxx_hidden_ActorResource = b.ActorResource + return m0 +} + +// Unrelated to this effort, but valuable for other projects +type GrantEvent struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Grant *Grant `protobuf:"bytes,1,opt,name=grant,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantEvent) Reset() { + *x = GrantEvent{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantEvent) ProtoMessage() {} + +func (x *GrantEvent) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantEvent) GetGrant() *Grant { + if x != nil { + return x.xxx_hidden_Grant + } + return nil +} + +func (x *GrantEvent) SetGrant(v *Grant) { + x.xxx_hidden_Grant = v +} + +func (x *GrantEvent) HasGrant() bool { + if x == nil { + return false + } + return x.xxx_hidden_Grant != nil +} + +func (x *GrantEvent) ClearGrant() { + x.xxx_hidden_Grant = nil +} + +type GrantEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Grant *Grant +} + +func (b0 GrantEvent_builder) Build() *GrantEvent { + m0 := &GrantEvent{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Grant = b.Grant + return m0 +} + +type CreateGrantEvent struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` + xxx_hidden_Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateGrantEvent) Reset() { + *x = CreateGrantEvent{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateGrantEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateGrantEvent) ProtoMessage() {} + +func (x *CreateGrantEvent) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateGrantEvent) GetEntitlement() *Entitlement { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *CreateGrantEvent) GetPrincipal() *Resource { + if x != nil { + return x.xxx_hidden_Principal + } + return nil +} + +func (x *CreateGrantEvent) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *CreateGrantEvent) SetEntitlement(v *Entitlement) { + x.xxx_hidden_Entitlement = v +} + +func (x *CreateGrantEvent) SetPrincipal(v *Resource) { + x.xxx_hidden_Principal = v +} + +func (x *CreateGrantEvent) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *CreateGrantEvent) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *CreateGrantEvent) HasPrincipal() bool { + if x == nil { + return false + } + return x.xxx_hidden_Principal != nil +} + +func (x *CreateGrantEvent) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +func (x *CreateGrantEvent) ClearPrincipal() { + x.xxx_hidden_Principal = nil +} + +type CreateGrantEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entitlement *Entitlement + Principal *Resource + Annotations []*anypb.Any +} + +func (b0 CreateGrantEvent_builder) Build() *CreateGrantEvent { + m0 := &CreateGrantEvent{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entitlement = b.Entitlement + x.xxx_hidden_Principal = b.Principal + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type CreateRevokeEvent struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` + xxx_hidden_Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateRevokeEvent) Reset() { + *x = CreateRevokeEvent{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateRevokeEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateRevokeEvent) ProtoMessage() {} + +func (x *CreateRevokeEvent) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateRevokeEvent) GetEntitlement() *Entitlement { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *CreateRevokeEvent) GetPrincipal() *Resource { + if x != nil { + return x.xxx_hidden_Principal + } + return nil +} + +func (x *CreateRevokeEvent) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *CreateRevokeEvent) SetEntitlement(v *Entitlement) { + x.xxx_hidden_Entitlement = v +} + +func (x *CreateRevokeEvent) SetPrincipal(v *Resource) { + x.xxx_hidden_Principal = v +} + +func (x *CreateRevokeEvent) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *CreateRevokeEvent) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *CreateRevokeEvent) HasPrincipal() bool { + if x == nil { + return false + } + return x.xxx_hidden_Principal != nil +} + +func (x *CreateRevokeEvent) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +func (x *CreateRevokeEvent) ClearPrincipal() { + x.xxx_hidden_Principal = nil +} + +type CreateRevokeEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entitlement *Entitlement + Principal *Resource + Annotations []*anypb.Any +} + +func (b0 CreateRevokeEvent_builder) Build() *CreateRevokeEvent { + m0 := &CreateRevokeEvent{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entitlement = b.Entitlement + x.xxx_hidden_Principal = b.Principal + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type RevokeEvent struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` + xxx_hidden_Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RevokeEvent) Reset() { + *x = RevokeEvent{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RevokeEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RevokeEvent) ProtoMessage() {} + +func (x *RevokeEvent) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RevokeEvent) GetEntitlement() *Entitlement { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *RevokeEvent) GetPrincipal() *Resource { + if x != nil { + return x.xxx_hidden_Principal + } + return nil +} + +func (x *RevokeEvent) SetEntitlement(v *Entitlement) { + x.xxx_hidden_Entitlement = v +} + +func (x *RevokeEvent) SetPrincipal(v *Resource) { + x.xxx_hidden_Principal = v +} + +func (x *RevokeEvent) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *RevokeEvent) HasPrincipal() bool { + if x == nil { + return false + } + return x.xxx_hidden_Principal != nil +} + +func (x *RevokeEvent) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +func (x *RevokeEvent) ClearPrincipal() { + x.xxx_hidden_Principal = nil +} + +type RevokeEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entitlement *Entitlement + Principal *Resource +} + +func (b0 RevokeEvent_builder) Build() *RevokeEvent { + m0 := &RevokeEvent{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entitlement = b.Entitlement + x.xxx_hidden_Principal = b.Principal + return m0 +} + +// generic light weight event indicating a resource was changed +type ResourceChangeEvent struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceChangeEvent) Reset() { + *x = ResourceChangeEvent{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceChangeEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceChangeEvent) ProtoMessage() {} + +func (x *ResourceChangeEvent) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceChangeEvent) GetResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *ResourceChangeEvent) GetParentResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ParentResourceId + } + return nil +} + +func (x *ResourceChangeEvent) SetResourceId(v *ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *ResourceChangeEvent) SetParentResourceId(v *ResourceId) { + x.xxx_hidden_ParentResourceId = v +} + +func (x *ResourceChangeEvent) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *ResourceChangeEvent) HasParentResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ParentResourceId != nil +} + +func (x *ResourceChangeEvent) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +func (x *ResourceChangeEvent) ClearParentResourceId() { + x.xxx_hidden_ParentResourceId = nil +} + +type ResourceChangeEvent_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceId *ResourceId + ParentResourceId *ResourceId +} + +func (b0 ResourceChangeEvent_builder) Build() *ResourceChangeEvent { + m0 := &ResourceChangeEvent{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_ParentResourceId = b.ParentResourceId + return m0 +} + +type EventFeedMetadata struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_SupportedEventTypes []EventType `protobuf:"varint,2,rep,packed,name=supported_event_types,json=supportedEventTypes,proto3,enum=c1.connector.v2.EventType"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EventFeedMetadata) Reset() { + *x = EventFeedMetadata{} + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EventFeedMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventFeedMetadata) ProtoMessage() {} + +func (x *EventFeedMetadata) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_event_feed_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EventFeedMetadata) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *EventFeedMetadata) GetSupportedEventTypes() []EventType { + if x != nil { + return x.xxx_hidden_SupportedEventTypes + } + return nil +} + +func (x *EventFeedMetadata) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *EventFeedMetadata) SetSupportedEventTypes(v []EventType) { + x.xxx_hidden_SupportedEventTypes = v +} + +type EventFeedMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // unique identifier for the event feed + Id string + SupportedEventTypes []EventType +} + +func (b0 EventFeedMetadata_builder) Build() *EventFeedMetadata { + m0 := &EventFeedMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_SupportedEventTypes = b.SupportedEventTypes + return m0 +} + +var File_c1_connector_v2_event_feed_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_event_feed_proto_rawDesc = "" + + "\n" + + " c1/connector/v2/event_feed.proto\x12\x0fc1.connector.v2\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\x85\x02\n" + + "\x11ListEventsRequest\x12%\n" + + "\x06cursor\x18\x01 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\x06cursor\x125\n" + + "\bstart_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x12'\n" + + "\tpage_size\x18\x03 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x121\n" + + "\revent_feed_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\veventFeedId\"\xbe\x01\n" + + "\x12ListEventsResponse\x12.\n" + + "\x06events\x18\x01 \x03(\v2\x16.c1.connector.v2.EventR\x06events\x12%\n" + + "\x06cursor\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\x06cursor\x12\x19\n" + + "\bhas_more\x18\x03 \x01(\bR\ahasMore\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"O\n" + + "\x15ListEventFeedsRequest\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x88\x01\n" + + "\x16ListEventFeedsResponse\x126\n" + + "\x04list\x18\x01 \x03(\v2\".c1.connector.v2.EventFeedMetadataR\x04list\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xdd\x04\n" + + "\x05Event\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12;\n" + + "\voccurred_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "occurredAt\x12>\n" + + "\vusage_event\x18d \x01(\v2\x1b.c1.connector.v2.UsageEventH\x00R\n" + + "usageEvent\x12>\n" + + "\vgrant_event\x18e \x01(\v2\x1b.c1.connector.v2.GrantEventH\x00R\n" + + "grantEvent\x12A\n" + + "\frevoke_event\x18f \x01(\v2\x1c.c1.connector.v2.RevokeEventH\x00R\vrevokeEvent\x12Z\n" + + "\x15resource_change_event\x18g \x01(\v2$.c1.connector.v2.ResourceChangeEventH\x00R\x13resourceChangeEvent\x12Q\n" + + "\x12create_grant_event\x18h \x01(\v2!.c1.connector.v2.CreateGrantEventH\x00R\x10createGrantEvent\x12T\n" + + "\x13create_revoke_event\x18i \x01(\v2\".c1.connector.v2.CreateRevokeEventH\x00R\x11createRevokeEvent\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotationsB\a\n" + + "\x05event\"\x92\x01\n" + + "\n" + + "UsageEvent\x12B\n" + + "\x0ftarget_resource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\x0etargetResource\x12@\n" + + "\x0eactor_resource\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceR\ractorResource\":\n" + + "\n" + + "GrantEvent\x12,\n" + + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantR\x05grant\"\xd7\x01\n" + + "\x10CreateGrantEvent\x12H\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12A\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tprincipal\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xd8\x01\n" + + "\x11CreateRevokeEvent\x12H\n" + 
+ "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12A\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tprincipal\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x86\x01\n" + + "\vRevokeEvent\x12>\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementR\ventitlement\x127\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceR\tprincipal\"\xa8\x01\n" + + "\x13ResourceChangeEvent\x12F\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x01R\n" + + "resourceId\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\"\x90\x01\n" + + "\x11EventFeedMetadata\x12\x1a\n" + + "\x02id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x12_\n" + + "\x15supported_event_types\x18\x02 \x03(\x0e2\x1a.c1.connector.v2.EventTypeB\x0f\xfaB\f\x92\x01\t\x18\x01\"\x05\x82\x01\x02\x10\x01R\x13supportedEventTypes*\xa4\x01\n" + + "\tEventType\x12\x1a\n" + + "\x16EVENT_TYPE_UNSPECIFIED\x10\x00\x12\x14\n" + + "\x10EVENT_TYPE_USAGE\x10\x01\x12\x1e\n" + + "\x1aEVENT_TYPE_RESOURCE_CHANGE\x10\x04\x12\x1b\n" + + "\x17EVENT_TYPE_CREATE_GRANT\x10\x05\x12\x1c\n" + + "\x18EVENT_TYPE_CREATE_REVOKE\x10\x06\"\x04\b\x02\x10\x02\"\x04\b\x03\x10\x032\xc8\x01\n" + + "\fEventService\x12U\n" + + "\n" + + "ListEvents\x12\".c1.connector.v2.ListEventsRequest\x1a#.c1.connector.v2.ListEventsResponse\x12a\n" + + "\x0eListEventFeeds\x12&.c1.connector.v2.ListEventFeedsRequest\x1a'.c1.connector.v2.ListEventFeedsResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_event_feed_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_c1_connector_v2_event_feed_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_c1_connector_v2_event_feed_proto_goTypes = []any{ + (EventType)(0), // 0: c1.connector.v2.EventType + (*ListEventsRequest)(nil), // 1: c1.connector.v2.ListEventsRequest + (*ListEventsResponse)(nil), // 2: c1.connector.v2.ListEventsResponse + (*ListEventFeedsRequest)(nil), // 3: c1.connector.v2.ListEventFeedsRequest + (*ListEventFeedsResponse)(nil), // 4: c1.connector.v2.ListEventFeedsResponse + (*Event)(nil), // 5: c1.connector.v2.Event + (*UsageEvent)(nil), // 6: c1.connector.v2.UsageEvent + (*GrantEvent)(nil), // 7: c1.connector.v2.GrantEvent + (*CreateGrantEvent)(nil), // 8: c1.connector.v2.CreateGrantEvent + (*CreateRevokeEvent)(nil), // 9: c1.connector.v2.CreateRevokeEvent + (*RevokeEvent)(nil), // 10: c1.connector.v2.RevokeEvent + (*ResourceChangeEvent)(nil), // 11: c1.connector.v2.ResourceChangeEvent + (*EventFeedMetadata)(nil), // 12: c1.connector.v2.EventFeedMetadata + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp + (*anypb.Any)(nil), // 14: google.protobuf.Any + (*Resource)(nil), // 15: c1.connector.v2.Resource + (*Grant)(nil), // 16: c1.connector.v2.Grant + (*Entitlement)(nil), // 17: c1.connector.v2.Entitlement + (*ResourceId)(nil), // 18: c1.connector.v2.ResourceId +} +var file_c1_connector_v2_event_feed_proto_depIdxs = []int32{ + 13, // 0: c1.connector.v2.ListEventsRequest.start_at:type_name -> google.protobuf.Timestamp + 14, // 1: c1.connector.v2.ListEventsRequest.annotations:type_name -> google.protobuf.Any + 5, // 2: c1.connector.v2.ListEventsResponse.events:type_name -> c1.connector.v2.Event + 14, // 3: c1.connector.v2.ListEventsResponse.annotations:type_name -> 
google.protobuf.Any + 14, // 4: c1.connector.v2.ListEventFeedsRequest.annotations:type_name -> google.protobuf.Any + 12, // 5: c1.connector.v2.ListEventFeedsResponse.list:type_name -> c1.connector.v2.EventFeedMetadata + 14, // 6: c1.connector.v2.ListEventFeedsResponse.annotations:type_name -> google.protobuf.Any + 13, // 7: c1.connector.v2.Event.occurred_at:type_name -> google.protobuf.Timestamp + 6, // 8: c1.connector.v2.Event.usage_event:type_name -> c1.connector.v2.UsageEvent + 7, // 9: c1.connector.v2.Event.grant_event:type_name -> c1.connector.v2.GrantEvent + 10, // 10: c1.connector.v2.Event.revoke_event:type_name -> c1.connector.v2.RevokeEvent + 11, // 11: c1.connector.v2.Event.resource_change_event:type_name -> c1.connector.v2.ResourceChangeEvent + 8, // 12: c1.connector.v2.Event.create_grant_event:type_name -> c1.connector.v2.CreateGrantEvent + 9, // 13: c1.connector.v2.Event.create_revoke_event:type_name -> c1.connector.v2.CreateRevokeEvent + 14, // 14: c1.connector.v2.Event.annotations:type_name -> google.protobuf.Any + 15, // 15: c1.connector.v2.UsageEvent.target_resource:type_name -> c1.connector.v2.Resource + 15, // 16: c1.connector.v2.UsageEvent.actor_resource:type_name -> c1.connector.v2.Resource + 16, // 17: c1.connector.v2.GrantEvent.grant:type_name -> c1.connector.v2.Grant + 17, // 18: c1.connector.v2.CreateGrantEvent.entitlement:type_name -> c1.connector.v2.Entitlement + 15, // 19: c1.connector.v2.CreateGrantEvent.principal:type_name -> c1.connector.v2.Resource + 14, // 20: c1.connector.v2.CreateGrantEvent.annotations:type_name -> google.protobuf.Any + 17, // 21: c1.connector.v2.CreateRevokeEvent.entitlement:type_name -> c1.connector.v2.Entitlement + 15, // 22: c1.connector.v2.CreateRevokeEvent.principal:type_name -> c1.connector.v2.Resource + 14, // 23: c1.connector.v2.CreateRevokeEvent.annotations:type_name -> google.protobuf.Any + 17, // 24: c1.connector.v2.RevokeEvent.entitlement:type_name -> c1.connector.v2.Entitlement + 15, // 25: c1.connector.v2.RevokeEvent.principal:type_name -> c1.connector.v2.Resource + 18, // 26: c1.connector.v2.ResourceChangeEvent.resource_id:type_name -> c1.connector.v2.ResourceId + 18, // 27: c1.connector.v2.ResourceChangeEvent.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 0, // 28: c1.connector.v2.EventFeedMetadata.supported_event_types:type_name -> c1.connector.v2.EventType + 1, // 29: c1.connector.v2.EventService.ListEvents:input_type -> c1.connector.v2.ListEventsRequest + 3, // 30: c1.connector.v2.EventService.ListEventFeeds:input_type -> c1.connector.v2.ListEventFeedsRequest + 2, // 31: c1.connector.v2.EventService.ListEvents:output_type -> c1.connector.v2.ListEventsResponse + 4, // 32: c1.connector.v2.EventService.ListEventFeeds:output_type -> c1.connector.v2.ListEventFeedsResponse + 31, // [31:33] is the sub-list for method output_type + 29, // [29:31] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_event_feed_proto_init() } +func file_c1_connector_v2_event_feed_proto_init() { + if File_c1_connector_v2_event_feed_proto != nil { + return + } + file_c1_connector_v2_entitlement_proto_init() + file_c1_connector_v2_grant_proto_init() + file_c1_connector_v2_resource_proto_init() + file_c1_connector_v2_event_feed_proto_msgTypes[4].OneofWrappers = []any{ + (*event_UsageEvent)(nil), + (*event_GrantEvent)(nil), + 
(*event_RevokeEvent)(nil), + (*event_ResourceChangeEvent)(nil), + (*event_CreateGrantEvent)(nil), + (*event_CreateRevokeEvent)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_event_feed_proto_rawDesc), len(file_c1_connector_v2_event_feed_proto_rawDesc)), + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connector_v2_event_feed_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_event_feed_proto_depIdxs, + EnumInfos: file_c1_connector_v2_event_feed_proto_enumTypes, + MessageInfos: file_c1_connector_v2_event_feed_proto_msgTypes, + }.Build() + File_c1_connector_v2_event_feed_proto = out.File + file_c1_connector_v2_event_feed_proto_goTypes = nil + file_c1_connector_v2_event_feed_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant.pb.go index 2091c808..485decbe 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/grant.proto +//go:build !protoopaque + package v2 import ( @@ -12,7 +14,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -24,7 +25,7 @@ const ( ) type GrantSources struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Sources map[string]*GrantSources_GrantSource `protobuf:"bytes,1,rep,name=sources,proto3" json:"sources,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -55,11 +56,6 @@ func (x *GrantSources) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantSources.ProtoReflect.Descriptor instead. -func (*GrantSources) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{0} -} - func (x *GrantSources) GetSources() map[string]*GrantSources_GrantSource { if x != nil { return x.Sources @@ -67,8 +63,26 @@ func (x *GrantSources) GetSources() map[string]*GrantSources_GrantSource { return nil } +func (x *GrantSources) SetSources(v map[string]*GrantSources_GrantSource) { + x.Sources = v +} + +type GrantSources_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Sources map[string]*GrantSources_GrantSource +} + +func (b0 GrantSources_builder) Build() *GrantSources { + m0 := &GrantSources{} + b, x := &b0, m0 + _, _ = b, x + x.Sources = b.Sources + return m0 +} + type Grant struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3" json:"principal,omitempty"` Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` @@ -103,11 +117,6 @@ func (x *Grant) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Grant.ProtoReflect.Descriptor instead. -func (*Grant) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{1} -} - func (x *Grant) GetEntitlement() *Entitlement { if x != nil { return x.Entitlement @@ -143,12 +152,88 @@ func (x *Grant) GetAnnotations() []*anypb.Any { return nil } +func (x *Grant) SetEntitlement(v *Entitlement) { + x.Entitlement = v +} + +func (x *Grant) SetPrincipal(v *Resource) { + x.Principal = v +} + +func (x *Grant) SetId(v string) { + x.Id = v +} + +func (x *Grant) SetSources(v *GrantSources) { + x.Sources = v +} + +func (x *Grant) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Grant) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} + +func (x *Grant) HasPrincipal() bool { + if x == nil { + return false + } + return x.Principal != nil +} + +func (x *Grant) HasSources() bool { + if x == nil { + return false + } + return x.Sources != nil +} + +func (x *Grant) ClearEntitlement() { + x.Entitlement = nil +} + +func (x *Grant) ClearPrincipal() { + x.Principal = nil +} + +func (x *Grant) ClearSources() { + x.Sources = nil +} + +type Grant_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entitlement *Entitlement + Principal *Resource + Id string + Sources *GrantSources + Annotations []*anypb.Any +} + +func (b0 Grant_builder) Build() *Grant { + m0 := &Grant{} + b, x := &b0, m0 + _, _ = b, x + x.Entitlement = b.Entitlement + x.Principal = b.Principal + x.Id = b.Id + x.Sources = b.Sources + x.Annotations = b.Annotations + return m0 +} + type GrantsServiceListGrantsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` + ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -178,11 +263,6 @@ func (x *GrantsServiceListGrantsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantsServiceListGrantsRequest.ProtoReflect.Descriptor instead. 
-func (*GrantsServiceListGrantsRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{2} -} - func (x *GrantsServiceListGrantsRequest) GetResource() *Resource { if x != nil { return x.Resource @@ -211,8 +291,68 @@ func (x *GrantsServiceListGrantsRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *GrantsServiceListGrantsRequest) GetActiveSyncId() string { + if x != nil { + return x.ActiveSyncId + } + return "" +} + +func (x *GrantsServiceListGrantsRequest) SetResource(v *Resource) { + x.Resource = v +} + +func (x *GrantsServiceListGrantsRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *GrantsServiceListGrantsRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *GrantsServiceListGrantsRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *GrantsServiceListGrantsRequest) SetActiveSyncId(v string) { + x.ActiveSyncId = v +} + +func (x *GrantsServiceListGrantsRequest) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *GrantsServiceListGrantsRequest) ClearResource() { + x.Resource = nil +} + +type GrantsServiceListGrantsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *Resource + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 GrantsServiceListGrantsRequest_builder) Build() *GrantsServiceListGrantsRequest { + m0 := &GrantsServiceListGrantsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + x.ActiveSyncId = b.ActiveSyncId + return m0 +} + type GrantsServiceListGrantsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` List []*Grant `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -245,11 +385,6 @@ func (x *GrantsServiceListGrantsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantsServiceListGrantsResponse.ProtoReflect.Descriptor instead. -func (*GrantsServiceListGrantsResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{3} -} - func (x *GrantsServiceListGrantsResponse) GetList() []*Grant { if x != nil { return x.List @@ -271,8 +406,38 @@ func (x *GrantsServiceListGrantsResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *GrantsServiceListGrantsResponse) SetList(v []*Grant) { + x.List = v +} + +func (x *GrantsServiceListGrantsResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} + +func (x *GrantsServiceListGrantsResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type GrantsServiceListGrantsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + List []*Grant + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 GrantsServiceListGrantsResponse_builder) Build() *GrantsServiceListGrantsResponse { + m0 := &GrantsServiceListGrantsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.NextPageToken = b.NextPageToken + x.Annotations = b.Annotations + return m0 +} + type GrantManagerServiceGrantRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3" json:"principal,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -305,11 +470,6 @@ func (x *GrantManagerServiceGrantRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantManagerServiceGrantRequest.ProtoReflect.Descriptor instead. -func (*GrantManagerServiceGrantRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{4} -} - func (x *GrantManagerServiceGrantRequest) GetEntitlement() *Entitlement { if x != nil { return x.Entitlement @@ -331,8 +491,60 @@ func (x *GrantManagerServiceGrantRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *GrantManagerServiceGrantRequest) SetEntitlement(v *Entitlement) { + x.Entitlement = v +} + +func (x *GrantManagerServiceGrantRequest) SetPrincipal(v *Resource) { + x.Principal = v +} + +func (x *GrantManagerServiceGrantRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *GrantManagerServiceGrantRequest) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} + +func (x *GrantManagerServiceGrantRequest) HasPrincipal() bool { + if x == nil { + return false + } + return x.Principal != nil +} + +func (x *GrantManagerServiceGrantRequest) ClearEntitlement() { + x.Entitlement = nil +} + +func (x *GrantManagerServiceGrantRequest) ClearPrincipal() { + x.Principal = nil +} + +type GrantManagerServiceGrantRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entitlement *Entitlement + Principal *Resource + Annotations []*anypb.Any +} + +func (b0 GrantManagerServiceGrantRequest_builder) Build() *GrantManagerServiceGrantRequest { + m0 := &GrantManagerServiceGrantRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Entitlement = b.Entitlement + x.Principal = b.Principal + x.Annotations = b.Annotations + return m0 +} + type GrantManagerServiceGrantResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` Grants []*Grant `protobuf:"bytes,2,rep,name=grants,proto3" json:"grants,omitempty"` unknownFields protoimpl.UnknownFields @@ -364,11 +576,6 @@ func (x *GrantManagerServiceGrantResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantManagerServiceGrantResponse.ProtoReflect.Descriptor instead. 
-func (*GrantManagerServiceGrantResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{5} -} - func (x *GrantManagerServiceGrantResponse) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -383,8 +590,32 @@ func (x *GrantManagerServiceGrantResponse) GetGrants() []*Grant { return nil } +func (x *GrantManagerServiceGrantResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *GrantManagerServiceGrantResponse) SetGrants(v []*Grant) { + x.Grants = v +} + +type GrantManagerServiceGrantResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + Grants []*Grant +} + +func (b0 GrantManagerServiceGrantResponse_builder) Build() *GrantManagerServiceGrantResponse { + m0 := &GrantManagerServiceGrantResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.Grants = b.Grants + return m0 +} + type GrantManagerServiceRevokeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Grant *Grant `protobuf:"bytes,1,opt,name=grant,proto3" json:"grant,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -416,11 +647,6 @@ func (x *GrantManagerServiceRevokeRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantManagerServiceRevokeRequest.ProtoReflect.Descriptor instead. -func (*GrantManagerServiceRevokeRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{6} -} - func (x *GrantManagerServiceRevokeRequest) GetGrant() *Grant { if x != nil { return x.Grant @@ -435,8 +661,43 @@ func (x *GrantManagerServiceRevokeRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *GrantManagerServiceRevokeRequest) SetGrant(v *Grant) { + x.Grant = v +} + +func (x *GrantManagerServiceRevokeRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *GrantManagerServiceRevokeRequest) HasGrant() bool { + if x == nil { + return false + } + return x.Grant != nil +} + +func (x *GrantManagerServiceRevokeRequest) ClearGrant() { + x.Grant = nil +} + +type GrantManagerServiceRevokeRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Grant *Grant + Annotations []*anypb.Any +} + +func (b0 GrantManagerServiceRevokeRequest_builder) Build() *GrantManagerServiceRevokeRequest { + m0 := &GrantManagerServiceRevokeRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Grant = b.Grant + x.Annotations = b.Annotations + return m0 +} + type GrantManagerServiceRevokeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -467,11 +728,6 @@ func (x *GrantManagerServiceRevokeResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use GrantManagerServiceRevokeResponse.ProtoReflect.Descriptor instead. 
-func (*GrantManagerServiceRevokeResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{7} -} - func (x *GrantManagerServiceRevokeResponse) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -479,8 +735,26 @@ func (x *GrantManagerServiceRevokeResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *GrantManagerServiceRevokeResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type GrantManagerServiceRevokeResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 GrantManagerServiceRevokeResponse_builder) Build() *GrantManagerServiceRevokeResponse { + m0 := &GrantManagerServiceRevokeResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type GrantSources_GrantSource struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -510,163 +784,67 @@ func (x *GrantSources_GrantSource) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrantSources_GrantSource.ProtoReflect.Descriptor instead. -func (*GrantSources_GrantSource) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_grant_proto_rawDescGZIP(), []int{0, 0} +type GrantSources_GrantSource_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + } -var File_c1_connector_v2_grant_proto protoreflect.FileDescriptor +func (b0 GrantSources_GrantSource_builder) Build() *GrantSources_GrantSource { + m0 := &GrantSources_GrantSource{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} -var file_c1_connector_v2_grant_proto_rawDesc = string([]byte{ - 0x0a, 0x1b, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x21, - 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, - 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xca, 0x01, 0x0a, 0x0c, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x07, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x07, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x0d, 0x0a, 0x0b, - 0x47, 0x72, 0x61, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x65, 0x0a, 0x0c, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x72, 0x61, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xab, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0b, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, - 0x70, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, - 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, - 0x08, 0x52, 0x02, 0x69, 0x64, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x00, 0x52, - 0x07, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0xf1, 0x01, 0x0a, 0x1e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, - 0x01, 0x40, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2d, 0x0a, - 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x72, 0x09, 0x20, 0x01, 0x28, 0x80, 0x80, 0x40, 0xd0, 0x01, - 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 
0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xbd, 0x01, 0x0a, 0x1f, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x04, - 0x6c, 0x69, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0e, 0xfa, - 0x42, 0x0b, 0x72, 0x09, 0x20, 0x01, 0x28, 0x80, 0x80, 0x40, 0xd0, 0x01, 0x01, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x1f, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, - 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x8a, 0x01, - 0x0a, 0x20, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x06, 
0x67, 0x72, - 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, - 0x6e, 0x74, 0x52, 0x06, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x20, 0x47, - 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x36, 0x0a, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, - 0x52, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0x5b, 0x0a, 0x21, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x80, 0x01, 0x0a, - 0x0d, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6f, - 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x2f, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x72, 0x61, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, - 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, - 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, - 0xf4, 0x01, 0x0a, 0x13, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x05, 0x47, 0x72, 0x61, 0x6e, 0x74, - 0x12, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x12, - 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 
0x47, 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, - 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, - 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_v2_grant_proto_rawDescOnce sync.Once - file_c1_connector_v2_grant_proto_rawDescData []byte -) +var File_c1_connector_v2_grant_proto protoreflect.FileDescriptor -func file_c1_connector_v2_grant_proto_rawDescGZIP() []byte { - file_c1_connector_v2_grant_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_grant_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_grant_proto_rawDesc), len(file_c1_connector_v2_grant_proto_rawDesc))) - }) - return file_c1_connector_v2_grant_proto_rawDescData -} +const file_c1_connector_v2_grant_proto_rawDesc = "" + + "\n" + + "\x1bc1/connector/v2/grant.proto\x12\x0fc1.connector.v2\x1a!c1/connector/v2/entitlement.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\xca\x01\n" + + "\fGrantSources\x12D\n" + + "\asources\x18\x01 \x03(\v2*.c1.connector.v2.GrantSources.SourcesEntryR\asources\x1a\r\n" + + "\vGrantSource\x1ae\n" + + "\fSourcesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12?\n" + + "\x05value\x18\x02 \x01(\v2).c1.connector.v2.GrantSources.GrantSourceR\x05value:\x028\x01\"\xab\x02\n" + + "\x05Grant\x12H\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12A\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tprincipal\x12\x1a\n" + + "\x02id\x18\x03 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x12A\n" + + "\asources\x18\x05 \x01(\v2\x1d.c1.connector.v2.GrantSourcesB\b\xfaB\x05\x8a\x01\x02\x10\x00R\asources\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa6\x02\n" + + "\x1eGrantsServiceListGrantsRequest\x12?\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\bresource\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xbd\x01\n" + + "\x1fGrantsServiceListGrantsResponse\x12*\n" + + "\x04list\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xe6\x01\n" + + "\x1fGrantManagerServiceGrantRequest\x12H\n" + + 
"\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12A\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tprincipal\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x8a\x01\n" + + " GrantManagerServiceGrantResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12.\n" + + "\x06grants\x18\x02 \x03(\v2\x16.c1.connector.v2.GrantR\x06grants\"\x92\x01\n" + + " GrantManagerServiceRevokeRequest\x126\n" + + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x05grant\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"[\n" + + "!GrantManagerServiceRevokeResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations2\x80\x01\n" + + "\rGrantsService\x12o\n" + + "\n" + + "ListGrants\x12/.c1.connector.v2.GrantsServiceListGrantsRequest\x1a0.c1.connector.v2.GrantsServiceListGrantsResponse2\xf4\x01\n" + + "\x13GrantManagerService\x12l\n" + + "\x05Grant\x120.c1.connector.v2.GrantManagerServiceGrantRequest\x1a1.c1.connector.v2.GrantManagerServiceGrantResponse\x12o\n" + + "\x06Revoke\x121.c1.connector.v2.GrantManagerServiceRevokeRequest\x1a2.c1.connector.v2.GrantManagerServiceRevokeResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" var file_c1_connector_v2_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_c1_connector_v2_grant_proto_goTypes = []any{ diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant.pb.validate.go index 7d252857..a66db3e1 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant.pb.validate.go @@ -558,6 +558,21 @@ func (m *GrantsServiceListGrantsRequest) validate(all bool) error { } + if m.GetActiveSyncId() != "" { + + if l := len(m.GetActiveSyncId()); l < 1 || l > 1024 { + err := GrantsServiceListGrantsRequestValidationError{ + field: "ActiveSyncId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return GrantsServiceListGrantsRequestMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant_protoopaque.pb.go new file mode 100644 index 00000000..2aa8d446 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/grant_protoopaque.pb.go @@ -0,0 +1,939 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/grant.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GrantSources struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Sources map[string]*GrantSources_GrantSource `protobuf:"bytes,1,rep,name=sources,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantSources) Reset() { + *x = GrantSources{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantSources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantSources) ProtoMessage() {} + +func (x *GrantSources) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantSources) GetSources() map[string]*GrantSources_GrantSource { + if x != nil { + return x.xxx_hidden_Sources + } + return nil +} + +func (x *GrantSources) SetSources(v map[string]*GrantSources_GrantSource) { + x.xxx_hidden_Sources = v +} + +type GrantSources_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Sources map[string]*GrantSources_GrantSource +} + +func (b0 GrantSources_builder) Build() *GrantSources { + m0 := &GrantSources{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Sources = b.Sources + return m0 +} + +type Grant struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` + xxx_hidden_Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3"` + xxx_hidden_Id string `protobuf:"bytes,3,opt,name=id,proto3"` + xxx_hidden_Sources *GrantSources `protobuf:"bytes,5,opt,name=sources,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Grant) Reset() { + *x = Grant{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Grant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Grant) ProtoMessage() {} + +func (x *Grant) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Grant) GetEntitlement() *Entitlement { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *Grant) GetPrincipal() *Resource { + if x != nil { + return x.xxx_hidden_Principal + } + return nil +} + +func (x *Grant) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *Grant) GetSources() *GrantSources { + if x != nil { + return x.xxx_hidden_Sources + } + return nil +} + +func (x *Grant) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Grant) SetEntitlement(v *Entitlement) { + x.xxx_hidden_Entitlement = v +} + +func (x *Grant) SetPrincipal(v *Resource) { + x.xxx_hidden_Principal = v +} + +func (x *Grant) 
SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *Grant) SetSources(v *GrantSources) { + x.xxx_hidden_Sources = v +} + +func (x *Grant) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Grant) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *Grant) HasPrincipal() bool { + if x == nil { + return false + } + return x.xxx_hidden_Principal != nil +} + +func (x *Grant) HasSources() bool { + if x == nil { + return false + } + return x.xxx_hidden_Sources != nil +} + +func (x *Grant) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +func (x *Grant) ClearPrincipal() { + x.xxx_hidden_Principal = nil +} + +func (x *Grant) ClearSources() { + x.xxx_hidden_Sources = nil +} + +type Grant_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entitlement *Entitlement + Principal *Resource + Id string + Sources *GrantSources + Annotations []*anypb.Any +} + +func (b0 Grant_builder) Build() *Grant { + m0 := &Grant{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entitlement = b.Entitlement + x.xxx_hidden_Principal = b.Principal + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Sources = b.Sources + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GrantsServiceListGrantsRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantsServiceListGrantsRequest) Reset() { + *x = GrantsServiceListGrantsRequest{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantsServiceListGrantsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantsServiceListGrantsRequest) ProtoMessage() {} + +func (x *GrantsServiceListGrantsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantsServiceListGrantsRequest) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *GrantsServiceListGrantsRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *GrantsServiceListGrantsRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *GrantsServiceListGrantsRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantsServiceListGrantsRequest) GetActiveSyncId() string { + if x != nil { + return x.xxx_hidden_ActiveSyncId + } + return "" +} + +func (x *GrantsServiceListGrantsRequest) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *GrantsServiceListGrantsRequest) SetPageSize(v 
uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *GrantsServiceListGrantsRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *GrantsServiceListGrantsRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *GrantsServiceListGrantsRequest) SetActiveSyncId(v string) { + x.xxx_hidden_ActiveSyncId = v +} + +func (x *GrantsServiceListGrantsRequest) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *GrantsServiceListGrantsRequest) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type GrantsServiceListGrantsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *Resource + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 GrantsServiceListGrantsRequest_builder) Build() *GrantsServiceListGrantsRequest { + m0 := &GrantsServiceListGrantsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ActiveSyncId = b.ActiveSyncId + return m0 +} + +type GrantsServiceListGrantsResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*Grant `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantsServiceListGrantsResponse) Reset() { + *x = GrantsServiceListGrantsResponse{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantsServiceListGrantsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantsServiceListGrantsResponse) ProtoMessage() {} + +func (x *GrantsServiceListGrantsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantsServiceListGrantsResponse) GetList() []*Grant { + if x != nil { + if x.xxx_hidden_List != nil { + return *x.xxx_hidden_List + } + } + return nil +} + +func (x *GrantsServiceListGrantsResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *GrantsServiceListGrantsResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantsServiceListGrantsResponse) SetList(v []*Grant) { + x.xxx_hidden_List = &v +} + +func (x *GrantsServiceListGrantsResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +func (x *GrantsServiceListGrantsResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type GrantsServiceListGrantsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + List []*Grant + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 GrantsServiceListGrantsResponse_builder) Build() *GrantsServiceListGrantsResponse { + m0 := &GrantsServiceListGrantsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_NextPageToken = b.NextPageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GrantManagerServiceGrantRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entitlement *Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` + xxx_hidden_Principal *Resource `protobuf:"bytes,2,opt,name=principal,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantManagerServiceGrantRequest) Reset() { + *x = GrantManagerServiceGrantRequest{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantManagerServiceGrantRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantManagerServiceGrantRequest) ProtoMessage() {} + +func (x *GrantManagerServiceGrantRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantManagerServiceGrantRequest) GetEntitlement() *Entitlement { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *GrantManagerServiceGrantRequest) GetPrincipal() *Resource { + if x != nil { + return x.xxx_hidden_Principal + } + return nil +} + +func (x *GrantManagerServiceGrantRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantManagerServiceGrantRequest) SetEntitlement(v *Entitlement) { + x.xxx_hidden_Entitlement = v +} + +func (x *GrantManagerServiceGrantRequest) SetPrincipal(v *Resource) { + x.xxx_hidden_Principal = v +} + +func (x *GrantManagerServiceGrantRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *GrantManagerServiceGrantRequest) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *GrantManagerServiceGrantRequest) HasPrincipal() bool { + if x == nil { + return false + } + return x.xxx_hidden_Principal != nil +} + +func (x *GrantManagerServiceGrantRequest) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +func (x *GrantManagerServiceGrantRequest) ClearPrincipal() { + x.xxx_hidden_Principal = nil +} + +type GrantManagerServiceGrantRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entitlement *Entitlement + Principal *Resource + Annotations []*anypb.Any +} + +func (b0 GrantManagerServiceGrantRequest_builder) Build() *GrantManagerServiceGrantRequest { + m0 := &GrantManagerServiceGrantRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entitlement = b.Entitlement + x.xxx_hidden_Principal = b.Principal + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GrantManagerServiceGrantResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_Grants *[]*Grant `protobuf:"bytes,2,rep,name=grants,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantManagerServiceGrantResponse) Reset() { + *x = GrantManagerServiceGrantResponse{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantManagerServiceGrantResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantManagerServiceGrantResponse) ProtoMessage() {} + +func (x *GrantManagerServiceGrantResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantManagerServiceGrantResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantManagerServiceGrantResponse) GetGrants() []*Grant { + if x != nil { + if x.xxx_hidden_Grants != nil { + return *x.xxx_hidden_Grants + } + } + return nil +} + +func (x *GrantManagerServiceGrantResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *GrantManagerServiceGrantResponse) SetGrants(v []*Grant) { + x.xxx_hidden_Grants = &v +} + +type GrantManagerServiceGrantResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + Grants []*Grant +} + +func (b0 GrantManagerServiceGrantResponse_builder) Build() *GrantManagerServiceGrantResponse { + m0 := &GrantManagerServiceGrantResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Grants = &b.Grants + return m0 +} + +type GrantManagerServiceRevokeRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Grant *Grant `protobuf:"bytes,1,opt,name=grant,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantManagerServiceRevokeRequest) Reset() { + *x = GrantManagerServiceRevokeRequest{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantManagerServiceRevokeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantManagerServiceRevokeRequest) ProtoMessage() {} + +func (x *GrantManagerServiceRevokeRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantManagerServiceRevokeRequest) GetGrant() *Grant { + if x != nil { + return x.xxx_hidden_Grant + } + return nil +} + +func (x *GrantManagerServiceRevokeRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantManagerServiceRevokeRequest) SetGrant(v *Grant) { + x.xxx_hidden_Grant = v +} + +func (x *GrantManagerServiceRevokeRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *GrantManagerServiceRevokeRequest) HasGrant() bool { + if x == nil { + return false + } + return x.xxx_hidden_Grant != nil +} + +func (x *GrantManagerServiceRevokeRequest) ClearGrant() { + x.xxx_hidden_Grant = nil +} + +type GrantManagerServiceRevokeRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Grant *Grant + Annotations []*anypb.Any +} + +func (b0 GrantManagerServiceRevokeRequest_builder) Build() *GrantManagerServiceRevokeRequest { + m0 := &GrantManagerServiceRevokeRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Grant = b.Grant + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GrantManagerServiceRevokeResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantManagerServiceRevokeResponse) Reset() { + *x = GrantManagerServiceRevokeResponse{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantManagerServiceRevokeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantManagerServiceRevokeResponse) ProtoMessage() {} + +func (x *GrantManagerServiceRevokeResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantManagerServiceRevokeResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantManagerServiceRevokeResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type GrantManagerServiceRevokeResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 GrantManagerServiceRevokeResponse_builder) Build() *GrantManagerServiceRevokeResponse { + m0 := &GrantManagerServiceRevokeResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GrantSources_GrantSource struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantSources_GrantSource) Reset() { + *x = GrantSources_GrantSource{} + mi := &file_c1_connector_v2_grant_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantSources_GrantSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantSources_GrantSource) ProtoMessage() {} + +func (x *GrantSources_GrantSource) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_grant_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type GrantSources_GrantSource_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 GrantSources_GrantSource_builder) Build() *GrantSources_GrantSource { + m0 := &GrantSources_GrantSource{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connector_v2_grant_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_grant_proto_rawDesc = "" + + "\n" + + "\x1bc1/connector/v2/grant.proto\x12\x0fc1.connector.v2\x1a!c1/connector/v2/entitlement.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\xca\x01\n" + + "\fGrantSources\x12D\n" + + "\asources\x18\x01 \x03(\v2*.c1.connector.v2.GrantSources.SourcesEntryR\asources\x1a\r\n" + + "\vGrantSource\x1ae\n" + + "\fSourcesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12?\n" + + "\x05value\x18\x02 \x01(\v2).c1.connector.v2.GrantSources.GrantSourceR\x05value:\x028\x01\"\xab\x02\n" + + "\x05Grant\x12H\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12A\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tprincipal\x12\x1a\n" + + "\x02id\x18\x03 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x12A\n" + + "\asources\x18\x05 \x01(\v2\x1d.c1.connector.v2.GrantSourcesB\b\xfaB\x05\x8a\x01\x02\x10\x00R\asources\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa6\x02\n" + + "\x1eGrantsServiceListGrantsRequest\x12?\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\bresource\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xbd\x01\n" + + "\x1fGrantsServiceListGrantsResponse\x12*\n" + + "\x04list\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xe6\x01\n" + + "\x1fGrantManagerServiceGrantRequest\x12H\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12A\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tprincipal\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x8a\x01\n" + + " GrantManagerServiceGrantResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12.\n" + + "\x06grants\x18\x02 \x03(\v2\x16.c1.connector.v2.GrantR\x06grants\"\x92\x01\n" + + " GrantManagerServiceRevokeRequest\x126\n" + + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x05grant\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"[\n" + + "!GrantManagerServiceRevokeResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations2\x80\x01\n" + + "\rGrantsService\x12o\n" + + "\n" + + "ListGrants\x12/.c1.connector.v2.GrantsServiceListGrantsRequest\x1a0.c1.connector.v2.GrantsServiceListGrantsResponse2\xf4\x01\n" + + "\x13GrantManagerService\x12l\n" + + "\x05Grant\x120.c1.connector.v2.GrantManagerServiceGrantRequest\x1a1.c1.connector.v2.GrantManagerServiceGrantResponse\x12o\n" + + 
"\x06Revoke\x121.c1.connector.v2.GrantManagerServiceRevokeRequest\x1a2.c1.connector.v2.GrantManagerServiceRevokeResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_c1_connector_v2_grant_proto_goTypes = []any{ + (*GrantSources)(nil), // 0: c1.connector.v2.GrantSources + (*Grant)(nil), // 1: c1.connector.v2.Grant + (*GrantsServiceListGrantsRequest)(nil), // 2: c1.connector.v2.GrantsServiceListGrantsRequest + (*GrantsServiceListGrantsResponse)(nil), // 3: c1.connector.v2.GrantsServiceListGrantsResponse + (*GrantManagerServiceGrantRequest)(nil), // 4: c1.connector.v2.GrantManagerServiceGrantRequest + (*GrantManagerServiceGrantResponse)(nil), // 5: c1.connector.v2.GrantManagerServiceGrantResponse + (*GrantManagerServiceRevokeRequest)(nil), // 6: c1.connector.v2.GrantManagerServiceRevokeRequest + (*GrantManagerServiceRevokeResponse)(nil), // 7: c1.connector.v2.GrantManagerServiceRevokeResponse + (*GrantSources_GrantSource)(nil), // 8: c1.connector.v2.GrantSources.GrantSource + nil, // 9: c1.connector.v2.GrantSources.SourcesEntry + (*Entitlement)(nil), // 10: c1.connector.v2.Entitlement + (*Resource)(nil), // 11: c1.connector.v2.Resource + (*anypb.Any)(nil), // 12: google.protobuf.Any +} +var file_c1_connector_v2_grant_proto_depIdxs = []int32{ + 9, // 0: c1.connector.v2.GrantSources.sources:type_name -> c1.connector.v2.GrantSources.SourcesEntry + 10, // 1: c1.connector.v2.Grant.entitlement:type_name -> c1.connector.v2.Entitlement + 11, // 2: c1.connector.v2.Grant.principal:type_name -> c1.connector.v2.Resource + 0, // 3: c1.connector.v2.Grant.sources:type_name -> c1.connector.v2.GrantSources + 12, // 4: c1.connector.v2.Grant.annotations:type_name -> google.protobuf.Any + 11, // 5: c1.connector.v2.GrantsServiceListGrantsRequest.resource:type_name -> c1.connector.v2.Resource + 12, // 6: c1.connector.v2.GrantsServiceListGrantsRequest.annotations:type_name -> google.protobuf.Any + 1, // 7: c1.connector.v2.GrantsServiceListGrantsResponse.list:type_name -> c1.connector.v2.Grant + 12, // 8: c1.connector.v2.GrantsServiceListGrantsResponse.annotations:type_name -> google.protobuf.Any + 10, // 9: c1.connector.v2.GrantManagerServiceGrantRequest.entitlement:type_name -> c1.connector.v2.Entitlement + 11, // 10: c1.connector.v2.GrantManagerServiceGrantRequest.principal:type_name -> c1.connector.v2.Resource + 12, // 11: c1.connector.v2.GrantManagerServiceGrantRequest.annotations:type_name -> google.protobuf.Any + 12, // 12: c1.connector.v2.GrantManagerServiceGrantResponse.annotations:type_name -> google.protobuf.Any + 1, // 13: c1.connector.v2.GrantManagerServiceGrantResponse.grants:type_name -> c1.connector.v2.Grant + 1, // 14: c1.connector.v2.GrantManagerServiceRevokeRequest.grant:type_name -> c1.connector.v2.Grant + 12, // 15: c1.connector.v2.GrantManagerServiceRevokeRequest.annotations:type_name -> google.protobuf.Any + 12, // 16: c1.connector.v2.GrantManagerServiceRevokeResponse.annotations:type_name -> google.protobuf.Any + 8, // 17: c1.connector.v2.GrantSources.SourcesEntry.value:type_name -> c1.connector.v2.GrantSources.GrantSource + 2, // 18: c1.connector.v2.GrantsService.ListGrants:input_type -> c1.connector.v2.GrantsServiceListGrantsRequest + 4, // 19: c1.connector.v2.GrantManagerService.Grant:input_type -> c1.connector.v2.GrantManagerServiceGrantRequest + 6, // 20: c1.connector.v2.GrantManagerService.Revoke:input_type -> c1.connector.v2.GrantManagerServiceRevokeRequest + 3, // 21: 
c1.connector.v2.GrantsService.ListGrants:output_type -> c1.connector.v2.GrantsServiceListGrantsResponse + 5, // 22: c1.connector.v2.GrantManagerService.Grant:output_type -> c1.connector.v2.GrantManagerServiceGrantResponse + 7, // 23: c1.connector.v2.GrantManagerService.Revoke:output_type -> c1.connector.v2.GrantManagerServiceRevokeResponse + 21, // [21:24] is the sub-list for method output_type + 18, // [18:21] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_grant_proto_init() } +func file_c1_connector_v2_grant_proto_init() { + if File_c1_connector_v2_grant_proto != nil { + return + } + file_c1_connector_v2_entitlement_proto_init() + file_c1_connector_v2_resource_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_grant_proto_rawDesc), len(file_c1_connector_v2_grant_proto_rawDesc)), + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_c1_connector_v2_grant_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_grant_proto_depIdxs, + MessageInfos: file_c1_connector_v2_grant_proto_msgTypes, + }.Build() + File_c1_connector_v2_grant_proto = out.File + file_c1_connector_v2_grant_proto_goTypes = nil + file_c1_connector_v2_grant_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go index 27a761c5..de8fff47 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/resource.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -27,12 +28,13 @@ const ( type ResourceType_Trait int32 const ( - ResourceType_TRAIT_UNSPECIFIED ResourceType_Trait = 0 - ResourceType_TRAIT_USER ResourceType_Trait = 1 - ResourceType_TRAIT_GROUP ResourceType_Trait = 2 - ResourceType_TRAIT_ROLE ResourceType_Trait = 3 - ResourceType_TRAIT_APP ResourceType_Trait = 4 - ResourceType_TRAIT_SECRET ResourceType_Trait = 5 + ResourceType_TRAIT_UNSPECIFIED ResourceType_Trait = 0 + ResourceType_TRAIT_USER ResourceType_Trait = 1 + ResourceType_TRAIT_GROUP ResourceType_Trait = 2 + ResourceType_TRAIT_ROLE ResourceType_Trait = 3 + ResourceType_TRAIT_APP ResourceType_Trait = 4 + ResourceType_TRAIT_SECRET ResourceType_Trait = 5 + ResourceType_TRAIT_SECURITY_INSIGHT ResourceType_Trait = 6 ) // Enum value maps for ResourceType_Trait. 
@@ -44,14 +46,16 @@ var ( 3: "TRAIT_ROLE", 4: "TRAIT_APP", 5: "TRAIT_SECRET", + 6: "TRAIT_SECURITY_INSIGHT", } ResourceType_Trait_value = map[string]int32{ - "TRAIT_UNSPECIFIED": 0, - "TRAIT_USER": 1, - "TRAIT_GROUP": 2, - "TRAIT_ROLE": 3, - "TRAIT_APP": 4, - "TRAIT_SECRET": 5, + "TRAIT_UNSPECIFIED": 0, + "TRAIT_USER": 1, + "TRAIT_GROUP": 2, + "TRAIT_ROLE": 3, + "TRAIT_APP": 4, + "TRAIT_SECRET": 5, + "TRAIT_SECURITY_INSIGHT": 6, } ) @@ -77,11 +81,6 @@ func (x ResourceType_Trait) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use ResourceType_Trait.Descriptor instead. -func (ResourceType_Trait) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{0, 0} -} - // FIXME(mstanbCO): call this something else? Should it just be a bool? Possibly just use an annotation? type Resource_CreationSource int32 @@ -127,13 +126,8 @@ func (x Resource_CreationSource) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use Resource_CreationSource.Descriptor instead. -func (Resource_CreationSource) EnumDescriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{21, 0} -} - type ResourceType struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` Traits []ResourceType_Trait `protobuf:"varint,3,rep,packed,name=traits,proto3,enum=c1.connector.v2.ResourceType_Trait" json:"traits,omitempty"` @@ -169,11 +163,6 @@ func (x *ResourceType) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ResourceType.ProtoReflect.Descriptor instead. -func (*ResourceType) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{0} -} - func (x *ResourceType) GetId() string { if x != nil { return x.Id @@ -216,12 +205,61 @@ func (x *ResourceType) GetSourcedExternally() bool { return false } +func (x *ResourceType) SetId(v string) { + x.Id = v +} + +func (x *ResourceType) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *ResourceType) SetTraits(v []ResourceType_Trait) { + x.Traits = v +} + +func (x *ResourceType) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ResourceType) SetDescription(v string) { + x.Description = v +} + +func (x *ResourceType) SetSourcedExternally(v bool) { + x.SourcedExternally = v +} + +type ResourceType_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + DisplayName string + Traits []ResourceType_Trait + Annotations []*anypb.Any + Description string + SourcedExternally bool +} + +func (b0 ResourceType_builder) Build() *ResourceType { + m0 := &ResourceType{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.DisplayName = b.DisplayName + x.Traits = b.Traits + x.Annotations = b.Annotations + x.Description = b.Description + x.SourcedExternally = b.SourcedExternally + return m0 +} + type ResourceTypesServiceListResourceTypesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Parent *Resource `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` + ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -251,11 +289,6 @@ func (x *ResourceTypesServiceListResourceTypesRequest) ProtoReflect() protorefle return mi.MessageOf(x) } -// Deprecated: Use ResourceTypesServiceListResourceTypesRequest.ProtoReflect.Descriptor instead. -func (*ResourceTypesServiceListResourceTypesRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{1} -} - func (x *ResourceTypesServiceListResourceTypesRequest) GetParent() *Resource { if x != nil { return x.Parent @@ -284,8 +317,68 @@ func (x *ResourceTypesServiceListResourceTypesRequest) GetAnnotations() []*anypb return nil } +func (x *ResourceTypesServiceListResourceTypesRequest) GetActiveSyncId() string { + if x != nil { + return x.ActiveSyncId + } + return "" +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetParent(v *Resource) { + x.Parent = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetActiveSyncId(v string) { + x.ActiveSyncId = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) HasParent() bool { + if x == nil { + return false + } + return x.Parent != nil +} + +func (x *ResourceTypesServiceListResourceTypesRequest) ClearParent() { + x.Parent = nil +} + +type ResourceTypesServiceListResourceTypesRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Parent *Resource + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 ResourceTypesServiceListResourceTypesRequest_builder) Build() *ResourceTypesServiceListResourceTypesRequest { + m0 := &ResourceTypesServiceListResourceTypesRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Parent = b.Parent + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + x.ActiveSyncId = b.ActiveSyncId + return m0 +} + type ResourceTypesServiceListResourceTypesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` List []*ResourceType `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -318,11 +411,6 @@ func (x *ResourceTypesServiceListResourceTypesResponse) ProtoReflect() protorefl return mi.MessageOf(x) } -// Deprecated: Use ResourceTypesServiceListResourceTypesResponse.ProtoReflect.Descriptor instead. -func (*ResourceTypesServiceListResourceTypesResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{2} -} - func (x *ResourceTypesServiceListResourceTypesResponse) GetList() []*ResourceType { if x != nil { return x.List @@ -344,8 +432,38 @@ func (x *ResourceTypesServiceListResourceTypesResponse) GetAnnotations() []*anyp return nil } +func (x *ResourceTypesServiceListResourceTypesResponse) SetList(v []*ResourceType) { + x.List = v +} + +func (x *ResourceTypesServiceListResourceTypesResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} + +func (x *ResourceTypesServiceListResourceTypesResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type ResourceTypesServiceListResourceTypesResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*ResourceType + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 ResourceTypesServiceListResourceTypesResponse_builder) Build() *ResourceTypesServiceListResourceTypesResponse { + m0 := &ResourceTypesServiceListResourceTypesResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.NextPageToken = b.NextPageToken + x.Annotations = b.Annotations + return m0 +} + type CreateResourceRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -376,11 +494,6 @@ func (x *CreateResourceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateResourceRequest.ProtoReflect.Descriptor instead. 
-func (*CreateResourceRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{3} -} - func (x *CreateResourceRequest) GetResource() *Resource { if x != nil { return x.Resource @@ -388,8 +501,37 @@ func (x *CreateResourceRequest) GetResource() *Resource { return nil } +func (x *CreateResourceRequest) SetResource(v *Resource) { + x.Resource = v +} + +func (x *CreateResourceRequest) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *CreateResourceRequest) ClearResource() { + x.Resource = nil +} + +type CreateResourceRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *Resource +} + +func (b0 CreateResourceRequest_builder) Build() *CreateResourceRequest { + m0 := &CreateResourceRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + return m0 +} + type CreateResourceResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Created *Resource `protobuf:"bytes,1,opt,name=created,proto3" json:"created,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -421,11 +563,6 @@ func (x *CreateResourceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateResourceResponse.ProtoReflect.Descriptor instead. -func (*CreateResourceResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{4} -} - func (x *CreateResourceResponse) GetCreated() *Resource { if x != nil { return x.Created @@ -440,8 +577,43 @@ func (x *CreateResourceResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *CreateResourceResponse) SetCreated(v *Resource) { + x.Created = v +} + +func (x *CreateResourceResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *CreateResourceResponse) HasCreated() bool { + if x == nil { + return false + } + return x.Created != nil +} + +func (x *CreateResourceResponse) ClearCreated() { + x.Created = nil +} + +type CreateResourceResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Created *Resource + Annotations []*anypb.Any +} + +func (b0 CreateResourceResponse_builder) Build() *CreateResourceResponse { + m0 := &CreateResourceResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Created = b.Created + x.Annotations = b.Annotations + return m0 +} + type DeleteResourceRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` unknownFields protoimpl.UnknownFields @@ -473,11 +645,6 @@ func (x *DeleteResourceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteResourceRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteResourceRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{5} -} - func (x *DeleteResourceRequest) GetResourceId() *ResourceId { if x != nil { return x.ResourceId @@ -492,8 +659,54 @@ func (x *DeleteResourceRequest) GetParentResourceId() *ResourceId { return nil } +func (x *DeleteResourceRequest) SetResourceId(v *ResourceId) { + x.ResourceId = v +} + +func (x *DeleteResourceRequest) SetParentResourceId(v *ResourceId) { + x.ParentResourceId = v +} + +func (x *DeleteResourceRequest) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *DeleteResourceRequest) HasParentResourceId() bool { + if x == nil { + return false + } + return x.ParentResourceId != nil +} + +func (x *DeleteResourceRequest) ClearResourceId() { + x.ResourceId = nil +} + +func (x *DeleteResourceRequest) ClearParentResourceId() { + x.ParentResourceId = nil +} + +type DeleteResourceRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + ParentResourceId *ResourceId +} + +func (b0 DeleteResourceRequest_builder) Build() *DeleteResourceRequest { + m0 := &DeleteResourceRequest{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.ParentResourceId = b.ParentResourceId + return m0 +} + type DeleteResourceResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -524,11 +737,6 @@ func (x *DeleteResourceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteResourceResponse.ProtoReflect.Descriptor instead. -func (*DeleteResourceResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{6} -} - func (x *DeleteResourceResponse) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -536,8 +744,26 @@ func (x *DeleteResourceResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *DeleteResourceResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type DeleteResourceResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 DeleteResourceResponse_builder) Build() *DeleteResourceResponse { + m0 := &DeleteResourceResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type DeleteResourceV2Request struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` unknownFields protoimpl.UnknownFields @@ -569,11 +795,6 @@ func (x *DeleteResourceV2Request) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteResourceV2Request.ProtoReflect.Descriptor instead. 
-func (*DeleteResourceV2Request) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{7} -} - func (x *DeleteResourceV2Request) GetResourceId() *ResourceId { if x != nil { return x.ResourceId @@ -588,8 +809,54 @@ func (x *DeleteResourceV2Request) GetParentResourceId() *ResourceId { return nil } +func (x *DeleteResourceV2Request) SetResourceId(v *ResourceId) { + x.ResourceId = v +} + +func (x *DeleteResourceV2Request) SetParentResourceId(v *ResourceId) { + x.ParentResourceId = v +} + +func (x *DeleteResourceV2Request) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *DeleteResourceV2Request) HasParentResourceId() bool { + if x == nil { + return false + } + return x.ParentResourceId != nil +} + +func (x *DeleteResourceV2Request) ClearResourceId() { + x.ResourceId = nil +} + +func (x *DeleteResourceV2Request) ClearParentResourceId() { + x.ParentResourceId = nil +} + +type DeleteResourceV2Request_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + ParentResourceId *ResourceId +} + +func (b0 DeleteResourceV2Request_builder) Build() *DeleteResourceV2Request { + m0 := &DeleteResourceV2Request{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.ParentResourceId = b.ParentResourceId + return m0 +} + type DeleteResourceV2Response struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -620,11 +887,6 @@ func (x *DeleteResourceV2Response) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteResourceV2Response.ProtoReflect.Descriptor instead. -func (*DeleteResourceV2Response) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{8} -} - func (x *DeleteResourceV2Response) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -632,8 +894,26 @@ func (x *DeleteResourceV2Response) GetAnnotations() []*anypb.Any { return nil } +func (x *DeleteResourceV2Response) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type DeleteResourceV2Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 DeleteResourceV2Response_builder) Build() *DeleteResourceV2Response { + m0 := &DeleteResourceV2Response{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type RotateCredentialRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` CredentialOptions *CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3" json:"credential_options,omitempty"` EncryptionConfigs []*EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3" json:"encryption_configs,omitempty"` @@ -666,11 +946,6 @@ func (x *RotateCredentialRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RotateCredentialRequest.ProtoReflect.Descriptor instead. 
-func (*RotateCredentialRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{9} -} - func (x *RotateCredentialRequest) GetResourceId() *ResourceId { if x != nil { return x.ResourceId @@ -692,8 +967,60 @@ func (x *RotateCredentialRequest) GetEncryptionConfigs() []*EncryptionConfig { return nil } +func (x *RotateCredentialRequest) SetResourceId(v *ResourceId) { + x.ResourceId = v +} + +func (x *RotateCredentialRequest) SetCredentialOptions(v *CredentialOptions) { + x.CredentialOptions = v +} + +func (x *RotateCredentialRequest) SetEncryptionConfigs(v []*EncryptionConfig) { + x.EncryptionConfigs = v +} + +func (x *RotateCredentialRequest) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *RotateCredentialRequest) HasCredentialOptions() bool { + if x == nil { + return false + } + return x.CredentialOptions != nil +} + +func (x *RotateCredentialRequest) ClearResourceId() { + x.ResourceId = nil +} + +func (x *RotateCredentialRequest) ClearCredentialOptions() { + x.CredentialOptions = nil +} + +type RotateCredentialRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + CredentialOptions *CredentialOptions + EncryptionConfigs []*EncryptionConfig +} + +func (b0 RotateCredentialRequest_builder) Build() *RotateCredentialRequest { + m0 := &RotateCredentialRequest{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.CredentialOptions = b.CredentialOptions + x.EncryptionConfigs = b.EncryptionConfigs + return m0 +} + type RotateCredentialResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` EncryptedData []*EncryptedData `protobuf:"bytes,1,rep,name=encrypted_data,json=encryptedData,proto3" json:"encrypted_data,omitempty"` ResourceId *ResourceId `protobuf:"bytes,2,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -726,11 +1053,6 @@ func (x *RotateCredentialResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RotateCredentialResponse.ProtoReflect.Descriptor instead. -func (*RotateCredentialResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{10} -} - func (x *RotateCredentialResponse) GetEncryptedData() []*EncryptedData { if x != nil { return x.EncryptedData @@ -752,8 +1074,49 @@ func (x *RotateCredentialResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *RotateCredentialResponse) SetEncryptedData(v []*EncryptedData) { + x.EncryptedData = v +} + +func (x *RotateCredentialResponse) SetResourceId(v *ResourceId) { + x.ResourceId = v +} + +func (x *RotateCredentialResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *RotateCredentialResponse) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *RotateCredentialResponse) ClearResourceId() { + x.ResourceId = nil +} + +type RotateCredentialResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + EncryptedData []*EncryptedData + ResourceId *ResourceId + Annotations []*anypb.Any +} + +func (b0 RotateCredentialResponse_builder) Build() *RotateCredentialResponse { + m0 := &RotateCredentialResponse{} + b, x := &b0, m0 + _, _ = b, x + x.EncryptedData = b.EncryptedData + x.ResourceId = b.ResourceId + x.Annotations = b.Annotations + return m0 +} + type AccountInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Emails []*AccountInfo_Email `protobuf:"bytes,1,rep,name=emails,proto3" json:"emails,omitempty"` // The user's login Login string `protobuf:"bytes,2,opt,name=login,proto3" json:"login,omitempty"` @@ -789,11 +1152,6 @@ func (x *AccountInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AccountInfo.ProtoReflect.Descriptor instead. -func (*AccountInfo) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{11} -} - func (x *AccountInfo) GetEmails() []*AccountInfo_Email { if x != nil { return x.Emails @@ -822,8 +1180,57 @@ func (x *AccountInfo) GetProfile() *structpb.Struct { return nil } +func (x *AccountInfo) SetEmails(v []*AccountInfo_Email) { + x.Emails = v +} + +func (x *AccountInfo) SetLogin(v string) { + x.Login = v +} + +func (x *AccountInfo) SetLoginAliases(v []string) { + x.LoginAliases = v +} + +func (x *AccountInfo) SetProfile(v *structpb.Struct) { + x.Profile = v +} + +func (x *AccountInfo) HasProfile() bool { + if x == nil { + return false + } + return x.Profile != nil +} + +func (x *AccountInfo) ClearProfile() { + x.Profile = nil +} + +type AccountInfo_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Emails []*AccountInfo_Email + // The user's login + Login string + // Any additional login aliases for the user + LoginAliases []string + Profile *structpb.Struct +} + +func (b0 AccountInfo_builder) Build() *AccountInfo { + m0 := &AccountInfo{} + b, x := &b0, m0 + _, _ = b, x + x.Emails = b.Emails + x.Login = b.Login + x.LoginAliases = b.LoginAliases + x.Profile = b.Profile + return m0 +} + type CredentialOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Types that are valid to be assigned to Options: // // *CredentialOptions_RandomPassword_ @@ -861,11 +1268,6 @@ func (x *CredentialOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CredentialOptions.ProtoReflect.Descriptor instead. 
-func (*CredentialOptions) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{12} -} - func (x *CredentialOptions) GetOptions() isCredentialOptions_Options { if x != nil { return x.Options @@ -916,42 +1318,211 @@ func (x *CredentialOptions) GetForceChangeAtNextLogin() bool { return false } -type isCredentialOptions_Options interface { - isCredentialOptions_Options() +func (x *CredentialOptions) SetRandomPassword(v *CredentialOptions_RandomPassword) { + if v == nil { + x.Options = nil + return + } + x.Options = &CredentialOptions_RandomPassword_{v} } -type CredentialOptions_RandomPassword_ struct { - RandomPassword *CredentialOptions_RandomPassword `protobuf:"bytes,100,opt,name=random_password,json=randomPassword,proto3,oneof"` +func (x *CredentialOptions) SetNoPassword(v *CredentialOptions_NoPassword) { + if v == nil { + x.Options = nil + return + } + x.Options = &CredentialOptions_NoPassword_{v} } -type CredentialOptions_NoPassword_ struct { - NoPassword *CredentialOptions_NoPassword `protobuf:"bytes,101,opt,name=no_password,json=noPassword,proto3,oneof"` +func (x *CredentialOptions) SetSso(v *CredentialOptions_SSO) { + if v == nil { + x.Options = nil + return + } + x.Options = &CredentialOptions_Sso{v} } -type CredentialOptions_Sso struct { - Sso *CredentialOptions_SSO `protobuf:"bytes,102,opt,name=sso,proto3,oneof"` +func (x *CredentialOptions) SetEncryptedPassword(v *CredentialOptions_EncryptedPassword) { + if v == nil { + x.Options = nil + return + } + x.Options = &CredentialOptions_EncryptedPassword_{v} } -type CredentialOptions_EncryptedPassword_ struct { - EncryptedPassword *CredentialOptions_EncryptedPassword `protobuf:"bytes,103,opt,name=encrypted_password,json=encryptedPassword,proto3,oneof"` +func (x *CredentialOptions) SetForceChangeAtNextLogin(v bool) { + x.ForceChangeAtNextLogin = v } -func (*CredentialOptions_RandomPassword_) isCredentialOptions_Options() {} +func (x *CredentialOptions) HasOptions() bool { + if x == nil { + return false + } + return x.Options != nil +} -func (*CredentialOptions_NoPassword_) isCredentialOptions_Options() {} +func (x *CredentialOptions) HasRandomPassword() bool { + if x == nil { + return false + } + _, ok := x.Options.(*CredentialOptions_RandomPassword_) + return ok +} -func (*CredentialOptions_Sso) isCredentialOptions_Options() {} +func (x *CredentialOptions) HasNoPassword() bool { + if x == nil { + return false + } + _, ok := x.Options.(*CredentialOptions_NoPassword_) + return ok +} -func (*CredentialOptions_EncryptedPassword_) isCredentialOptions_Options() {} +func (x *CredentialOptions) HasSso() bool { + if x == nil { + return false + } + _, ok := x.Options.(*CredentialOptions_Sso) + return ok +} -// Do not use this in any RPC or any message that is in an RPC. 
-type LocalCredentialOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Options: - // - // *LocalCredentialOptions_RandomPassword_ - // *LocalCredentialOptions_NoPassword_ - // *LocalCredentialOptions_Sso +func (x *CredentialOptions) HasEncryptedPassword() bool { + if x == nil { + return false + } + _, ok := x.Options.(*CredentialOptions_EncryptedPassword_) + return ok +} + +func (x *CredentialOptions) ClearOptions() { + x.Options = nil +} + +func (x *CredentialOptions) ClearRandomPassword() { + if _, ok := x.Options.(*CredentialOptions_RandomPassword_); ok { + x.Options = nil + } +} + +func (x *CredentialOptions) ClearNoPassword() { + if _, ok := x.Options.(*CredentialOptions_NoPassword_); ok { + x.Options = nil + } +} + +func (x *CredentialOptions) ClearSso() { + if _, ok := x.Options.(*CredentialOptions_Sso); ok { + x.Options = nil + } +} + +func (x *CredentialOptions) ClearEncryptedPassword() { + if _, ok := x.Options.(*CredentialOptions_EncryptedPassword_); ok { + x.Options = nil + } +} + +const CredentialOptions_Options_not_set_case case_CredentialOptions_Options = 0 +const CredentialOptions_RandomPassword_case case_CredentialOptions_Options = 100 +const CredentialOptions_NoPassword_case case_CredentialOptions_Options = 101 +const CredentialOptions_Sso_case case_CredentialOptions_Options = 102 +const CredentialOptions_EncryptedPassword_case case_CredentialOptions_Options = 103 + +func (x *CredentialOptions) WhichOptions() case_CredentialOptions_Options { + if x == nil { + return CredentialOptions_Options_not_set_case + } + switch x.Options.(type) { + case *CredentialOptions_RandomPassword_: + return CredentialOptions_RandomPassword_case + case *CredentialOptions_NoPassword_: + return CredentialOptions_NoPassword_case + case *CredentialOptions_Sso: + return CredentialOptions_Sso_case + case *CredentialOptions_EncryptedPassword_: + return CredentialOptions_EncryptedPassword_case + default: + return CredentialOptions_Options_not_set_case + } +} + +type CredentialOptions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Fields of oneof Options: + RandomPassword *CredentialOptions_RandomPassword + NoPassword *CredentialOptions_NoPassword + Sso *CredentialOptions_SSO + EncryptedPassword *CredentialOptions_EncryptedPassword + // -- end of Options + ForceChangeAtNextLogin bool +} + +func (b0 CredentialOptions_builder) Build() *CredentialOptions { + m0 := &CredentialOptions{} + b, x := &b0, m0 + _, _ = b, x + if b.RandomPassword != nil { + x.Options = &CredentialOptions_RandomPassword_{b.RandomPassword} + } + if b.NoPassword != nil { + x.Options = &CredentialOptions_NoPassword_{b.NoPassword} + } + if b.Sso != nil { + x.Options = &CredentialOptions_Sso{b.Sso} + } + if b.EncryptedPassword != nil { + x.Options = &CredentialOptions_EncryptedPassword_{b.EncryptedPassword} + } + x.ForceChangeAtNextLogin = b.ForceChangeAtNextLogin + return m0 +} + +type case_CredentialOptions_Options protoreflect.FieldNumber + +func (x case_CredentialOptions_Options) String() string { + md := file_c1_connector_v2_resource_proto_msgTypes[12].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isCredentialOptions_Options interface { + isCredentialOptions_Options() +} + +type CredentialOptions_RandomPassword_ struct { + RandomPassword *CredentialOptions_RandomPassword `protobuf:"bytes,100,opt,name=random_password,json=randomPassword,proto3,oneof"` +} + +type CredentialOptions_NoPassword_ struct { + NoPassword *CredentialOptions_NoPassword `protobuf:"bytes,101,opt,name=no_password,json=noPassword,proto3,oneof"` +} + +type CredentialOptions_Sso struct { + Sso *CredentialOptions_SSO `protobuf:"bytes,102,opt,name=sso,proto3,oneof"` +} + +type CredentialOptions_EncryptedPassword_ struct { + EncryptedPassword *CredentialOptions_EncryptedPassword `protobuf:"bytes,103,opt,name=encrypted_password,json=encryptedPassword,proto3,oneof"` +} + +func (*CredentialOptions_RandomPassword_) isCredentialOptions_Options() {} + +func (*CredentialOptions_NoPassword_) isCredentialOptions_Options() {} + +func (*CredentialOptions_Sso) isCredentialOptions_Options() {} + +func (*CredentialOptions_EncryptedPassword_) isCredentialOptions_Options() {} + +// Do not use this in any RPC or any message that is in an RPC. +type LocalCredentialOptions struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // Types that are valid to be assigned to Options: + // + // *LocalCredentialOptions_RandomPassword_ + // *LocalCredentialOptions_NoPassword_ + // *LocalCredentialOptions_Sso // *LocalCredentialOptions_PlaintextPassword_ Options isLocalCredentialOptions_Options `protobuf_oneof:"options"` ForceChangeAtNextLogin bool `protobuf:"varint,1,opt,name=force_change_at_next_login,json=forceChangeAtNextLogin,proto3" json:"force_change_at_next_login,omitempty"` @@ -984,11 +1555,6 @@ func (x *LocalCredentialOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use LocalCredentialOptions.ProtoReflect.Descriptor instead. 
-func (*LocalCredentialOptions) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{13} -} - func (x *LocalCredentialOptions) GetOptions() isLocalCredentialOptions_Options { if x != nil { return x.Options @@ -1039,6 +1605,175 @@ func (x *LocalCredentialOptions) GetForceChangeAtNextLogin() bool { return false } +func (x *LocalCredentialOptions) SetRandomPassword(v *LocalCredentialOptions_RandomPassword) { + if v == nil { + x.Options = nil + return + } + x.Options = &LocalCredentialOptions_RandomPassword_{v} +} + +func (x *LocalCredentialOptions) SetNoPassword(v *LocalCredentialOptions_NoPassword) { + if v == nil { + x.Options = nil + return + } + x.Options = &LocalCredentialOptions_NoPassword_{v} +} + +func (x *LocalCredentialOptions) SetSso(v *LocalCredentialOptions_SSO) { + if v == nil { + x.Options = nil + return + } + x.Options = &LocalCredentialOptions_Sso{v} +} + +func (x *LocalCredentialOptions) SetPlaintextPassword(v *LocalCredentialOptions_PlaintextPassword) { + if v == nil { + x.Options = nil + return + } + x.Options = &LocalCredentialOptions_PlaintextPassword_{v} +} + +func (x *LocalCredentialOptions) SetForceChangeAtNextLogin(v bool) { + x.ForceChangeAtNextLogin = v +} + +func (x *LocalCredentialOptions) HasOptions() bool { + if x == nil { + return false + } + return x.Options != nil +} + +func (x *LocalCredentialOptions) HasRandomPassword() bool { + if x == nil { + return false + } + _, ok := x.Options.(*LocalCredentialOptions_RandomPassword_) + return ok +} + +func (x *LocalCredentialOptions) HasNoPassword() bool { + if x == nil { + return false + } + _, ok := x.Options.(*LocalCredentialOptions_NoPassword_) + return ok +} + +func (x *LocalCredentialOptions) HasSso() bool { + if x == nil { + return false + } + _, ok := x.Options.(*LocalCredentialOptions_Sso) + return ok +} + +func (x *LocalCredentialOptions) HasPlaintextPassword() bool { + if x == nil { + return false + } + _, ok := x.Options.(*LocalCredentialOptions_PlaintextPassword_) + return ok +} + +func (x *LocalCredentialOptions) ClearOptions() { + x.Options = nil +} + +func (x *LocalCredentialOptions) ClearRandomPassword() { + if _, ok := x.Options.(*LocalCredentialOptions_RandomPassword_); ok { + x.Options = nil + } +} + +func (x *LocalCredentialOptions) ClearNoPassword() { + if _, ok := x.Options.(*LocalCredentialOptions_NoPassword_); ok { + x.Options = nil + } +} + +func (x *LocalCredentialOptions) ClearSso() { + if _, ok := x.Options.(*LocalCredentialOptions_Sso); ok { + x.Options = nil + } +} + +func (x *LocalCredentialOptions) ClearPlaintextPassword() { + if _, ok := x.Options.(*LocalCredentialOptions_PlaintextPassword_); ok { + x.Options = nil + } +} + +const LocalCredentialOptions_Options_not_set_case case_LocalCredentialOptions_Options = 0 +const LocalCredentialOptions_RandomPassword_case case_LocalCredentialOptions_Options = 100 +const LocalCredentialOptions_NoPassword_case case_LocalCredentialOptions_Options = 101 +const LocalCredentialOptions_Sso_case case_LocalCredentialOptions_Options = 102 +const LocalCredentialOptions_PlaintextPassword_case case_LocalCredentialOptions_Options = 103 + +func (x *LocalCredentialOptions) WhichOptions() case_LocalCredentialOptions_Options { + if x == nil { + return LocalCredentialOptions_Options_not_set_case + } + switch x.Options.(type) { + case *LocalCredentialOptions_RandomPassword_: + return LocalCredentialOptions_RandomPassword_case + case *LocalCredentialOptions_NoPassword_: + return LocalCredentialOptions_NoPassword_case + 
case *LocalCredentialOptions_Sso: + return LocalCredentialOptions_Sso_case + case *LocalCredentialOptions_PlaintextPassword_: + return LocalCredentialOptions_PlaintextPassword_case + default: + return LocalCredentialOptions_Options_not_set_case + } +} + +type LocalCredentialOptions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Fields of oneof Options: + RandomPassword *LocalCredentialOptions_RandomPassword + NoPassword *LocalCredentialOptions_NoPassword + Sso *LocalCredentialOptions_SSO + PlaintextPassword *LocalCredentialOptions_PlaintextPassword + // -- end of Options + ForceChangeAtNextLogin bool +} + +func (b0 LocalCredentialOptions_builder) Build() *LocalCredentialOptions { + m0 := &LocalCredentialOptions{} + b, x := &b0, m0 + _, _ = b, x + if b.RandomPassword != nil { + x.Options = &LocalCredentialOptions_RandomPassword_{b.RandomPassword} + } + if b.NoPassword != nil { + x.Options = &LocalCredentialOptions_NoPassword_{b.NoPassword} + } + if b.Sso != nil { + x.Options = &LocalCredentialOptions_Sso{b.Sso} + } + if b.PlaintextPassword != nil { + x.Options = &LocalCredentialOptions_PlaintextPassword_{b.PlaintextPassword} + } + x.ForceChangeAtNextLogin = b.ForceChangeAtNextLogin + return m0 +} + +type case_LocalCredentialOptions_Options protoreflect.FieldNumber + +func (x case_LocalCredentialOptions_Options) String() string { + md := file_c1_connector_v2_resource_proto_msgTypes[13].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isLocalCredentialOptions_Options interface { isLocalCredentialOptions_Options() } @@ -1068,7 +1803,7 @@ func (*LocalCredentialOptions_Sso) isLocalCredentialOptions_Options() {} func (*LocalCredentialOptions_PlaintextPassword_) isLocalCredentialOptions_Options() {} type PasswordConstraint struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` CharSet string `protobuf:"bytes,1,opt,name=char_set,json=charSet,proto3" json:"char_set,omitempty"` MinCount uint32 `protobuf:"varint,2,opt,name=min_count,json=minCount,proto3" json:"min_count,omitempty"` unknownFields protoimpl.UnknownFields @@ -1100,11 +1835,6 @@ func (x *PasswordConstraint) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PasswordConstraint.ProtoReflect.Descriptor instead. -func (*PasswordConstraint) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{14} -} - func (x *PasswordConstraint) GetCharSet() string { if x != nil { return x.CharSet @@ -1119,8 +1849,32 @@ func (x *PasswordConstraint) GetMinCount() uint32 { return 0 } +func (x *PasswordConstraint) SetCharSet(v string) { + x.CharSet = v +} + +func (x *PasswordConstraint) SetMinCount(v uint32) { + x.MinCount = v +} + +type PasswordConstraint_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + CharSet string + MinCount uint32 +} + +func (b0 PasswordConstraint_builder) Build() *PasswordConstraint { + m0 := &PasswordConstraint{} + b, x := &b0, m0 + _, _ = b, x + x.CharSet = b.CharSet + x.MinCount = b.MinCount + return m0 +} + type CreateAccountRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` AccountInfo *AccountInfo `protobuf:"bytes,1,opt,name=account_info,json=accountInfo,proto3" json:"account_info,omitempty"` CredentialOptions *CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3" json:"credential_options,omitempty"` EncryptionConfigs []*EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3" json:"encryption_configs,omitempty"` @@ -1153,11 +1907,6 @@ func (x *CreateAccountRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateAccountRequest.ProtoReflect.Descriptor instead. -func (*CreateAccountRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{15} -} - func (x *CreateAccountRequest) GetAccountInfo() *AccountInfo { if x != nil { return x.AccountInfo @@ -1179,12 +1928,66 @@ func (x *CreateAccountRequest) GetEncryptionConfigs() []*EncryptionConfig { return nil } +func (x *CreateAccountRequest) SetAccountInfo(v *AccountInfo) { + x.AccountInfo = v +} + +func (x *CreateAccountRequest) SetCredentialOptions(v *CredentialOptions) { + x.CredentialOptions = v +} + +func (x *CreateAccountRequest) SetEncryptionConfigs(v []*EncryptionConfig) { + x.EncryptionConfigs = v +} + +func (x *CreateAccountRequest) HasAccountInfo() bool { + if x == nil { + return false + } + return x.AccountInfo != nil +} + +func (x *CreateAccountRequest) HasCredentialOptions() bool { + if x == nil { + return false + } + return x.CredentialOptions != nil +} + +func (x *CreateAccountRequest) ClearAccountInfo() { + x.AccountInfo = nil +} + +func (x *CreateAccountRequest) ClearCredentialOptions() { + x.CredentialOptions = nil +} + +type CreateAccountRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + AccountInfo *AccountInfo + CredentialOptions *CredentialOptions + EncryptionConfigs []*EncryptionConfig +} + +func (b0 CreateAccountRequest_builder) Build() *CreateAccountRequest { + m0 := &CreateAccountRequest{} + b, x := &b0, m0 + _, _ = b, x + x.AccountInfo = b.AccountInfo + x.CredentialOptions = b.CredentialOptions + x.EncryptionConfigs = b.EncryptionConfigs + return m0 +} + type CreateAccountResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Types that are valid to be assigned to Result: // // *CreateAccountResponse_Success // *CreateAccountResponse_ActionRequired + // *CreateAccountResponse_AlreadyExists + // *CreateAccountResponse_InProgress Result isCreateAccountResponse_Result `protobuf_oneof:"result"` EncryptedData []*EncryptedData `protobuf:"bytes,2,rep,name=encrypted_data,json=encryptedData,proto3" json:"encrypted_data,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -1217,11 +2020,6 @@ func (x *CreateAccountResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateAccountResponse.ProtoReflect.Descriptor instead. 
-func (*CreateAccountResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{16} -} - func (x *CreateAccountResponse) GetResult() isCreateAccountResponse_Result { if x != nil { return x.Result @@ -1247,6 +2045,24 @@ func (x *CreateAccountResponse) GetActionRequired() *CreateAccountResponse_Actio return nil } +func (x *CreateAccountResponse) GetAlreadyExists() *CreateAccountResponse_AlreadyExistsResult { + if x != nil { + if x, ok := x.Result.(*CreateAccountResponse_AlreadyExists); ok { + return x.AlreadyExists + } + } + return nil +} + +func (x *CreateAccountResponse) GetInProgress() *CreateAccountResponse_InProgressResult { + if x != nil { + if x, ok := x.Result.(*CreateAccountResponse_InProgress); ok { + return x.InProgress + } + } + return nil +} + func (x *CreateAccountResponse) GetEncryptedData() []*EncryptedData { if x != nil { return x.EncryptedData @@ -1261,6 +2077,181 @@ func (x *CreateAccountResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *CreateAccountResponse) SetSuccess(v *CreateAccountResponse_SuccessResult) { + if v == nil { + x.Result = nil + return + } + x.Result = &CreateAccountResponse_Success{v} +} + +func (x *CreateAccountResponse) SetActionRequired(v *CreateAccountResponse_ActionRequiredResult) { + if v == nil { + x.Result = nil + return + } + x.Result = &CreateAccountResponse_ActionRequired{v} +} + +func (x *CreateAccountResponse) SetAlreadyExists(v *CreateAccountResponse_AlreadyExistsResult) { + if v == nil { + x.Result = nil + return + } + x.Result = &CreateAccountResponse_AlreadyExists{v} +} + +func (x *CreateAccountResponse) SetInProgress(v *CreateAccountResponse_InProgressResult) { + if v == nil { + x.Result = nil + return + } + x.Result = &CreateAccountResponse_InProgress{v} +} + +func (x *CreateAccountResponse) SetEncryptedData(v []*EncryptedData) { + x.EncryptedData = v +} + +func (x *CreateAccountResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *CreateAccountResponse) HasResult() bool { + if x == nil { + return false + } + return x.Result != nil +} + +func (x *CreateAccountResponse) HasSuccess() bool { + if x == nil { + return false + } + _, ok := x.Result.(*CreateAccountResponse_Success) + return ok +} + +func (x *CreateAccountResponse) HasActionRequired() bool { + if x == nil { + return false + } + _, ok := x.Result.(*CreateAccountResponse_ActionRequired) + return ok +} + +func (x *CreateAccountResponse) HasAlreadyExists() bool { + if x == nil { + return false + } + _, ok := x.Result.(*CreateAccountResponse_AlreadyExists) + return ok +} + +func (x *CreateAccountResponse) HasInProgress() bool { + if x == nil { + return false + } + _, ok := x.Result.(*CreateAccountResponse_InProgress) + return ok +} + +func (x *CreateAccountResponse) ClearResult() { + x.Result = nil +} + +func (x *CreateAccountResponse) ClearSuccess() { + if _, ok := x.Result.(*CreateAccountResponse_Success); ok { + x.Result = nil + } +} + +func (x *CreateAccountResponse) ClearActionRequired() { + if _, ok := x.Result.(*CreateAccountResponse_ActionRequired); ok { + x.Result = nil + } +} + +func (x *CreateAccountResponse) ClearAlreadyExists() { + if _, ok := x.Result.(*CreateAccountResponse_AlreadyExists); ok { + x.Result = nil + } +} + +func (x *CreateAccountResponse) ClearInProgress() { + if _, ok := x.Result.(*CreateAccountResponse_InProgress); ok { + x.Result = nil + } +} + +const CreateAccountResponse_Result_not_set_case case_CreateAccountResponse_Result = 0 +const 
CreateAccountResponse_Success_case case_CreateAccountResponse_Result = 100 +const CreateAccountResponse_ActionRequired_case case_CreateAccountResponse_Result = 101 +const CreateAccountResponse_AlreadyExists_case case_CreateAccountResponse_Result = 102 +const CreateAccountResponse_InProgress_case case_CreateAccountResponse_Result = 103 + +func (x *CreateAccountResponse) WhichResult() case_CreateAccountResponse_Result { + if x == nil { + return CreateAccountResponse_Result_not_set_case + } + switch x.Result.(type) { + case *CreateAccountResponse_Success: + return CreateAccountResponse_Success_case + case *CreateAccountResponse_ActionRequired: + return CreateAccountResponse_ActionRequired_case + case *CreateAccountResponse_AlreadyExists: + return CreateAccountResponse_AlreadyExists_case + case *CreateAccountResponse_InProgress: + return CreateAccountResponse_InProgress_case + default: + return CreateAccountResponse_Result_not_set_case + } +} + +type CreateAccountResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Fields of oneof Result: + Success *CreateAccountResponse_SuccessResult + ActionRequired *CreateAccountResponse_ActionRequiredResult + AlreadyExists *CreateAccountResponse_AlreadyExistsResult + InProgress *CreateAccountResponse_InProgressResult + // -- end of Result + EncryptedData []*EncryptedData + Annotations []*anypb.Any +} + +func (b0 CreateAccountResponse_builder) Build() *CreateAccountResponse { + m0 := &CreateAccountResponse{} + b, x := &b0, m0 + _, _ = b, x + if b.Success != nil { + x.Result = &CreateAccountResponse_Success{b.Success} + } + if b.ActionRequired != nil { + x.Result = &CreateAccountResponse_ActionRequired{b.ActionRequired} + } + if b.AlreadyExists != nil { + x.Result = &CreateAccountResponse_AlreadyExists{b.AlreadyExists} + } + if b.InProgress != nil { + x.Result = &CreateAccountResponse_InProgress{b.InProgress} + } + x.EncryptedData = b.EncryptedData + x.Annotations = b.Annotations + return m0 +} + +type case_CreateAccountResponse_Result protoreflect.FieldNumber + +func (x case_CreateAccountResponse_Result) String() string { + md := file_c1_connector_v2_resource_proto_msgTypes[16].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isCreateAccountResponse_Result interface { isCreateAccountResponse_Result() } @@ -1273,12 +2264,24 @@ type CreateAccountResponse_ActionRequired struct { ActionRequired *CreateAccountResponse_ActionRequiredResult `protobuf:"bytes,101,opt,name=action_required,json=actionRequired,proto3,oneof"` } +type CreateAccountResponse_AlreadyExists struct { + AlreadyExists *CreateAccountResponse_AlreadyExistsResult `protobuf:"bytes,102,opt,name=already_exists,json=alreadyExists,proto3,oneof"` +} + +type CreateAccountResponse_InProgress struct { + InProgress *CreateAccountResponse_InProgressResult `protobuf:"bytes,103,opt,name=in_progress,json=inProgress,proto3,oneof"` +} + func (*CreateAccountResponse_Success) isCreateAccountResponse_Result() {} func (*CreateAccountResponse_ActionRequired) isCreateAccountResponse_Result() {} +func (*CreateAccountResponse_AlreadyExists) isCreateAccountResponse_Result() {} + +func (*CreateAccountResponse_InProgress) isCreateAccountResponse_Result() {} + type EncryptedData struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Provider string `protobuf:"bytes,1,opt,name=provider,proto3" 
json:"provider,omitempty"` // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. KeyId string `protobuf:"bytes,2,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` @@ -1316,11 +2319,6 @@ func (x *EncryptedData) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EncryptedData.ProtoReflect.Descriptor instead. -func (*EncryptedData) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{17} -} - func (x *EncryptedData) GetProvider() string { if x != nil { return x.Provider @@ -1371,8 +2369,67 @@ func (x *EncryptedData) GetKeyIds() []string { return nil } +func (x *EncryptedData) SetProvider(v string) { + x.Provider = v +} + +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. +func (x *EncryptedData) SetKeyId(v string) { + x.KeyId = v +} + +func (x *EncryptedData) SetName(v string) { + x.Name = v +} + +func (x *EncryptedData) SetDescription(v string) { + x.Description = v +} + +func (x *EncryptedData) SetSchema(v string) { + x.Schema = v +} + +func (x *EncryptedData) SetEncryptedBytes(v []byte) { + if v == nil { + v = []byte{} + } + x.EncryptedBytes = v +} + +func (x *EncryptedData) SetKeyIds(v []string) { + x.KeyIds = v +} + +type EncryptedData_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Provider string + // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. + KeyId string + Name string + Description string + Schema string + EncryptedBytes []byte + KeyIds []string +} + +func (b0 EncryptedData_builder) Build() *EncryptedData { + m0 := &EncryptedData{} + b, x := &b0, m0 + _, _ = b, x + x.Provider = b.Provider + x.KeyId = b.KeyId + x.Name = b.Name + x.Description = b.Description + x.Schema = b.Schema + x.EncryptedBytes = b.EncryptedBytes + x.KeyIds = b.KeyIds + return m0 +} + type PlaintextData struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` Schema string `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"` // optional @@ -1406,11 +2463,6 @@ func (x *PlaintextData) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PlaintextData.ProtoReflect.Descriptor instead. -func (*PlaintextData) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{18} -} - func (x *PlaintextData) GetName() string { if x != nil { return x.Name @@ -1439,8 +2491,47 @@ func (x *PlaintextData) GetBytes() []byte { return nil } +func (x *PlaintextData) SetName(v string) { + x.Name = v +} + +func (x *PlaintextData) SetDescription(v string) { + x.Description = v +} + +func (x *PlaintextData) SetSchema(v string) { + x.Schema = v +} + +func (x *PlaintextData) SetBytes(v []byte) { + if v == nil { + v = []byte{} + } + x.Bytes = v +} + +type PlaintextData_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Description string + Schema string + Bytes []byte +} + +func (b0 PlaintextData_builder) Build() *PlaintextData { + m0 := &PlaintextData{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Description = b.Description + x.Schema = b.Schema + x.Bytes = b.Bytes + return m0 +} + type EncryptionConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Principal *Resource `protobuf:"bytes,1,opt,name=principal,proto3" json:"principal,omitempty"` Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider,omitempty"` KeyId string `protobuf:"bytes,3,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` @@ -1477,11 +2568,6 @@ func (x *EncryptionConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EncryptionConfig.ProtoReflect.Descriptor instead. -func (*EncryptionConfig) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{19} -} - func (x *EncryptionConfig) GetPrincipal() *Resource { if x != nil { return x.Principal @@ -1519,6 +2605,111 @@ func (x *EncryptionConfig) GetJwkPublicKeyConfig() *EncryptionConfig_JWKPublicKe return nil } +func (x *EncryptionConfig) SetPrincipal(v *Resource) { + x.Principal = v +} + +func (x *EncryptionConfig) SetProvider(v string) { + x.Provider = v +} + +func (x *EncryptionConfig) SetKeyId(v string) { + x.KeyId = v +} + +func (x *EncryptionConfig) SetJwkPublicKeyConfig(v *EncryptionConfig_JWKPublicKeyConfig) { + if v == nil { + x.Config = nil + return + } + x.Config = &EncryptionConfig_JwkPublicKeyConfig{v} +} + +func (x *EncryptionConfig) HasPrincipal() bool { + if x == nil { + return false + } + return x.Principal != nil +} + +func (x *EncryptionConfig) HasConfig() bool { + if x == nil { + return false + } + return x.Config != nil +} + +func (x *EncryptionConfig) HasJwkPublicKeyConfig() bool { + if x == nil { + return false + } + _, ok := x.Config.(*EncryptionConfig_JwkPublicKeyConfig) + return ok +} + +func (x *EncryptionConfig) ClearPrincipal() { + x.Principal = nil +} + +func (x *EncryptionConfig) ClearConfig() { + x.Config = nil +} + +func (x *EncryptionConfig) ClearJwkPublicKeyConfig() { + if _, ok := x.Config.(*EncryptionConfig_JwkPublicKeyConfig); ok { + x.Config = nil + } +} + +const EncryptionConfig_Config_not_set_case case_EncryptionConfig_Config = 0 +const EncryptionConfig_JwkPublicKeyConfig_case case_EncryptionConfig_Config = 100 + +func (x *EncryptionConfig) WhichConfig() case_EncryptionConfig_Config { + if x == nil { + return EncryptionConfig_Config_not_set_case + } + switch x.Config.(type) { + case *EncryptionConfig_JwkPublicKeyConfig: + return EncryptionConfig_JwkPublicKeyConfig_case + default: + return EncryptionConfig_Config_not_set_case + } +} + +type EncryptionConfig_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Principal *Resource + Provider string + KeyId string + // Fields of oneof Config: + JwkPublicKeyConfig *EncryptionConfig_JWKPublicKeyConfig + // -- end of Config +} + +func (b0 EncryptionConfig_builder) Build() *EncryptionConfig { + m0 := &EncryptionConfig{} + b, x := &b0, m0 + _, _ = b, x + x.Principal = b.Principal + x.Provider = b.Provider + x.KeyId = b.KeyId + if b.JwkPublicKeyConfig != nil { + x.Config = &EncryptionConfig_JwkPublicKeyConfig{b.JwkPublicKeyConfig} + } + return m0 +} + +type case_EncryptionConfig_Config protoreflect.FieldNumber + +func (x case_EncryptionConfig_Config) String() string { + md := file_c1_connector_v2_resource_proto_msgTypes[19].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isEncryptionConfig_Config interface { isEncryptionConfig_Config() } @@ -1530,7 +2721,7 @@ type EncryptionConfig_JwkPublicKeyConfig struct { func (*EncryptionConfig_JwkPublicKeyConfig) isEncryptionConfig_Config() {} type ResourceId struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` Resource string `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` BatonResource bool `protobuf:"varint,3,opt,name=baton_resource,json=batonResource,proto3" json:"baton_resource,omitempty"` @@ -1563,11 +2754,6 @@ func (x *ResourceId) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ResourceId.ProtoReflect.Descriptor instead. -func (*ResourceId) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{20} -} - func (x *ResourceId) GetResourceType() string { if x != nil { return x.ResourceType @@ -1589,8 +2775,38 @@ func (x *ResourceId) GetBatonResource() bool { return false } +func (x *ResourceId) SetResourceType(v string) { + x.ResourceType = v +} + +func (x *ResourceId) SetResource(v string) { + x.Resource = v +} + +func (x *ResourceId) SetBatonResource(v bool) { + x.BatonResource = v +} + +type ResourceId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceType string + Resource string + BatonResource bool +} + +func (b0 ResourceId_builder) Build() *ResourceId { + m0 := &ResourceId{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceType = b.ResourceType + x.Resource = b.Resource + x.BatonResource = b.BatonResource + return m0 +} + type Resource struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id *ResourceId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` @@ -1628,11 +2844,6 @@ func (x *Resource) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
-func (*Resource) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{21} -} - func (x *Resource) GetId() *ResourceId { if x != nil { return x.Id @@ -1689,13 +2900,107 @@ func (x *Resource) GetCreationSource() Resource_CreationSource { return Resource_CREATION_SOURCE_UNSPECIFIED } +func (x *Resource) SetId(v *ResourceId) { + x.Id = v +} + +func (x *Resource) SetParentResourceId(v *ResourceId) { + x.ParentResourceId = v +} + +func (x *Resource) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *Resource) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Resource) SetDescription(v string) { + x.Description = v +} + +func (x *Resource) SetBatonResource(v bool) { + x.BatonResource = v +} + +func (x *Resource) SetExternalId(v *ExternalId) { + x.ExternalId = v +} + +func (x *Resource) SetCreationSource(v Resource_CreationSource) { + x.CreationSource = v +} + +func (x *Resource) HasId() bool { + if x == nil { + return false + } + return x.Id != nil +} + +func (x *Resource) HasParentResourceId() bool { + if x == nil { + return false + } + return x.ParentResourceId != nil +} + +func (x *Resource) HasExternalId() bool { + if x == nil { + return false + } + return x.ExternalId != nil +} + +func (x *Resource) ClearId() { + x.Id = nil +} + +func (x *Resource) ClearParentResourceId() { + x.ParentResourceId = nil +} + +func (x *Resource) ClearExternalId() { + x.ExternalId = nil +} + +type Resource_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id *ResourceId + ParentResourceId *ResourceId + DisplayName string + Annotations []*anypb.Any + Description string + BatonResource bool + ExternalId *ExternalId + CreationSource Resource_CreationSource +} + +func (b0 Resource_builder) Build() *Resource { + m0 := &Resource{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.ParentResourceId = b.ParentResourceId + x.DisplayName = b.DisplayName + x.Annotations = b.Annotations + x.Description = b.Description + x.BatonResource = b.BatonResource + x.ExternalId = b.ExternalId + x.CreationSource = b.CreationSource + return m0 +} + type ResourcesServiceListResourcesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` PageSize uint32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty"` + ActiveSyncId string `protobuf:"bytes,6,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1725,11 +3030,6 @@ func (x *ResourcesServiceListResourcesRequest) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use ResourcesServiceListResourcesRequest.ProtoReflect.Descriptor instead. 
-func (*ResourcesServiceListResourcesRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{22} -} - func (x *ResourcesServiceListResourcesRequest) GetResourceTypeId() string { if x != nil { return x.ResourceTypeId @@ -1765,8 +3065,74 @@ func (x *ResourcesServiceListResourcesRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *ResourcesServiceListResourcesRequest) GetActiveSyncId() string { + if x != nil { + return x.ActiveSyncId + } + return "" +} + +func (x *ResourcesServiceListResourcesRequest) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +func (x *ResourcesServiceListResourcesRequest) SetParentResourceId(v *ResourceId) { + x.ParentResourceId = v +} + +func (x *ResourcesServiceListResourcesRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *ResourcesServiceListResourcesRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *ResourcesServiceListResourcesRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ResourcesServiceListResourcesRequest) SetActiveSyncId(v string) { + x.ActiveSyncId = v +} + +func (x *ResourcesServiceListResourcesRequest) HasParentResourceId() bool { + if x == nil { + return false + } + return x.ParentResourceId != nil +} + +func (x *ResourcesServiceListResourcesRequest) ClearParentResourceId() { + x.ParentResourceId = nil +} + +type ResourcesServiceListResourcesRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceTypeId string + ParentResourceId *ResourceId + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 ResourcesServiceListResourcesRequest_builder) Build() *ResourcesServiceListResourcesRequest { + m0 := &ResourcesServiceListResourcesRequest{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceTypeId = b.ResourceTypeId + x.ParentResourceId = b.ParentResourceId + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + x.ActiveSyncId = b.ActiveSyncId + return m0 +} + type ResourcesServiceListResourcesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` List []*Resource `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -1799,11 +3165,6 @@ func (x *ResourcesServiceListResourcesResponse) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use ResourcesServiceListResourcesResponse.ProtoReflect.Descriptor instead. 
-func (*ResourcesServiceListResourcesResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{23} -} - func (x *ResourcesServiceListResourcesResponse) GetList() []*Resource { if x != nil { return x.List @@ -1825,11 +3186,42 @@ func (x *ResourcesServiceListResourcesResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *ResourcesServiceListResourcesResponse) SetList(v []*Resource) { + x.List = v +} + +func (x *ResourcesServiceListResourcesResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} + +func (x *ResourcesServiceListResourcesResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type ResourcesServiceListResourcesResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*Resource + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 ResourcesServiceListResourcesResponse_builder) Build() *ResourcesServiceListResourcesResponse { + m0 := &ResourcesServiceListResourcesResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.NextPageToken = b.NextPageToken + x.Annotations = b.Annotations + return m0 +} + type ResourceGetterServiceGetResourceRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` + ActiveSyncId string `protobuf:"bytes,4,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1859,11 +3251,6 @@ func (x *ResourceGetterServiceGetResourceRequest) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use ResourceGetterServiceGetResourceRequest.ProtoReflect.Descriptor instead. 
-func (*ResourceGetterServiceGetResourceRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{24} -} - func (x *ResourceGetterServiceGetResourceRequest) GetResourceId() *ResourceId { if x != nil { return x.ResourceId @@ -1885,8 +3272,73 @@ func (x *ResourceGetterServiceGetResourceRequest) GetAnnotations() []*anypb.Any return nil } +func (x *ResourceGetterServiceGetResourceRequest) GetActiveSyncId() string { + if x != nil { + return x.ActiveSyncId + } + return "" +} + +func (x *ResourceGetterServiceGetResourceRequest) SetResourceId(v *ResourceId) { + x.ResourceId = v +} + +func (x *ResourceGetterServiceGetResourceRequest) SetParentResourceId(v *ResourceId) { + x.ParentResourceId = v +} + +func (x *ResourceGetterServiceGetResourceRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ResourceGetterServiceGetResourceRequest) SetActiveSyncId(v string) { + x.ActiveSyncId = v +} + +func (x *ResourceGetterServiceGetResourceRequest) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *ResourceGetterServiceGetResourceRequest) HasParentResourceId() bool { + if x == nil { + return false + } + return x.ParentResourceId != nil +} + +func (x *ResourceGetterServiceGetResourceRequest) ClearResourceId() { + x.ResourceId = nil +} + +func (x *ResourceGetterServiceGetResourceRequest) ClearParentResourceId() { + x.ParentResourceId = nil +} + +type ResourceGetterServiceGetResourceRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + ParentResourceId *ResourceId + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 ResourceGetterServiceGetResourceRequest_builder) Build() *ResourceGetterServiceGetResourceRequest { + m0 := &ResourceGetterServiceGetResourceRequest{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.ParentResourceId = b.ParentResourceId + x.Annotations = b.Annotations + x.ActiveSyncId = b.ActiveSyncId + return m0 +} + type ResourceGetterServiceGetResourceResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -1918,11 +3370,6 @@ func (x *ResourceGetterServiceGetResourceResponse) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use ResourceGetterServiceGetResourceResponse.ProtoReflect.Descriptor instead. 
-func (*ResourceGetterServiceGetResourceResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{25} -} - func (x *ResourceGetterServiceGetResourceResponse) GetResource() *Resource { if x != nil { return x.Resource @@ -1937,8 +3384,43 @@ func (x *ResourceGetterServiceGetResourceResponse) GetAnnotations() []*anypb.Any return nil } +func (x *ResourceGetterServiceGetResourceResponse) SetResource(v *Resource) { + x.Resource = v +} + +func (x *ResourceGetterServiceGetResourceResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ResourceGetterServiceGetResourceResponse) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *ResourceGetterServiceGetResourceResponse) ClearResource() { + x.Resource = nil +} + +type ResourceGetterServiceGetResourceResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *Resource + Annotations []*anypb.Any +} + +func (b0 ResourceGetterServiceGetResourceResponse_builder) Build() *ResourceGetterServiceGetResourceResponse { + m0 := &ResourceGetterServiceGetResourceResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + x.Annotations = b.Annotations + return m0 +} + type ExternalId struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Link string `protobuf:"bytes,2,opt,name=link,proto3" json:"link,omitempty"` Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` @@ -1971,11 +3453,6 @@ func (x *ExternalId) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalId.ProtoReflect.Descriptor instead. -func (*ExternalId) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{26} -} - func (x *ExternalId) GetId() string { if x != nil { return x.Id @@ -1997,8 +3474,38 @@ func (x *ExternalId) GetDescription() string { return "" } +func (x *ExternalId) SetId(v string) { + x.Id = v +} + +func (x *ExternalId) SetLink(v string) { + x.Link = v +} + +func (x *ExternalId) SetDescription(v string) { + x.Description = v +} + +type ExternalId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + Link string + Description string +} + +func (b0 ExternalId_builder) Build() *ExternalId { + m0 := &ExternalId{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.Link = b.Link + x.Description = b.Description + return m0 +} + type AccountInfo_Email struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Indicates if this is the user's primary email. Only one entry can be marked as primary. IsPrimary bool `protobuf:"varint,2,opt,name=is_primary,json=isPrimary,proto3" json:"is_primary,omitempty"` @@ -2031,11 +3538,6 @@ func (x *AccountInfo_Email) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AccountInfo_Email.ProtoReflect.Descriptor instead. 
-func (*AccountInfo_Email) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{11, 0} -} - func (x *AccountInfo_Email) GetAddress() string { if x != nil { return x.Address @@ -2050,8 +3552,33 @@ func (x *AccountInfo_Email) GetIsPrimary() bool { return false } +func (x *AccountInfo_Email) SetAddress(v string) { + x.Address = v +} + +func (x *AccountInfo_Email) SetIsPrimary(v bool) { + x.IsPrimary = v +} + +type AccountInfo_Email_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Address string + // Indicates if this is the user's primary email. Only one entry can be marked as primary. + IsPrimary bool +} + +func (b0 AccountInfo_Email_builder) Build() *AccountInfo_Email { + m0 := &AccountInfo_Email{} + b, x := &b0, m0 + _, _ = b, x + x.Address = b.Address + x.IsPrimary = b.IsPrimary + return m0 +} + type CredentialOptions_RandomPassword struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Length int64 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` Constraints []*PasswordConstraint `protobuf:"bytes,2,rep,name=constraints,proto3" json:"constraints,omitempty"` unknownFields protoimpl.UnknownFields @@ -2083,11 +3610,6 @@ func (x *CredentialOptions_RandomPassword) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CredentialOptions_RandomPassword.ProtoReflect.Descriptor instead. -func (*CredentialOptions_RandomPassword) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{12, 0} -} - func (x *CredentialOptions_RandomPassword) GetLength() int64 { if x != nil { return x.Length @@ -2102,8 +3624,32 @@ func (x *CredentialOptions_RandomPassword) GetConstraints() []*PasswordConstrain return nil } +func (x *CredentialOptions_RandomPassword) SetLength(v int64) { + x.Length = v +} + +func (x *CredentialOptions_RandomPassword) SetConstraints(v []*PasswordConstraint) { + x.Constraints = v +} + +type CredentialOptions_RandomPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Length int64 + Constraints []*PasswordConstraint +} + +func (b0 CredentialOptions_RandomPassword_builder) Build() *CredentialOptions_RandomPassword { + m0 := &CredentialOptions_RandomPassword{} + b, x := &b0, m0 + _, _ = b, x + x.Length = b.Length + x.Constraints = b.Constraints + return m0 +} + type CredentialOptions_NoPassword struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -2133,13 +3679,20 @@ func (x *CredentialOptions_NoPassword) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CredentialOptions_NoPassword.ProtoReflect.Descriptor instead. -func (*CredentialOptions_NoPassword) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{12, 1} +type CredentialOptions_NoPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 CredentialOptions_NoPassword_builder) Build() *CredentialOptions_NoPassword { + m0 := &CredentialOptions_NoPassword{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type CredentialOptions_SSO struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SsoProvider string `protobuf:"bytes,1,opt,name=sso_provider,json=ssoProvider,proto3" json:"sso_provider,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2170,11 +3723,6 @@ func (x *CredentialOptions_SSO) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CredentialOptions_SSO.ProtoReflect.Descriptor instead. -func (*CredentialOptions_SSO) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{12, 2} -} - func (x *CredentialOptions_SSO) GetSsoProvider() string { if x != nil { return x.SsoProvider @@ -2182,8 +3730,26 @@ func (x *CredentialOptions_SSO) GetSsoProvider() string { return "" } +func (x *CredentialOptions_SSO) SetSsoProvider(v string) { + x.SsoProvider = v +} + +type CredentialOptions_SSO_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SsoProvider string +} + +func (b0 CredentialOptions_SSO_builder) Build() *CredentialOptions_SSO { + m0 := &CredentialOptions_SSO{} + b, x := &b0, m0 + _, _ = b, x + x.SsoProvider = b.SsoProvider + return m0 +} + type CredentialOptions_EncryptedPassword struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` EncryptedPasswords []*EncryptedData `protobuf:"bytes,1,rep,name=encrypted_passwords,json=encryptedPasswords,proto3" json:"encrypted_passwords,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2214,11 +3780,6 @@ func (x *CredentialOptions_EncryptedPassword) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use CredentialOptions_EncryptedPassword.ProtoReflect.Descriptor instead. -func (*CredentialOptions_EncryptedPassword) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{12, 3} -} - func (x *CredentialOptions_EncryptedPassword) GetEncryptedPasswords() []*EncryptedData { if x != nil { return x.EncryptedPasswords @@ -2226,8 +3787,26 @@ func (x *CredentialOptions_EncryptedPassword) GetEncryptedPasswords() []*Encrypt return nil } +func (x *CredentialOptions_EncryptedPassword) SetEncryptedPasswords(v []*EncryptedData) { + x.EncryptedPasswords = v +} + +type CredentialOptions_EncryptedPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + EncryptedPasswords []*EncryptedData +} + +func (b0 CredentialOptions_EncryptedPassword_builder) Build() *CredentialOptions_EncryptedPassword { + m0 := &CredentialOptions_EncryptedPassword{} + b, x := &b0, m0 + _, _ = b, x + x.EncryptedPasswords = b.EncryptedPasswords + return m0 +} + type LocalCredentialOptions_RandomPassword struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Length int64 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` Constraints []*PasswordConstraint `protobuf:"bytes,2,rep,name=constraints,proto3" json:"constraints,omitempty"` unknownFields protoimpl.UnknownFields @@ -2259,11 +3838,6 @@ func (x *LocalCredentialOptions_RandomPassword) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use LocalCredentialOptions_RandomPassword.ProtoReflect.Descriptor instead. -func (*LocalCredentialOptions_RandomPassword) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{13, 0} -} - func (x *LocalCredentialOptions_RandomPassword) GetLength() int64 { if x != nil { return x.Length @@ -2278,8 +3852,32 @@ func (x *LocalCredentialOptions_RandomPassword) GetConstraints() []*PasswordCons return nil } +func (x *LocalCredentialOptions_RandomPassword) SetLength(v int64) { + x.Length = v +} + +func (x *LocalCredentialOptions_RandomPassword) SetConstraints(v []*PasswordConstraint) { + x.Constraints = v +} + +type LocalCredentialOptions_RandomPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Length int64 + Constraints []*PasswordConstraint +} + +func (b0 LocalCredentialOptions_RandomPassword_builder) Build() *LocalCredentialOptions_RandomPassword { + m0 := &LocalCredentialOptions_RandomPassword{} + b, x := &b0, m0 + _, _ = b, x + x.Length = b.Length + x.Constraints = b.Constraints + return m0 +} + type LocalCredentialOptions_NoPassword struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -2309,13 +3907,20 @@ func (x *LocalCredentialOptions_NoPassword) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use LocalCredentialOptions_NoPassword.ProtoReflect.Descriptor instead. -func (*LocalCredentialOptions_NoPassword) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{13, 1} +type LocalCredentialOptions_NoPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 LocalCredentialOptions_NoPassword_builder) Build() *LocalCredentialOptions_NoPassword { + m0 := &LocalCredentialOptions_NoPassword{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type LocalCredentialOptions_SSO struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SsoProvider string `protobuf:"bytes,1,opt,name=sso_provider,json=ssoProvider,proto3" json:"sso_provider,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2346,11 +3951,6 @@ func (x *LocalCredentialOptions_SSO) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use LocalCredentialOptions_SSO.ProtoReflect.Descriptor instead. 
-func (*LocalCredentialOptions_SSO) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{13, 2} -} - func (x *LocalCredentialOptions_SSO) GetSsoProvider() string { if x != nil { return x.SsoProvider @@ -2358,8 +3958,26 @@ func (x *LocalCredentialOptions_SSO) GetSsoProvider() string { return "" } +func (x *LocalCredentialOptions_SSO) SetSsoProvider(v string) { + x.SsoProvider = v +} + +type LocalCredentialOptions_SSO_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SsoProvider string +} + +func (b0 LocalCredentialOptions_SSO_builder) Build() *LocalCredentialOptions_SSO { + m0 := &LocalCredentialOptions_SSO{} + b, x := &b0, m0 + _, _ = b, x + x.SsoProvider = b.SsoProvider + return m0 +} + type LocalCredentialOptions_PlaintextPassword struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` PlaintextPassword string `protobuf:"bytes,1,opt,name=plaintext_password,json=plaintextPassword,proto3" json:"plaintext_password,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2390,11 +4008,6 @@ func (x *LocalCredentialOptions_PlaintextPassword) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use LocalCredentialOptions_PlaintextPassword.ProtoReflect.Descriptor instead. -func (*LocalCredentialOptions_PlaintextPassword) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{13, 3} -} - func (x *LocalCredentialOptions_PlaintextPassword) GetPlaintextPassword() string { if x != nil { return x.PlaintextPassword @@ -2402,8 +4015,26 @@ func (x *LocalCredentialOptions_PlaintextPassword) GetPlaintextPassword() string return "" } +func (x *LocalCredentialOptions_PlaintextPassword) SetPlaintextPassword(v string) { + x.PlaintextPassword = v +} + +type LocalCredentialOptions_PlaintextPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + PlaintextPassword string +} + +func (b0 LocalCredentialOptions_PlaintextPassword_builder) Build() *LocalCredentialOptions_PlaintextPassword { + m0 := &LocalCredentialOptions_PlaintextPassword{} + b, x := &b0, m0 + _, _ = b, x + x.PlaintextPassword = b.PlaintextPassword + return m0 +} + type CreateAccountResponse_SuccessResult struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` IsCreateAccountResult bool `protobuf:"varint,2,opt,name=is_create_account_result,json=isCreateAccountResult,proto3" json:"is_create_account_result,omitempty"` unknownFields protoimpl.UnknownFields @@ -2435,11 +4066,6 @@ func (x *CreateAccountResponse_SuccessResult) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use CreateAccountResponse_SuccessResult.ProtoReflect.Descriptor instead. 
-func (*CreateAccountResponse_SuccessResult) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{16, 0} -} - func (x *CreateAccountResponse_SuccessResult) GetResource() *Resource { if x != nil { return x.Resource @@ -2454,8 +4080,43 @@ func (x *CreateAccountResponse_SuccessResult) GetIsCreateAccountResult() bool { return false } +func (x *CreateAccountResponse_SuccessResult) SetResource(v *Resource) { + x.Resource = v +} + +func (x *CreateAccountResponse_SuccessResult) SetIsCreateAccountResult(v bool) { + x.IsCreateAccountResult = v +} + +func (x *CreateAccountResponse_SuccessResult) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *CreateAccountResponse_SuccessResult) ClearResource() { + x.Resource = nil +} + +type CreateAccountResponse_SuccessResult_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *Resource + IsCreateAccountResult bool +} + +func (b0 CreateAccountResponse_SuccessResult_builder) Build() *CreateAccountResponse_SuccessResult { + m0 := &CreateAccountResponse_SuccessResult{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + x.IsCreateAccountResult = b.IsCreateAccountResult + return m0 +} + type CreateAccountResponse_ActionRequiredResult struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` IsCreateAccountResult bool `protobuf:"varint,3,opt,name=is_create_account_result,json=isCreateAccountResult,proto3" json:"is_create_account_result,omitempty"` @@ -2488,11 +4149,6 @@ func (x *CreateAccountResponse_ActionRequiredResult) ProtoReflect() protoreflect return mi.MessageOf(x) } -// Deprecated: Use CreateAccountResponse_ActionRequiredResult.ProtoReflect.Descriptor instead. -func (*CreateAccountResponse_ActionRequiredResult) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{16, 1} -} - func (x *CreateAccountResponse_ActionRequiredResult) GetResource() *Resource { if x != nil { return x.Resource @@ -2514,8 +4170,213 @@ func (x *CreateAccountResponse_ActionRequiredResult) GetIsCreateAccountResult() return false } +func (x *CreateAccountResponse_ActionRequiredResult) SetResource(v *Resource) { + x.Resource = v +} + +func (x *CreateAccountResponse_ActionRequiredResult) SetMessage(v string) { + x.Message = v +} + +func (x *CreateAccountResponse_ActionRequiredResult) SetIsCreateAccountResult(v bool) { + x.IsCreateAccountResult = v +} + +func (x *CreateAccountResponse_ActionRequiredResult) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *CreateAccountResponse_ActionRequiredResult) ClearResource() { + x.Resource = nil +} + +type CreateAccountResponse_ActionRequiredResult_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + Message string + IsCreateAccountResult bool +} + +func (b0 CreateAccountResponse_ActionRequiredResult_builder) Build() *CreateAccountResponse_ActionRequiredResult { + m0 := &CreateAccountResponse_ActionRequiredResult{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + x.Message = b.Message + x.IsCreateAccountResult = b.IsCreateAccountResult + return m0 +} + +type CreateAccountResponse_AlreadyExistsResult struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + IsCreateAccountResult bool `protobuf:"varint,2,opt,name=is_create_account_result,json=isCreateAccountResult,proto3" json:"is_create_account_result,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateAccountResponse_AlreadyExistsResult) Reset() { + *x = CreateAccountResponse_AlreadyExistsResult{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateAccountResponse_AlreadyExistsResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAccountResponse_AlreadyExistsResult) ProtoMessage() {} + +func (x *CreateAccountResponse_AlreadyExistsResult) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[38] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateAccountResponse_AlreadyExistsResult) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *CreateAccountResponse_AlreadyExistsResult) GetIsCreateAccountResult() bool { + if x != nil { + return x.IsCreateAccountResult + } + return false +} + +func (x *CreateAccountResponse_AlreadyExistsResult) SetResource(v *Resource) { + x.Resource = v +} + +func (x *CreateAccountResponse_AlreadyExistsResult) SetIsCreateAccountResult(v bool) { + x.IsCreateAccountResult = v +} + +func (x *CreateAccountResponse_AlreadyExistsResult) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *CreateAccountResponse_AlreadyExistsResult) ClearResource() { + x.Resource = nil +} + +type CreateAccountResponse_AlreadyExistsResult_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *Resource + IsCreateAccountResult bool +} + +func (b0 CreateAccountResponse_AlreadyExistsResult_builder) Build() *CreateAccountResponse_AlreadyExistsResult { + m0 := &CreateAccountResponse_AlreadyExistsResult{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + x.IsCreateAccountResult = b.IsCreateAccountResult + return m0 +} + +type CreateAccountResponse_InProgressResult struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` // Optional. 
+ IsCreateAccountResult bool `protobuf:"varint,2,opt,name=is_create_account_result,json=isCreateAccountResult,proto3" json:"is_create_account_result,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateAccountResponse_InProgressResult) Reset() { + *x = CreateAccountResponse_InProgressResult{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateAccountResponse_InProgressResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAccountResponse_InProgressResult) ProtoMessage() {} + +func (x *CreateAccountResponse_InProgressResult) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[39] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateAccountResponse_InProgressResult) GetResource() *Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *CreateAccountResponse_InProgressResult) GetIsCreateAccountResult() bool { + if x != nil { + return x.IsCreateAccountResult + } + return false +} + +func (x *CreateAccountResponse_InProgressResult) SetResource(v *Resource) { + x.Resource = v +} + +func (x *CreateAccountResponse_InProgressResult) SetIsCreateAccountResult(v bool) { + x.IsCreateAccountResult = v +} + +func (x *CreateAccountResponse_InProgressResult) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *CreateAccountResponse_InProgressResult) ClearResource() { + x.Resource = nil +} + +type CreateAccountResponse_InProgressResult_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + IsCreateAccountResult bool +} + +func (b0 CreateAccountResponse_InProgressResult_builder) Build() *CreateAccountResponse_InProgressResult { + m0 := &CreateAccountResponse_InProgressResult{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + x.IsCreateAccountResult = b.IsCreateAccountResult + return m0 +} + type EncryptionConfig_JWKPublicKeyConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` PubKey []byte `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2523,7 +4384,7 @@ type EncryptionConfig_JWKPublicKeyConfig struct { func (x *EncryptionConfig_JWKPublicKeyConfig) Reset() { *x = EncryptionConfig_JWKPublicKeyConfig{} - mi := &file_c1_connector_v2_resource_proto_msgTypes[38] + mi := &file_c1_connector_v2_resource_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2535,7 +4396,7 @@ func (x *EncryptionConfig_JWKPublicKeyConfig) String() string { func (*EncryptionConfig_JWKPublicKeyConfig) ProtoMessage() {} func (x *EncryptionConfig_JWKPublicKeyConfig) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_resource_proto_msgTypes[38] + mi := &file_c1_connector_v2_resource_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2546,11 +4407,6 @@ func (x *EncryptionConfig_JWKPublicKeyConfig) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use EncryptionConfig_JWKPublicKeyConfig.ProtoReflect.Descriptor instead. -func (*EncryptionConfig_JWKPublicKeyConfig) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{19, 0} -} - func (x *EncryptionConfig_JWKPublicKeyConfig) GetPubKey() []byte { if x != nil { return x.PubKey @@ -2558,553 +4414,256 @@ func (x *EncryptionConfig_JWKPublicKeyConfig) GetPubKey() []byte { return nil } -var File_c1_connector_v2_resource_proto protoreflect.FileDescriptor +func (x *EncryptionConfig_JWKPublicKeyConfig) SetPubKey(v []byte) { + if v == nil { + v = []byte{} + } + x.PubKey = v +} -var file_c1_connector_v2_resource_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xb4, 0x03, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x30, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 
0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, - 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x2e, 0x54, 0x72, 0x61, 0x69, 0x74, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x92, 0x01, 0x09, 0x18, - 0x01, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x72, 0x61, 0x69, 0x74, 0x73, - 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, - 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x20, 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x64, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x22, 0x70, 0x0a, 0x05, 0x54, 0x72, 0x61, 0x69, - 0x74, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x52, 0x41, 0x49, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x52, 0x41, 0x49, - 0x54, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x52, 0x41, 0x49, - 0x54, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x52, 0x41, - 0x49, 0x54, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x52, 0x41, - 0x49, 0x54, 0x5f, 0x41, 0x50, 0x50, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x52, 0x41, 0x49, - 0x54, 0x5f, 0x53, 0x45, 0x43, 0x52, 0x45, 0x54, 0x10, 0x05, 0x22, 0xf1, 0x01, 0x0a, 0x2c, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, - 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, 0x40, 0x01, 0x52, 0x08, 0x70, - 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, - 0x72, 0x09, 0x20, 0x01, 0x28, 0x80, 0x80, 0x40, 0xd0, 0x01, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, - 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 
0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd2, - 0x01, 0x0a, 0x2d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x31, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x6c, - 0x69, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, - 0x0b, 0x72, 0x09, 0x20, 0x01, 0x28, 0x80, 0x80, 0x40, 0xd0, 0x01, 0x01, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x4e, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x22, 0x85, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, - 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x15, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 
0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x10, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x22, 0x50, - 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0xa2, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0b, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x64, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x49, 0x64, 0x22, 0x52, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x32, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x17, 0x52, 0x6f, - 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x64, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 
0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x22, 0xd7, 0x01, 0x0a, 0x18, 0x52, 0x6f, 0x74, - 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x0b, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x82, 0x02, 0x0a, 0x0b, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x06, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x52, 0x06, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, - 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x5f, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x6f, 0x67, - 0x69, 0x6e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0x49, 0x0a, 0x05, - 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x60, 0x01, 0x52, - 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x70, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xd1, 0x05, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5c, 0x0a, - 0x0f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x63, 
0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x61, 0x6e, 0x64, 0x6f, - 0x6d, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x61, 0x6e, - 0x64, 0x6f, 0x6d, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x50, 0x0a, 0x0b, 0x6e, - 0x6f, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4e, 0x6f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x48, - 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x3a, 0x0a, - 0x03, 0x73, 0x73, 0x6f, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, - 0x53, 0x4f, 0x48, 0x00, 0x52, 0x03, 0x73, 0x73, 0x6f, 0x12, 0x65, 0x0a, 0x12, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, - 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x65, 0x64, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x48, 0x00, 0x52, 0x11, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x12, 0x3a, 0x0a, 0x1a, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x5f, 0x61, 0x74, 0x5f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x41, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x1a, 0x7a, 0x0a, 0x0e, - 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x21, - 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x42, 0x09, - 0xfa, 0x42, 0x06, 0x22, 0x04, 0x18, 0x40, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x12, 0x45, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x0c, 0x0a, 0x0a, 0x4e, 0x6f, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x1a, 0x28, 0x0a, 0x03, 0x53, 0x53, 0x4f, 0x12, 0x21, 0x0a, - 0x0c, 0x73, 0x73, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x73, 0x6f, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x1a, 0x6e, 0x0a, 0x11, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x50, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 
0x70, 0x74, - 0x65, 0x64, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61, - 0x74, 0x61, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x12, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x73, - 0x42, 0x09, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xbe, 0x05, 0x0a, 0x16, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x61, 0x0a, 0x0f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, - 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x36, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x50, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, - 0x6d, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x5f, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4e, 0x6f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, - 0x72, 0x64, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x12, 0x3f, 0x0a, 0x03, 0x73, 0x73, 0x6f, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x53, 0x4f, 0x48, 0x00, 0x52, 0x03, 0x73, 0x73, - 0x6f, 0x12, 0x6a, 0x0a, 0x12, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x48, 0x00, 0x52, 0x11, 0x70, 0x6c, 0x61, 0x69, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x3a, 0x0a, - 0x1a, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x61, 0x74, - 0x5f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x74, - 0x4e, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x1a, 0x7a, 0x0a, 0x0e, 0x52, 0x61, 0x6e, - 0x64, 0x6f, 0x6d, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x06, 0x6c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x42, 0x09, 0xfa, 0x42, 0x06, - 0x22, 0x04, 
0x18, 0x40, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x45, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x43, 0x6f, - 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x61, 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x0c, 0x0a, 0x0a, 0x4e, 0x6f, 0x50, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x1a, 0x28, 0x0a, 0x03, 0x53, 0x53, 0x4f, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x73, - 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x73, 0x6f, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0x42, 0x0a, - 0x11, 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, - 0x72, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, - 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, - 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x42, 0x09, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x12, - 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, - 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x72, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, 0x61, 0x72, 0x53, 0x65, 0x74, 0x12, 0x1b, 0x0a, - 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xfc, 0x01, 0x0a, 0x14, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, - 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x22, 0xfe, 0x04, 0x0a, 0x15, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 
0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x64, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x48, 0x00, 0x52, 0x07, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x66, 0x0a, 0x0f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x48, 0x00, 0x52, 0x0e, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x45, 0x0a, - 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x44, 0x61, 0x74, 0x61, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x7f, 0x0a, 0x0d, - 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x35, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x73, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0xa0, 0x01, - 0x0a, 0x14, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x73, 0x5f, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 
0x15, 0x69, 0x73, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd6, 0x01, 0x0a, 0x0d, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x05, 0x6b, 0x65, - 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x6b, 0x65, - 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x65, 0x79, - 0x49, 0x64, 0x73, 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xa2, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, - 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x69, - 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x12, 0x15, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x69, 0x0a, 0x15, 0x6a, 0x77, 0x6b, - 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4a, 0x57, 0x4b, 0x50, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x48, 0x00, - 0x52, 0x12, 0x6a, 0x77, 0x6b, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x2d, 0x0a, 0x12, 0x4a, 0x57, 0x4b, 0x50, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x4b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x75, - 0x62, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x75, 0x62, - 0x4b, 0x65, 0x79, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8c, 0x01, - 0x0a, 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, - 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x5f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x62, - 0x61, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xf0, 0x04, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x49, 0x64, 0x52, 0x02, 0x69, 0x64, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, - 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x30, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, - 0x28, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, - 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x3c, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 
0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, - 0x64, 0x12, 0x51, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x52, 0x45, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x2c, 0x0a, 0x28, 0x43, 0x52, 0x45, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x4f, 0x52, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, - 0x52, 0x43, 0x45, 0x53, 0x10, 0x01, 0x12, 0x37, 0x0a, 0x33, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, - 0x54, 0x4f, 0x52, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x5f, 0x47, 0x52, 0x41, 0x4e, 0x54, 0x53, 0x5f, - 0x50, 0x52, 0x49, 0x4e, 0x43, 0x49, 0x50, 0x41, 0x4c, 0x5f, 0x4a, 0x49, 0x54, 0x10, 0x02, 0x22, - 0xc1, 0x02, 0x0a, 0x24, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, 0x0e, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x49, 0x64, 0x12, 0x53, - 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, - 0x00, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, - 0x40, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2d, 0x0a, 0x0a, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x72, 0x09, 0x20, 0x01, 0x28, 0x80, 0x80, 0x40, 0xd0, 0x01, 0x01, - 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 
0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x25, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, - 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x72, 0x09, 0x20, 0x01, 0x28, 0x80, - 0x80, 0x40, 0xd0, 0x01, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xf4, 0x01, 0x0a, - 0x27, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x74, 0x74, 0x65, 0x72, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x53, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x00, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x28, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x47, 0x65, 0x74, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0x52, 0x0a, 0x0a, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, - 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x69, 0x6e, - 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x32, 0xab, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x92, 0x01, 0x0a, - 0x11, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x12, 0x3d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x3e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x32, 0x92, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x9c, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x47, 0x65, 0x74, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x38, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x74, 0x74, 0x65, - 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x74, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 
0x76, 0x69, - 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xde, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x61, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x81, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x67, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x56, 0x32, 0x12, 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x56, 0x32, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x83, 0x01, 0x0a, 0x18, 0x43, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x67, 0x0a, 0x10, 0x52, 0x6f, 0x74, 0x61, 0x74, - 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x28, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, - 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x32, 0x77, 0x0a, 0x15, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0d, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 
0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, - 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, - 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +type EncryptionConfig_JWKPublicKeyConfig_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. -var ( - file_c1_connector_v2_resource_proto_rawDescOnce sync.Once - file_c1_connector_v2_resource_proto_rawDescData []byte -) + PubKey []byte +} -func file_c1_connector_v2_resource_proto_rawDescGZIP() []byte { - file_c1_connector_v2_resource_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_resource_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_resource_proto_rawDesc), len(file_c1_connector_v2_resource_proto_rawDesc))) - }) - return file_c1_connector_v2_resource_proto_rawDescData +func (b0 EncryptionConfig_JWKPublicKeyConfig_builder) Build() *EncryptionConfig_JWKPublicKeyConfig { + m0 := &EncryptionConfig_JWKPublicKeyConfig{} + b, x := &b0, m0 + _, _ = b, x + x.PubKey = b.PubKey + return m0 } +var File_c1_connector_v2_resource_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_resource_proto_rawDesc = "" + + "\n" + + "\x1ec1/connector/v2/resource.proto\x12\x0fc1.connector.v2\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17validate/validate.proto\"\xd1\x03\n" + + "\fResourceType\x12\x1a\n" + + "\x02id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x120\n" + + "\fdisplay_name\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\vdisplayName\x12L\n" + + "\x06traits\x18\x03 \x03(\x0e2#.c1.connector.v2.ResourceType.TraitB\x0f\xfaB\f\x92\x01\t\x18\x01\"\x05\x82\x01\x02\x10\x01R\x06traits\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + + "\vdescription\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\vdescription\x12-\n" + + "\x12sourced_externally\x18\x06 \x01(\bR\x11sourcedExternally\"\x8c\x01\n" + + "\x05Trait\x12\x15\n" + + "\x11TRAIT_UNSPECIFIED\x10\x00\x12\x0e\n" + + "\n" + + "TRAIT_USER\x10\x01\x12\x0f\n" + + "\vTRAIT_GROUP\x10\x02\x12\x0e\n" + + "\n" + + "TRAIT_ROLE\x10\x03\x12\r\n" + + "\tTRAIT_APP\x10\x04\x12\x10\n" + + "\fTRAIT_SECRET\x10\x05\x12\x1a\n" + + "\x16TRAIT_SECURITY_INSIGHT\x10\x06\"\xa6\x02\n" + + ",ResourceTypesServiceListResourceTypesRequest\x121\n" + + "\x06parent\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\x06parent\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xd2\x01\n" + + "-ResourceTypesServiceListResourceTypesResponse\x121\n" + + "\x04list\x18\x01 \x03(\v2\x1d.c1.connector.v2.ResourceTypeR\x04list\x126\n" 
+ + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"N\n" + + "\x15CreateResourceRequest\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\"\x85\x01\n" + + "\x16CreateResourceResponse\x123\n" + + "\acreated\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\acreated\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa0\x01\n" + + "\x15DeleteResourceRequest\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\"P\n" + + "\x16DeleteResourceResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa2\x01\n" + + "\x17DeleteResourceV2Request\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\"R\n" + + "\x18DeleteResourceV2Response\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xfc\x01\n" + + "\x17RotateCredentialRequest\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12Q\n" + + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\"\xd7\x01\n" + + "\x18RotateCredentialResponse\x12E\n" + + "\x0eencrypted_data\x18\x01 \x03(\v2\x1e.c1.connector.v2.EncryptedDataR\rencryptedData\x12<\n" + + "\vresource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x82\x02\n" + + "\vAccountInfo\x12:\n" + + "\x06emails\x18\x01 \x03(\v2\".c1.connector.v2.AccountInfo.EmailR\x06emails\x12\x14\n" + + "\x05login\x18\x02 \x01(\tR\x05login\x12#\n" + + "\rlogin_aliases\x18\x03 \x03(\tR\floginAliases\x121\n" + + "\aprofile\x18\x04 \x01(\v2\x17.google.protobuf.StructR\aprofile\x1aI\n" + + "\x05Email\x12!\n" + + "\aaddress\x18\x01 \x01(\tB\a\xfaB\x04r\x02`\x01R\aaddress\x12\x1d\n" + + "\n" + + "is_primary\x18\x02 \x01(\bR\tisPrimary\"\xd1\x05\n" + + "\x11CredentialOptions\x12\\\n" + + "\x0frandom_password\x18d \x01(\v21.c1.connector.v2.CredentialOptions.RandomPasswordH\x00R\x0erandomPassword\x12P\n" + + "\vno_password\x18e \x01(\v2-.c1.connector.v2.CredentialOptions.NoPasswordH\x00R\n" + + "noPassword\x12:\n" + + "\x03sso\x18f \x01(\v2&.c1.connector.v2.CredentialOptions.SSOH\x00R\x03sso\x12e\n" + + "\x12encrypted_password\x18g \x01(\v24.c1.connector.v2.CredentialOptions.EncryptedPasswordH\x00R\x11encryptedPassword\x12:\n" + + "\x1aforce_change_at_next_login\x18\x01 \x01(\bR\x16forceChangeAtNextLogin\x1az\n" + + "\x0eRandomPassword\x12!\n" + + "\x06length\x18\x01 \x01(\x03B\t\xfaB\x06\"\x04\x18@(\bR\x06length\x12E\n" + + "\vconstraints\x18\x02 \x03(\v2#.c1.connector.v2.PasswordConstraintR\vconstraints\x1a\f\n" + + "\n" + + "NoPassword\x1a(\n" + + "\x03SSO\x12!\n" + + "\fsso_provider\x18\x01 \x01(\tR\vssoProvider\x1an\n" + + "\x11EncryptedPassword\x12Y\n" + + "\x13encrypted_passwords\x18\x01 \x03(\v2\x1e.c1.connector.v2.EncryptedDataB\b\xfaB\x05\x92\x01\x02\b\x01R\x12encryptedPasswordsB\t\n" + + "\aoptions\"\xbe\x05\n" + + "\x16LocalCredentialOptions\x12a\n" + + 
"\x0frandom_password\x18d \x01(\v26.c1.connector.v2.LocalCredentialOptions.RandomPasswordH\x00R\x0erandomPassword\x12U\n" + + "\vno_password\x18e \x01(\v22.c1.connector.v2.LocalCredentialOptions.NoPasswordH\x00R\n" + + "noPassword\x12?\n" + + "\x03sso\x18f \x01(\v2+.c1.connector.v2.LocalCredentialOptions.SSOH\x00R\x03sso\x12j\n" + + "\x12plaintext_password\x18g \x01(\v29.c1.connector.v2.LocalCredentialOptions.PlaintextPasswordH\x00R\x11plaintextPassword\x12:\n" + + "\x1aforce_change_at_next_login\x18\x01 \x01(\bR\x16forceChangeAtNextLogin\x1az\n" + + "\x0eRandomPassword\x12!\n" + + "\x06length\x18\x01 \x01(\x03B\t\xfaB\x06\"\x04\x18@(\bR\x06length\x12E\n" + + "\vconstraints\x18\x02 \x03(\v2#.c1.connector.v2.PasswordConstraintR\vconstraints\x1a\f\n" + + "\n" + + "NoPassword\x1a(\n" + + "\x03SSO\x12!\n" + + "\fsso_provider\x18\x01 \x01(\tR\vssoProvider\x1aB\n" + + "\x11PlaintextPassword\x12-\n" + + "\x12plaintext_password\x18\x01 \x01(\tR\x11plaintextPasswordB\t\n" + + "\aoptions\"L\n" + + "\x12PasswordConstraint\x12\x19\n" + + "\bchar_set\x18\x01 \x01(\tR\acharSet\x12\x1b\n" + + "\tmin_count\x18\x02 \x01(\rR\bminCount\"\xfc\x01\n" + + "\x14CreateAccountRequest\x12?\n" + + "\faccount_info\x18\x01 \x01(\v2\x1c.c1.connector.v2.AccountInfoR\vaccountInfo\x12Q\n" + + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\"\xcc\b\n" + + "\x15CreateAccountResponse\x12P\n" + + "\asuccess\x18d \x01(\v24.c1.connector.v2.CreateAccountResponse.SuccessResultH\x00R\asuccess\x12f\n" + + "\x0faction_required\x18e \x01(\v2;.c1.connector.v2.CreateAccountResponse.ActionRequiredResultH\x00R\x0eactionRequired\x12c\n" + + "\x0ealready_exists\x18f \x01(\v2:.c1.connector.v2.CreateAccountResponse.AlreadyExistsResultH\x00R\ralreadyExists\x12Z\n" + + "\vin_progress\x18g \x01(\v27.c1.connector.v2.CreateAccountResponse.InProgressResultH\x00R\n" + + "inProgress\x12E\n" + + "\x0eencrypted_data\x18\x02 \x03(\v2\x1e.c1.connector.v2.EncryptedDataR\rencryptedData\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\x7f\n" + + "\rSuccessResult\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x127\n" + + "\x18is_create_account_result\x18\x02 \x01(\bR\x15isCreateAccountResult\x1a\xa0\x01\n" + + "\x14ActionRequiredResult\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x127\n" + + "\x18is_create_account_result\x18\x03 \x01(\bR\x15isCreateAccountResult\x1a\x85\x01\n" + + "\x13AlreadyExistsResult\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x127\n" + + "\x18is_create_account_result\x18\x02 \x01(\bR\x15isCreateAccountResult\x1a\x82\x01\n" + + "\x10InProgressResult\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x127\n" + + "\x18is_create_account_result\x18\x02 \x01(\bR\x15isCreateAccountResultB\b\n" + + "\x06result\"\xd6\x01\n" + + "\rEncryptedData\x12\x1a\n" + + "\bprovider\x18\x01 \x01(\tR\bprovider\x12\x19\n" + + "\x06key_id\x18\x02 \x01(\tB\x02\x18\x01R\x05keyId\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12 \n" + + "\vdescription\x18\x04 \x01(\tR\vdescription\x12\x16\n" + + "\x06schema\x18\x05 \x01(\tR\x06schema\x12'\n" + + "\x0fencrypted_bytes\x18\x06 \x01(\fR\x0eencryptedBytes\x12\x17\n" + + "\akey_ids\x18\a \x03(\tR\x06keyIds\"s\n" + + 
"\rPlaintextData\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12 \n" + + "\vdescription\x18\x02 \x01(\tR\vdescription\x12\x16\n" + + "\x06schema\x18\x03 \x01(\tR\x06schema\x12\x14\n" + + "\x05bytes\x18\x04 \x01(\fR\x05bytes\"\xa2\x02\n" + + "\x10EncryptionConfig\x127\n" + + "\tprincipal\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\tprincipal\x12\x1a\n" + + "\bprovider\x18\x02 \x01(\tR\bprovider\x12\x15\n" + + "\x06key_id\x18\x03 \x01(\tR\x05keyId\x12i\n" + + "\x15jwk_public_key_config\x18d \x01(\v24.c1.connector.v2.EncryptionConfig.JWKPublicKeyConfigH\x00R\x12jwkPublicKeyConfig\x1a-\n" + + "\x12JWKPublicKeyConfig\x12\x17\n" + + "\apub_key\x18\x01 \x01(\fR\x06pubKeyB\b\n" + + "\x06config\"\x8c\x01\n" + + "\n" + + "ResourceId\x12/\n" + + "\rresource_type\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\fresourceType\x12&\n" + + "\bresource\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\bresource\x12%\n" + + "\x0ebaton_resource\x18\x03 \x01(\bR\rbatonResource\"\xf0\x04\n" + + "\bResource\x12+\n" + + "\x02id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x02id\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\x120\n" + + "\fdisplay_name\x18\x03 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\vdisplayName\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + + "\vdescription\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\vdescription\x12%\n" + + "\x0ebaton_resource\x18\x06 \x01(\bR\rbatonResource\x12<\n" + + "\vexternal_id\x18\a \x01(\v2\x1b.c1.connector.v2.ExternalIdR\n" + + "externalId\x12Q\n" + + "\x0fcreation_source\x18\b \x01(\x0e2(.c1.connector.v2.Resource.CreationSourceR\x0ecreationSource\"\x98\x01\n" + + "\x0eCreationSource\x12\x1f\n" + + "\x1bCREATION_SOURCE_UNSPECIFIED\x10\x00\x12,\n" + + "(CREATION_SOURCE_CONNECTOR_LIST_RESOURCES\x10\x01\x127\n" + + "3CREATION_SOURCE_CONNECTOR_LIST_GRANTS_PRINCIPAL_JIT\x10\x02\"\xf6\x02\n" + + "$ResourcesServiceListResourcesRequest\x124\n" + + "\x10resource_type_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x0eresourceTypeId\x12S\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x00R\x10parentResourceId\x12'\n" + + "\tpage_size\x18\x03 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x04 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x05 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x06 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xc6\x01\n" + + "%ResourcesServiceListResourcesResponse\x12-\n" + + "\x04list\x18\x01 \x03(\v2\x19.c1.connector.v2.ResourceR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa9\x02\n" + + "'ResourceGetterServiceGetResourceRequest\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12S\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x00R\x10parentResourceId\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x04 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\x99\x01\n" + + "(ResourceGetterServiceGetResourceResponse\x125\n" + + "\bresource\x18\x01 
\x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"R\n" + + "\n" + + "ExternalId\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + + "\x04link\x18\x02 \x01(\tR\x04link\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription2\xab\x01\n" + + "\x14ResourceTypesService\x12\x92\x01\n" + + "\x11ListResourceTypes\x12=.c1.connector.v2.ResourceTypesServiceListResourceTypesRequest\x1a>.c1.connector.v2.ResourceTypesServiceListResourceTypesResponse2\x92\x01\n" + + "\x10ResourcesService\x12~\n" + + "\rListResources\x125.c1.connector.v2.ResourcesServiceListResourcesRequest\x1a6.c1.connector.v2.ResourcesServiceListResourcesResponse2\x9c\x01\n" + + "\x15ResourceGetterService\x12\x82\x01\n" + + "\vGetResource\x128.c1.connector.v2.ResourceGetterServiceGetResourceRequest\x1a9.c1.connector.v2.ResourceGetterServiceGetResourceResponse2\xde\x01\n" + + "\x16ResourceManagerService\x12a\n" + + "\x0eCreateResource\x12&.c1.connector.v2.CreateResourceRequest\x1a'.c1.connector.v2.CreateResourceResponse\x12a\n" + + "\x0eDeleteResource\x12&.c1.connector.v2.DeleteResourceRequest\x1a'.c1.connector.v2.DeleteResourceResponse2\x81\x01\n" + + "\x16ResourceDeleterService\x12g\n" + + "\x10DeleteResourceV2\x12(.c1.connector.v2.DeleteResourceV2Request\x1a).c1.connector.v2.DeleteResourceV2Response2\x83\x01\n" + + "\x18CredentialManagerService\x12g\n" + + "\x10RotateCredential\x12(.c1.connector.v2.RotateCredentialRequest\x1a).c1.connector.v2.RotateCredentialResponse2w\n" + + "\x15AccountManagerService\x12^\n" + + "\rCreateAccount\x12%.c1.connector.v2.CreateAccountRequest\x1a&.c1.connector.v2.CreateAccountResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_c1_connector_v2_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 39) +var file_c1_connector_v2_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 41) var file_c1_connector_v2_resource_proto_goTypes = []any{ (ResourceType_Trait)(0), // 0: c1.connector.v2.ResourceType.Trait (Resource_CreationSource)(0), // 1: c1.connector.v2.Resource.CreationSource @@ -3146,34 +4705,36 @@ var file_c1_connector_v2_resource_proto_goTypes = []any{ (*LocalCredentialOptions_PlaintextPassword)(nil), // 37: c1.connector.v2.LocalCredentialOptions.PlaintextPassword (*CreateAccountResponse_SuccessResult)(nil), // 38: c1.connector.v2.CreateAccountResponse.SuccessResult (*CreateAccountResponse_ActionRequiredResult)(nil), // 39: c1.connector.v2.CreateAccountResponse.ActionRequiredResult - (*EncryptionConfig_JWKPublicKeyConfig)(nil), // 40: c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig - (*anypb.Any)(nil), // 41: google.protobuf.Any - (*structpb.Struct)(nil), // 42: google.protobuf.Struct + (*CreateAccountResponse_AlreadyExistsResult)(nil), // 40: c1.connector.v2.CreateAccountResponse.AlreadyExistsResult + (*CreateAccountResponse_InProgressResult)(nil), // 41: c1.connector.v2.CreateAccountResponse.InProgressResult + (*EncryptionConfig_JWKPublicKeyConfig)(nil), // 42: c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig + (*anypb.Any)(nil), // 43: google.protobuf.Any + (*structpb.Struct)(nil), // 44: google.protobuf.Struct } var file_c1_connector_v2_resource_proto_depIdxs = []int32{ 0, // 0: c1.connector.v2.ResourceType.traits:type_name -> c1.connector.v2.ResourceType.Trait - 41, // 1: c1.connector.v2.ResourceType.annotations:type_name -> google.protobuf.Any + 43, // 1: 
c1.connector.v2.ResourceType.annotations:type_name -> google.protobuf.Any 23, // 2: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest.parent:type_name -> c1.connector.v2.Resource - 41, // 3: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest.annotations:type_name -> google.protobuf.Any + 43, // 3: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest.annotations:type_name -> google.protobuf.Any 2, // 4: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse.list:type_name -> c1.connector.v2.ResourceType - 41, // 5: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse.annotations:type_name -> google.protobuf.Any + 43, // 5: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse.annotations:type_name -> google.protobuf.Any 23, // 6: c1.connector.v2.CreateResourceRequest.resource:type_name -> c1.connector.v2.Resource 23, // 7: c1.connector.v2.CreateResourceResponse.created:type_name -> c1.connector.v2.Resource - 41, // 8: c1.connector.v2.CreateResourceResponse.annotations:type_name -> google.protobuf.Any + 43, // 8: c1.connector.v2.CreateResourceResponse.annotations:type_name -> google.protobuf.Any 22, // 9: c1.connector.v2.DeleteResourceRequest.resource_id:type_name -> c1.connector.v2.ResourceId 22, // 10: c1.connector.v2.DeleteResourceRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId - 41, // 11: c1.connector.v2.DeleteResourceResponse.annotations:type_name -> google.protobuf.Any + 43, // 11: c1.connector.v2.DeleteResourceResponse.annotations:type_name -> google.protobuf.Any 22, // 12: c1.connector.v2.DeleteResourceV2Request.resource_id:type_name -> c1.connector.v2.ResourceId 22, // 13: c1.connector.v2.DeleteResourceV2Request.parent_resource_id:type_name -> c1.connector.v2.ResourceId - 41, // 14: c1.connector.v2.DeleteResourceV2Response.annotations:type_name -> google.protobuf.Any + 43, // 14: c1.connector.v2.DeleteResourceV2Response.annotations:type_name -> google.protobuf.Any 22, // 15: c1.connector.v2.RotateCredentialRequest.resource_id:type_name -> c1.connector.v2.ResourceId 14, // 16: c1.connector.v2.RotateCredentialRequest.credential_options:type_name -> c1.connector.v2.CredentialOptions 21, // 17: c1.connector.v2.RotateCredentialRequest.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig 19, // 18: c1.connector.v2.RotateCredentialResponse.encrypted_data:type_name -> c1.connector.v2.EncryptedData 22, // 19: c1.connector.v2.RotateCredentialResponse.resource_id:type_name -> c1.connector.v2.ResourceId - 41, // 20: c1.connector.v2.RotateCredentialResponse.annotations:type_name -> google.protobuf.Any + 43, // 20: c1.connector.v2.RotateCredentialResponse.annotations:type_name -> google.protobuf.Any 29, // 21: c1.connector.v2.AccountInfo.emails:type_name -> c1.connector.v2.AccountInfo.Email - 42, // 22: c1.connector.v2.AccountInfo.profile:type_name -> google.protobuf.Struct + 44, // 22: c1.connector.v2.AccountInfo.profile:type_name -> google.protobuf.Struct 30, // 23: c1.connector.v2.CredentialOptions.random_password:type_name -> c1.connector.v2.CredentialOptions.RandomPassword 31, // 24: c1.connector.v2.CredentialOptions.no_password:type_name -> c1.connector.v2.CredentialOptions.NoPassword 32, // 25: c1.connector.v2.CredentialOptions.sso:type_name -> c1.connector.v2.CredentialOptions.SSO @@ -3187,50 +4748,54 @@ var file_c1_connector_v2_resource_proto_depIdxs = []int32{ 21, // 33: c1.connector.v2.CreateAccountRequest.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig 38, // 34: 
c1.connector.v2.CreateAccountResponse.success:type_name -> c1.connector.v2.CreateAccountResponse.SuccessResult 39, // 35: c1.connector.v2.CreateAccountResponse.action_required:type_name -> c1.connector.v2.CreateAccountResponse.ActionRequiredResult - 19, // 36: c1.connector.v2.CreateAccountResponse.encrypted_data:type_name -> c1.connector.v2.EncryptedData - 41, // 37: c1.connector.v2.CreateAccountResponse.annotations:type_name -> google.protobuf.Any - 23, // 38: c1.connector.v2.EncryptionConfig.principal:type_name -> c1.connector.v2.Resource - 40, // 39: c1.connector.v2.EncryptionConfig.jwk_public_key_config:type_name -> c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig - 22, // 40: c1.connector.v2.Resource.id:type_name -> c1.connector.v2.ResourceId - 22, // 41: c1.connector.v2.Resource.parent_resource_id:type_name -> c1.connector.v2.ResourceId - 41, // 42: c1.connector.v2.Resource.annotations:type_name -> google.protobuf.Any - 28, // 43: c1.connector.v2.Resource.external_id:type_name -> c1.connector.v2.ExternalId - 1, // 44: c1.connector.v2.Resource.creation_source:type_name -> c1.connector.v2.Resource.CreationSource - 22, // 45: c1.connector.v2.ResourcesServiceListResourcesRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId - 41, // 46: c1.connector.v2.ResourcesServiceListResourcesRequest.annotations:type_name -> google.protobuf.Any - 23, // 47: c1.connector.v2.ResourcesServiceListResourcesResponse.list:type_name -> c1.connector.v2.Resource - 41, // 48: c1.connector.v2.ResourcesServiceListResourcesResponse.annotations:type_name -> google.protobuf.Any - 22, // 49: c1.connector.v2.ResourceGetterServiceGetResourceRequest.resource_id:type_name -> c1.connector.v2.ResourceId - 22, // 50: c1.connector.v2.ResourceGetterServiceGetResourceRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId - 41, // 51: c1.connector.v2.ResourceGetterServiceGetResourceRequest.annotations:type_name -> google.protobuf.Any - 23, // 52: c1.connector.v2.ResourceGetterServiceGetResourceResponse.resource:type_name -> c1.connector.v2.Resource - 41, // 53: c1.connector.v2.ResourceGetterServiceGetResourceResponse.annotations:type_name -> google.protobuf.Any - 16, // 54: c1.connector.v2.CredentialOptions.RandomPassword.constraints:type_name -> c1.connector.v2.PasswordConstraint - 19, // 55: c1.connector.v2.CredentialOptions.EncryptedPassword.encrypted_passwords:type_name -> c1.connector.v2.EncryptedData - 16, // 56: c1.connector.v2.LocalCredentialOptions.RandomPassword.constraints:type_name -> c1.connector.v2.PasswordConstraint - 23, // 57: c1.connector.v2.CreateAccountResponse.SuccessResult.resource:type_name -> c1.connector.v2.Resource - 23, // 58: c1.connector.v2.CreateAccountResponse.ActionRequiredResult.resource:type_name -> c1.connector.v2.Resource - 3, // 59: c1.connector.v2.ResourceTypesService.ListResourceTypes:input_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesRequest - 24, // 60: c1.connector.v2.ResourcesService.ListResources:input_type -> c1.connector.v2.ResourcesServiceListResourcesRequest - 26, // 61: c1.connector.v2.ResourceGetterService.GetResource:input_type -> c1.connector.v2.ResourceGetterServiceGetResourceRequest - 5, // 62: c1.connector.v2.ResourceManagerService.CreateResource:input_type -> c1.connector.v2.CreateResourceRequest - 7, // 63: c1.connector.v2.ResourceManagerService.DeleteResource:input_type -> c1.connector.v2.DeleteResourceRequest - 9, // 64: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:input_type -> 
c1.connector.v2.DeleteResourceV2Request - 11, // 65: c1.connector.v2.CredentialManagerService.RotateCredential:input_type -> c1.connector.v2.RotateCredentialRequest - 17, // 66: c1.connector.v2.AccountManagerService.CreateAccount:input_type -> c1.connector.v2.CreateAccountRequest - 4, // 67: c1.connector.v2.ResourceTypesService.ListResourceTypes:output_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesResponse - 25, // 68: c1.connector.v2.ResourcesService.ListResources:output_type -> c1.connector.v2.ResourcesServiceListResourcesResponse - 27, // 69: c1.connector.v2.ResourceGetterService.GetResource:output_type -> c1.connector.v2.ResourceGetterServiceGetResourceResponse - 6, // 70: c1.connector.v2.ResourceManagerService.CreateResource:output_type -> c1.connector.v2.CreateResourceResponse - 8, // 71: c1.connector.v2.ResourceManagerService.DeleteResource:output_type -> c1.connector.v2.DeleteResourceResponse - 10, // 72: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:output_type -> c1.connector.v2.DeleteResourceV2Response - 12, // 73: c1.connector.v2.CredentialManagerService.RotateCredential:output_type -> c1.connector.v2.RotateCredentialResponse - 18, // 74: c1.connector.v2.AccountManagerService.CreateAccount:output_type -> c1.connector.v2.CreateAccountResponse - 67, // [67:75] is the sub-list for method output_type - 59, // [59:67] is the sub-list for method input_type - 59, // [59:59] is the sub-list for extension type_name - 59, // [59:59] is the sub-list for extension extendee - 0, // [0:59] is the sub-list for field type_name + 40, // 36: c1.connector.v2.CreateAccountResponse.already_exists:type_name -> c1.connector.v2.CreateAccountResponse.AlreadyExistsResult + 41, // 37: c1.connector.v2.CreateAccountResponse.in_progress:type_name -> c1.connector.v2.CreateAccountResponse.InProgressResult + 19, // 38: c1.connector.v2.CreateAccountResponse.encrypted_data:type_name -> c1.connector.v2.EncryptedData + 43, // 39: c1.connector.v2.CreateAccountResponse.annotations:type_name -> google.protobuf.Any + 23, // 40: c1.connector.v2.EncryptionConfig.principal:type_name -> c1.connector.v2.Resource + 42, // 41: c1.connector.v2.EncryptionConfig.jwk_public_key_config:type_name -> c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig + 22, // 42: c1.connector.v2.Resource.id:type_name -> c1.connector.v2.ResourceId + 22, // 43: c1.connector.v2.Resource.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 44: c1.connector.v2.Resource.annotations:type_name -> google.protobuf.Any + 28, // 45: c1.connector.v2.Resource.external_id:type_name -> c1.connector.v2.ExternalId + 1, // 46: c1.connector.v2.Resource.creation_source:type_name -> c1.connector.v2.Resource.CreationSource + 22, // 47: c1.connector.v2.ResourcesServiceListResourcesRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 48: c1.connector.v2.ResourcesServiceListResourcesRequest.annotations:type_name -> google.protobuf.Any + 23, // 49: c1.connector.v2.ResourcesServiceListResourcesResponse.list:type_name -> c1.connector.v2.Resource + 43, // 50: c1.connector.v2.ResourcesServiceListResourcesResponse.annotations:type_name -> google.protobuf.Any + 22, // 51: c1.connector.v2.ResourceGetterServiceGetResourceRequest.resource_id:type_name -> c1.connector.v2.ResourceId + 22, // 52: c1.connector.v2.ResourceGetterServiceGetResourceRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 53: c1.connector.v2.ResourceGetterServiceGetResourceRequest.annotations:type_name -> google.protobuf.Any + 
23, // 54: c1.connector.v2.ResourceGetterServiceGetResourceResponse.resource:type_name -> c1.connector.v2.Resource + 43, // 55: c1.connector.v2.ResourceGetterServiceGetResourceResponse.annotations:type_name -> google.protobuf.Any + 16, // 56: c1.connector.v2.CredentialOptions.RandomPassword.constraints:type_name -> c1.connector.v2.PasswordConstraint + 19, // 57: c1.connector.v2.CredentialOptions.EncryptedPassword.encrypted_passwords:type_name -> c1.connector.v2.EncryptedData + 16, // 58: c1.connector.v2.LocalCredentialOptions.RandomPassword.constraints:type_name -> c1.connector.v2.PasswordConstraint + 23, // 59: c1.connector.v2.CreateAccountResponse.SuccessResult.resource:type_name -> c1.connector.v2.Resource + 23, // 60: c1.connector.v2.CreateAccountResponse.ActionRequiredResult.resource:type_name -> c1.connector.v2.Resource + 23, // 61: c1.connector.v2.CreateAccountResponse.AlreadyExistsResult.resource:type_name -> c1.connector.v2.Resource + 23, // 62: c1.connector.v2.CreateAccountResponse.InProgressResult.resource:type_name -> c1.connector.v2.Resource + 3, // 63: c1.connector.v2.ResourceTypesService.ListResourceTypes:input_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesRequest + 24, // 64: c1.connector.v2.ResourcesService.ListResources:input_type -> c1.connector.v2.ResourcesServiceListResourcesRequest + 26, // 65: c1.connector.v2.ResourceGetterService.GetResource:input_type -> c1.connector.v2.ResourceGetterServiceGetResourceRequest + 5, // 66: c1.connector.v2.ResourceManagerService.CreateResource:input_type -> c1.connector.v2.CreateResourceRequest + 7, // 67: c1.connector.v2.ResourceManagerService.DeleteResource:input_type -> c1.connector.v2.DeleteResourceRequest + 9, // 68: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:input_type -> c1.connector.v2.DeleteResourceV2Request + 11, // 69: c1.connector.v2.CredentialManagerService.RotateCredential:input_type -> c1.connector.v2.RotateCredentialRequest + 17, // 70: c1.connector.v2.AccountManagerService.CreateAccount:input_type -> c1.connector.v2.CreateAccountRequest + 4, // 71: c1.connector.v2.ResourceTypesService.ListResourceTypes:output_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesResponse + 25, // 72: c1.connector.v2.ResourcesService.ListResources:output_type -> c1.connector.v2.ResourcesServiceListResourcesResponse + 27, // 73: c1.connector.v2.ResourceGetterService.GetResource:output_type -> c1.connector.v2.ResourceGetterServiceGetResourceResponse + 6, // 74: c1.connector.v2.ResourceManagerService.CreateResource:output_type -> c1.connector.v2.CreateResourceResponse + 8, // 75: c1.connector.v2.ResourceManagerService.DeleteResource:output_type -> c1.connector.v2.DeleteResourceResponse + 10, // 76: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:output_type -> c1.connector.v2.DeleteResourceV2Response + 12, // 77: c1.connector.v2.CredentialManagerService.RotateCredential:output_type -> c1.connector.v2.RotateCredentialResponse + 18, // 78: c1.connector.v2.AccountManagerService.CreateAccount:output_type -> c1.connector.v2.CreateAccountResponse + 71, // [71:79] is the sub-list for method output_type + 63, // [63:71] is the sub-list for method input_type + 63, // [63:63] is the sub-list for extension type_name + 63, // [63:63] is the sub-list for extension extendee + 0, // [0:63] is the sub-list for field type_name } func init() { file_c1_connector_v2_resource_proto_init() } @@ -3253,6 +4818,8 @@ func file_c1_connector_v2_resource_proto_init() { 
file_c1_connector_v2_resource_proto_msgTypes[16].OneofWrappers = []any{ (*CreateAccountResponse_Success)(nil), (*CreateAccountResponse_ActionRequired)(nil), + (*CreateAccountResponse_AlreadyExists)(nil), + (*CreateAccountResponse_InProgress)(nil), } file_c1_connector_v2_resource_proto_msgTypes[19].OneofWrappers = []any{ (*EncryptionConfig_JwkPublicKeyConfig)(nil), @@ -3263,7 +4830,7 @@ func file_c1_connector_v2_resource_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_resource_proto_rawDesc), len(file_c1_connector_v2_resource_proto_rawDesc)), NumEnums: 2, - NumMessages: 39, + NumMessages: 41, NumExtensions: 0, NumServices: 7, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go index bd2901cb..691da265 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go @@ -359,6 +359,21 @@ func (m *ResourceTypesServiceListResourceTypesRequest) validate(all bool) error } + if m.GetActiveSyncId() != "" { + + if l := len(m.GetActiveSyncId()); l < 1 || l > 1024 { + err := ResourceTypesServiceListResourceTypesRequestValidationError{ + field: "ActiveSyncId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return ResourceTypesServiceListResourceTypesRequestMultiError(errors) } @@ -3098,6 +3113,88 @@ func (m *CreateAccountResponse) validate(all bool) error { } } + case *CreateAccountResponse_AlreadyExists: + if v == nil { + err := CreateAccountResponseValidationError{ + field: "Result", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetAlreadyExists()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateAccountResponseValidationError{ + field: "AlreadyExists", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateAccountResponseValidationError{ + field: "AlreadyExists", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAlreadyExists()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateAccountResponseValidationError{ + field: "AlreadyExists", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CreateAccountResponse_InProgress: + if v == nil { + err := CreateAccountResponseValidationError{ + field: "Result", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInProgress()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateAccountResponseValidationError{ + field: "InProgress", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateAccountResponseValidationError{ + field: "InProgress", + reason: "embedded message failed 
validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInProgress()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateAccountResponseValidationError{ + field: "InProgress", + reason: "embedded message failed validation", + cause: err, + } + } + } + default: _ = v // ensures v is used } @@ -4085,6 +4182,21 @@ func (m *ResourcesServiceListResourcesRequest) validate(all bool) error { } + if m.GetActiveSyncId() != "" { + + if l := len(m.GetActiveSyncId()); l < 1 || l > 1024 { + err := ResourcesServiceListResourcesRequestValidationError{ + field: "ActiveSyncId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return ResourcesServiceListResourcesRequestMultiError(errors) } @@ -4471,6 +4583,21 @@ func (m *ResourceGetterServiceGetResourceRequest) validate(all bool) error { } + if m.GetActiveSyncId() != "" { + + if l := len(m.GetActiveSyncId()); l < 1 || l > 1024 { + err := ResourceGetterServiceGetResourceRequestValidationError{ + field: "ActiveSyncId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return ResourceGetterServiceGetResourceRequestMultiError(errors) } @@ -6242,6 +6369,280 @@ var _ interface { ErrorName() string } = CreateAccountResponse_ActionRequiredResultValidationError{} +// Validate checks the field values on +// CreateAccountResponse_AlreadyExistsResult with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *CreateAccountResponse_AlreadyExistsResult) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CreateAccountResponse_AlreadyExistsResult with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// CreateAccountResponse_AlreadyExistsResultMultiError, or nil if none found. 
+func (m *CreateAccountResponse_AlreadyExistsResult) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateAccountResponse_AlreadyExistsResult) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateAccountResponse_AlreadyExistsResultValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateAccountResponse_AlreadyExistsResultValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateAccountResponse_AlreadyExistsResultValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsCreateAccountResult + + if len(errors) > 0 { + return CreateAccountResponse_AlreadyExistsResultMultiError(errors) + } + + return nil +} + +// CreateAccountResponse_AlreadyExistsResultMultiError is an error wrapping +// multiple validation errors returned by +// CreateAccountResponse_AlreadyExistsResult.ValidateAll() if the designated +// constraints aren't met. +type CreateAccountResponse_AlreadyExistsResultMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateAccountResponse_AlreadyExistsResultMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CreateAccountResponse_AlreadyExistsResultMultiError) AllErrors() []error { return m } + +// CreateAccountResponse_AlreadyExistsResultValidationError is the validation +// error returned by CreateAccountResponse_AlreadyExistsResult.Validate if the +// designated constraints aren't met. +type CreateAccountResponse_AlreadyExistsResultValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateAccountResponse_AlreadyExistsResultValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateAccountResponse_AlreadyExistsResultValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateAccountResponse_AlreadyExistsResultValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateAccountResponse_AlreadyExistsResultValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CreateAccountResponse_AlreadyExistsResultValidationError) ErrorName() string { + return "CreateAccountResponse_AlreadyExistsResultValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateAccountResponse_AlreadyExistsResultValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateAccountResponse_AlreadyExistsResult.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateAccountResponse_AlreadyExistsResultValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateAccountResponse_AlreadyExistsResultValidationError{} + +// Validate checks the field values on CreateAccountResponse_InProgressResult +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *CreateAccountResponse_InProgressResult) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// CreateAccountResponse_InProgressResult with the rules defined in the proto +// definition for this message. If any rules are violated, the result is a +// list of violation errors wrapped in +// CreateAccountResponse_InProgressResultMultiError, or nil if none found. +func (m *CreateAccountResponse_InProgressResult) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateAccountResponse_InProgressResult) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetResource()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateAccountResponse_InProgressResultValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateAccountResponse_InProgressResultValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateAccountResponse_InProgressResultValidationError{ + field: "Resource", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsCreateAccountResult + + if len(errors) > 0 { + return CreateAccountResponse_InProgressResultMultiError(errors) + } + + return nil +} + +// CreateAccountResponse_InProgressResultMultiError is an error wrapping +// multiple validation errors returned by +// CreateAccountResponse_InProgressResult.ValidateAll() if the designated +// constraints aren't met. +type CreateAccountResponse_InProgressResultMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateAccountResponse_InProgressResultMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m CreateAccountResponse_InProgressResultMultiError) AllErrors() []error { return m } + +// CreateAccountResponse_InProgressResultValidationError is the validation +// error returned by CreateAccountResponse_InProgressResult.Validate if the +// designated constraints aren't met. +type CreateAccountResponse_InProgressResultValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateAccountResponse_InProgressResultValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateAccountResponse_InProgressResultValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateAccountResponse_InProgressResultValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateAccountResponse_InProgressResultValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CreateAccountResponse_InProgressResultValidationError) ErrorName() string { + return "CreateAccountResponse_InProgressResultValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateAccountResponse_InProgressResultValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateAccountResponse_InProgressResult.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateAccountResponse_InProgressResultValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateAccountResponse_InProgressResultValidationError{} + // Validate checks the field values on EncryptionConfig_JWKPublicKeyConfig with // the rules defined in the proto definition for this message. If any rules // are violated, the first error encountered is returned, or nil if there are diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go new file mode 100644 index 00000000..211603ac --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go @@ -0,0 +1,4838 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/resource.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceType_Trait int32 + +const ( + ResourceType_TRAIT_UNSPECIFIED ResourceType_Trait = 0 + ResourceType_TRAIT_USER ResourceType_Trait = 1 + ResourceType_TRAIT_GROUP ResourceType_Trait = 2 + ResourceType_TRAIT_ROLE ResourceType_Trait = 3 + ResourceType_TRAIT_APP ResourceType_Trait = 4 + ResourceType_TRAIT_SECRET ResourceType_Trait = 5 + ResourceType_TRAIT_SECURITY_INSIGHT ResourceType_Trait = 6 +) + +// Enum value maps for ResourceType_Trait. +var ( + ResourceType_Trait_name = map[int32]string{ + 0: "TRAIT_UNSPECIFIED", + 1: "TRAIT_USER", + 2: "TRAIT_GROUP", + 3: "TRAIT_ROLE", + 4: "TRAIT_APP", + 5: "TRAIT_SECRET", + 6: "TRAIT_SECURITY_INSIGHT", + } + ResourceType_Trait_value = map[string]int32{ + "TRAIT_UNSPECIFIED": 0, + "TRAIT_USER": 1, + "TRAIT_GROUP": 2, + "TRAIT_ROLE": 3, + "TRAIT_APP": 4, + "TRAIT_SECRET": 5, + "TRAIT_SECURITY_INSIGHT": 6, + } +) + +func (x ResourceType_Trait) Enum() *ResourceType_Trait { + p := new(ResourceType_Trait) + *p = x + return p +} + +func (x ResourceType_Trait) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceType_Trait) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_resource_proto_enumTypes[0].Descriptor() +} + +func (ResourceType_Trait) Type() protoreflect.EnumType { + return &file_c1_connector_v2_resource_proto_enumTypes[0] +} + +func (x ResourceType_Trait) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// FIXME(mstanbCO): call this something else? Should it just be a bool? Possibly just use an annotation? +type Resource_CreationSource int32 + +const ( + Resource_CREATION_SOURCE_UNSPECIFIED Resource_CreationSource = 0 + Resource_CREATION_SOURCE_CONNECTOR_LIST_RESOURCES Resource_CreationSource = 1 + Resource_CREATION_SOURCE_CONNECTOR_LIST_GRANTS_PRINCIPAL_JIT Resource_CreationSource = 2 +) + +// Enum value maps for Resource_CreationSource. 
+var ( + Resource_CreationSource_name = map[int32]string{ + 0: "CREATION_SOURCE_UNSPECIFIED", + 1: "CREATION_SOURCE_CONNECTOR_LIST_RESOURCES", + 2: "CREATION_SOURCE_CONNECTOR_LIST_GRANTS_PRINCIPAL_JIT", + } + Resource_CreationSource_value = map[string]int32{ + "CREATION_SOURCE_UNSPECIFIED": 0, + "CREATION_SOURCE_CONNECTOR_LIST_RESOURCES": 1, + "CREATION_SOURCE_CONNECTOR_LIST_GRANTS_PRINCIPAL_JIT": 2, + } +) + +func (x Resource_CreationSource) Enum() *Resource_CreationSource { + p := new(Resource_CreationSource) + *p = x + return p +} + +func (x Resource_CreationSource) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Resource_CreationSource) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connector_v2_resource_proto_enumTypes[1].Descriptor() +} + +func (Resource_CreationSource) Type() protoreflect.EnumType { + return &file_c1_connector_v2_resource_proto_enumTypes[1] +} + +func (x Resource_CreationSource) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type ResourceType struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Traits []ResourceType_Trait `protobuf:"varint,3,rep,packed,name=traits,proto3,enum=c1.connector.v2.ResourceType_Trait"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_Description string `protobuf:"bytes,5,opt,name=description,proto3"` + xxx_hidden_SourcedExternally bool `protobuf:"varint,6,opt,name=sourced_externally,json=sourcedExternally,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceType) Reset() { + *x = ResourceType{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceType) ProtoMessage() {} + +func (x *ResourceType) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceType) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *ResourceType) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *ResourceType) GetTraits() []ResourceType_Trait { + if x != nil { + return x.xxx_hidden_Traits + } + return nil +} + +func (x *ResourceType) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourceType) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *ResourceType) GetSourcedExternally() bool { + if x != nil { + return x.xxx_hidden_SourcedExternally + } + return false +} + +func (x *ResourceType) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *ResourceType) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *ResourceType) SetTraits(v []ResourceType_Trait) { + x.xxx_hidden_Traits = v +} + +func (x *ResourceType) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + 
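// Illustrative usage sketch, not part of the generated file or this patch: it shows
// how connector code might drive the opaque-API builder and accessors generated for
// ResourceType above. The import alias and the field values are assumptions made for
// the example only.
//
//	import v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
//
//	func exampleResourceType() *v2.ResourceType {
//		rt := v2.ResourceType_builder{
//			Id:          "team",
//			DisplayName: "Team",
//			Traits:      []v2.ResourceType_Trait{v2.ResourceType_TRAIT_GROUP},
//			Description: "GitHub team resource type",
//		}.Build()
//		rt.SetSourcedExternally(false) // setters write the hidden fields directly
//		_ = rt.GetDisplayName()        // getters are nil-safe, as in the code above
//		return rt
//	}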
+func (x *ResourceType) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *ResourceType) SetSourcedExternally(v bool) { + x.xxx_hidden_SourcedExternally = v +} + +type ResourceType_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + DisplayName string + Traits []ResourceType_Trait + Annotations []*anypb.Any + Description string + SourcedExternally bool +} + +func (b0 ResourceType_builder) Build() *ResourceType { + m0 := &ResourceType{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Traits = b.Traits + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Description = b.Description + x.xxx_hidden_SourcedExternally = b.SourcedExternally + return m0 +} + +type ResourceTypesServiceListResourceTypesRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Parent *Resource `protobuf:"bytes,1,opt,name=parent,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceTypesServiceListResourceTypesRequest) Reset() { + *x = ResourceTypesServiceListResourceTypesRequest{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceTypesServiceListResourceTypesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceTypesServiceListResourceTypesRequest) ProtoMessage() {} + +func (x *ResourceTypesServiceListResourceTypesRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceTypesServiceListResourceTypesRequest) GetParent() *Resource { + if x != nil { + return x.xxx_hidden_Parent + } + return nil +} + +func (x *ResourceTypesServiceListResourceTypesRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *ResourceTypesServiceListResourceTypesRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *ResourceTypesServiceListResourceTypesRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourceTypesServiceListResourceTypesRequest) GetActiveSyncId() string { + if x != nil { + return x.xxx_hidden_ActiveSyncId + } + return "" +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetParent(v *Resource) { + x.xxx_hidden_Parent = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x 
*ResourceTypesServiceListResourceTypesRequest) SetActiveSyncId(v string) { + x.xxx_hidden_ActiveSyncId = v +} + +func (x *ResourceTypesServiceListResourceTypesRequest) HasParent() bool { + if x == nil { + return false + } + return x.xxx_hidden_Parent != nil +} + +func (x *ResourceTypesServiceListResourceTypesRequest) ClearParent() { + x.xxx_hidden_Parent = nil +} + +type ResourceTypesServiceListResourceTypesRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Parent *Resource + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 ResourceTypesServiceListResourceTypesRequest_builder) Build() *ResourceTypesServiceListResourceTypesRequest { + m0 := &ResourceTypesServiceListResourceTypesRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Parent = b.Parent + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ActiveSyncId = b.ActiveSyncId + return m0 +} + +type ResourceTypesServiceListResourceTypesResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*ResourceType `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceTypesServiceListResourceTypesResponse) Reset() { + *x = ResourceTypesServiceListResourceTypesResponse{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceTypesServiceListResourceTypesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceTypesServiceListResourceTypesResponse) ProtoMessage() {} + +func (x *ResourceTypesServiceListResourceTypesResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceTypesServiceListResourceTypesResponse) GetList() []*ResourceType { + if x != nil { + if x.xxx_hidden_List != nil { + return *x.xxx_hidden_List + } + } + return nil +} + +func (x *ResourceTypesServiceListResourceTypesResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *ResourceTypesServiceListResourceTypesResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourceTypesServiceListResourceTypesResponse) SetList(v []*ResourceType) { + x.xxx_hidden_List = &v +} + +func (x *ResourceTypesServiceListResourceTypesResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +func (x *ResourceTypesServiceListResourceTypesResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type ResourceTypesServiceListResourceTypesResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + List []*ResourceType + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 ResourceTypesServiceListResourceTypesResponse_builder) Build() *ResourceTypesServiceListResourceTypesResponse { + m0 := &ResourceTypesServiceListResourceTypesResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_NextPageToken = b.NextPageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type CreateResourceRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateResourceRequest) Reset() { + *x = CreateResourceRequest{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateResourceRequest) ProtoMessage() {} + +func (x *CreateResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateResourceRequest) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *CreateResourceRequest) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *CreateResourceRequest) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *CreateResourceRequest) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type CreateResourceRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource +} + +func (b0 CreateResourceRequest_builder) Build() *CreateResourceRequest { + m0 := &CreateResourceRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + return m0 +} + +type CreateResourceResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Created *Resource `protobuf:"bytes,1,opt,name=created,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateResourceResponse) Reset() { + *x = CreateResourceResponse{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateResourceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateResourceResponse) ProtoMessage() {} + +func (x *CreateResourceResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateResourceResponse) GetCreated() *Resource { + if x != nil { + return x.xxx_hidden_Created + } + return nil +} + +func (x *CreateResourceResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *CreateResourceResponse) SetCreated(v *Resource) { + x.xxx_hidden_Created = v +} + +func (x *CreateResourceResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *CreateResourceResponse) HasCreated() bool { + if x == nil { + return false + } + return x.xxx_hidden_Created != nil +} + +func (x *CreateResourceResponse) ClearCreated() { + x.xxx_hidden_Created = nil +} + +type CreateResourceResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Created *Resource + Annotations []*anypb.Any +} + +func (b0 CreateResourceResponse_builder) Build() *CreateResourceResponse { + m0 := &CreateResourceResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Created = b.Created + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type DeleteResourceRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteResourceRequest) Reset() { + *x = DeleteResourceRequest{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResourceRequest) ProtoMessage() {} + +func (x *DeleteResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DeleteResourceRequest) GetResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *DeleteResourceRequest) GetParentResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ParentResourceId + } + return nil +} + +func (x *DeleteResourceRequest) SetResourceId(v *ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *DeleteResourceRequest) SetParentResourceId(v *ResourceId) { + x.xxx_hidden_ParentResourceId = v +} + +func (x *DeleteResourceRequest) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *DeleteResourceRequest) HasParentResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ParentResourceId != nil +} + +func (x *DeleteResourceRequest) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +func (x *DeleteResourceRequest) ClearParentResourceId() { + x.xxx_hidden_ParentResourceId = nil +} + +type DeleteResourceRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceId *ResourceId + ParentResourceId *ResourceId +} + +func (b0 DeleteResourceRequest_builder) Build() *DeleteResourceRequest { + m0 := &DeleteResourceRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_ParentResourceId = b.ParentResourceId + return m0 +} + +type DeleteResourceResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteResourceResponse) Reset() { + *x = DeleteResourceResponse{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteResourceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResourceResponse) ProtoMessage() {} + +func (x *DeleteResourceResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DeleteResourceResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *DeleteResourceResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type DeleteResourceResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 DeleteResourceResponse_builder) Build() *DeleteResourceResponse { + m0 := &DeleteResourceResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type DeleteResourceV2Request struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteResourceV2Request) Reset() { + *x = DeleteResourceV2Request{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteResourceV2Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResourceV2Request) ProtoMessage() {} + +func (x *DeleteResourceV2Request) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DeleteResourceV2Request) GetResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *DeleteResourceV2Request) GetParentResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ParentResourceId + } + return nil +} + +func (x *DeleteResourceV2Request) SetResourceId(v *ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *DeleteResourceV2Request) SetParentResourceId(v *ResourceId) { + x.xxx_hidden_ParentResourceId = v +} + +func (x *DeleteResourceV2Request) HasResourceId() bool { + if x == 
nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *DeleteResourceV2Request) HasParentResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ParentResourceId != nil +} + +func (x *DeleteResourceV2Request) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +func (x *DeleteResourceV2Request) ClearParentResourceId() { + x.xxx_hidden_ParentResourceId = nil +} + +type DeleteResourceV2Request_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + ParentResourceId *ResourceId +} + +func (b0 DeleteResourceV2Request_builder) Build() *DeleteResourceV2Request { + m0 := &DeleteResourceV2Request{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_ParentResourceId = b.ParentResourceId + return m0 +} + +type DeleteResourceV2Response struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteResourceV2Response) Reset() { + *x = DeleteResourceV2Response{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteResourceV2Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResourceV2Response) ProtoMessage() {} + +func (x *DeleteResourceV2Response) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DeleteResourceV2Response) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *DeleteResourceV2Response) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type DeleteResourceV2Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 DeleteResourceV2Response_builder) Build() *DeleteResourceV2Response { + m0 := &DeleteResourceV2Response{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type RotateCredentialRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_CredentialOptions *CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3"` + xxx_hidden_EncryptionConfigs *[]*EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RotateCredentialRequest) Reset() { + *x = RotateCredentialRequest{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RotateCredentialRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RotateCredentialRequest) ProtoMessage() {} + +func (x *RotateCredentialRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RotateCredentialRequest) GetResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *RotateCredentialRequest) GetCredentialOptions() *CredentialOptions { + if x != nil { + return x.xxx_hidden_CredentialOptions + } + return nil +} + +func (x *RotateCredentialRequest) GetEncryptionConfigs() []*EncryptionConfig { + if x != nil { + if x.xxx_hidden_EncryptionConfigs != nil { + return *x.xxx_hidden_EncryptionConfigs + } + } + return nil +} + +func (x *RotateCredentialRequest) SetResourceId(v *ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *RotateCredentialRequest) SetCredentialOptions(v *CredentialOptions) { + x.xxx_hidden_CredentialOptions = v +} + +func (x *RotateCredentialRequest) SetEncryptionConfigs(v []*EncryptionConfig) { + x.xxx_hidden_EncryptionConfigs = &v +} + +func (x *RotateCredentialRequest) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *RotateCredentialRequest) HasCredentialOptions() bool { + if x == nil { + return false + } + return x.xxx_hidden_CredentialOptions != nil +} + +func (x *RotateCredentialRequest) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +func (x *RotateCredentialRequest) ClearCredentialOptions() { + x.xxx_hidden_CredentialOptions = nil +} + +type RotateCredentialRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceId *ResourceId + CredentialOptions *CredentialOptions + EncryptionConfigs []*EncryptionConfig +} + +func (b0 RotateCredentialRequest_builder) Build() *RotateCredentialRequest { + m0 := &RotateCredentialRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_CredentialOptions = b.CredentialOptions + x.xxx_hidden_EncryptionConfigs = &b.EncryptionConfigs + return m0 +} + +type RotateCredentialResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_EncryptedData *[]*EncryptedData `protobuf:"bytes,1,rep,name=encrypted_data,json=encryptedData,proto3"` + xxx_hidden_ResourceId *ResourceId `protobuf:"bytes,2,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RotateCredentialResponse) Reset() { + *x = RotateCredentialResponse{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RotateCredentialResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RotateCredentialResponse) ProtoMessage() {} + +func (x *RotateCredentialResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RotateCredentialResponse) GetEncryptedData() []*EncryptedData { + if x != nil { + if x.xxx_hidden_EncryptedData != nil { + return *x.xxx_hidden_EncryptedData + } + } + return nil +} + +func (x *RotateCredentialResponse) GetResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *RotateCredentialResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *RotateCredentialResponse) SetEncryptedData(v []*EncryptedData) { + x.xxx_hidden_EncryptedData = &v +} + +func (x *RotateCredentialResponse) SetResourceId(v *ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *RotateCredentialResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *RotateCredentialResponse) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *RotateCredentialResponse) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +type RotateCredentialResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + EncryptedData []*EncryptedData + ResourceId *ResourceId + Annotations []*anypb.Any +} + +func (b0 RotateCredentialResponse_builder) Build() *RotateCredentialResponse { + m0 := &RotateCredentialResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_EncryptedData = &b.EncryptedData + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type AccountInfo struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Emails *[]*AccountInfo_Email `protobuf:"bytes,1,rep,name=emails,proto3"` + xxx_hidden_Login string `protobuf:"bytes,2,opt,name=login,proto3"` + xxx_hidden_LoginAliases []string `protobuf:"bytes,3,rep,name=login_aliases,json=loginAliases,proto3"` + xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,4,opt,name=profile,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AccountInfo) Reset() { + *x = AccountInfo{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AccountInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccountInfo) ProtoMessage() {} + +func (x *AccountInfo) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AccountInfo) GetEmails() []*AccountInfo_Email { + if x != nil { + if x.xxx_hidden_Emails != nil { + return *x.xxx_hidden_Emails + } + } + return nil +} + +func (x *AccountInfo) GetLogin() string { + if x != nil { + return x.xxx_hidden_Login + } + return "" +} + +func (x *AccountInfo) GetLoginAliases() []string { + if x != nil { + return x.xxx_hidden_LoginAliases + } + return nil +} + +func (x *AccountInfo) GetProfile() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Profile + } + return nil +} + +func (x *AccountInfo) SetEmails(v []*AccountInfo_Email) { + x.xxx_hidden_Emails = &v +} + +func (x *AccountInfo) SetLogin(v string) { + x.xxx_hidden_Login = v +} + +func (x *AccountInfo) SetLoginAliases(v []string) { + x.xxx_hidden_LoginAliases = v +} + +func (x *AccountInfo) SetProfile(v *structpb.Struct) { + x.xxx_hidden_Profile = v +} + +func (x *AccountInfo) HasProfile() bool { + if x == nil { + return false + } + return x.xxx_hidden_Profile != nil +} + +func (x *AccountInfo) ClearProfile() { + x.xxx_hidden_Profile = nil +} + +type AccountInfo_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Emails []*AccountInfo_Email + // The user's login + Login string + // Any additional login aliases for the user + LoginAliases []string + Profile *structpb.Struct +} + +func (b0 AccountInfo_builder) Build() *AccountInfo { + m0 := &AccountInfo{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Emails = &b.Emails + x.xxx_hidden_Login = b.Login + x.xxx_hidden_LoginAliases = b.LoginAliases + x.xxx_hidden_Profile = b.Profile + return m0 +} + +type CredentialOptions struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Options isCredentialOptions_Options `protobuf_oneof:"options"` + xxx_hidden_ForceChangeAtNextLogin bool `protobuf:"varint,1,opt,name=force_change_at_next_login,json=forceChangeAtNextLogin,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CredentialOptions) Reset() { + *x = CredentialOptions{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CredentialOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialOptions) ProtoMessage() {} + +func (x *CredentialOptions) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CredentialOptions) GetRandomPassword() *CredentialOptions_RandomPassword { + if x != nil { + if x, ok := x.xxx_hidden_Options.(*credentialOptions_RandomPassword_); ok { + return x.RandomPassword + } + } + return nil +} + +func (x *CredentialOptions) GetNoPassword() *CredentialOptions_NoPassword { + if x != nil { + if x, ok := x.xxx_hidden_Options.(*credentialOptions_NoPassword_); ok { + return x.NoPassword + } + } + return nil +} + +func (x *CredentialOptions) GetSso() *CredentialOptions_SSO { + if x != nil { + if x, ok := x.xxx_hidden_Options.(*credentialOptions_Sso); ok { + return x.Sso + } + } + return nil +} + +func (x *CredentialOptions) GetEncryptedPassword() *CredentialOptions_EncryptedPassword { + if x != nil { + if x, ok := x.xxx_hidden_Options.(*credentialOptions_EncryptedPassword_); ok { + return x.EncryptedPassword + } + } + return nil +} + +func (x *CredentialOptions) GetForceChangeAtNextLogin() bool { + if x != nil { + return x.xxx_hidden_ForceChangeAtNextLogin + } + return false +} + +func (x *CredentialOptions) SetRandomPassword(v *CredentialOptions_RandomPassword) { + if v == nil { + x.xxx_hidden_Options = nil + return + } + x.xxx_hidden_Options = &credentialOptions_RandomPassword_{v} +} + +func (x *CredentialOptions) SetNoPassword(v *CredentialOptions_NoPassword) { + if v == nil { + x.xxx_hidden_Options = nil + return + } + x.xxx_hidden_Options = &credentialOptions_NoPassword_{v} +} + +func (x *CredentialOptions) SetSso(v *CredentialOptions_SSO) { + if v == nil { + x.xxx_hidden_Options = nil + return + } + x.xxx_hidden_Options = &credentialOptions_Sso{v} +} + +func (x *CredentialOptions) SetEncryptedPassword(v *CredentialOptions_EncryptedPassword) { + if v == nil { + x.xxx_hidden_Options = nil + return + } + x.xxx_hidden_Options = &credentialOptions_EncryptedPassword_{v} +} + +func (x *CredentialOptions) SetForceChangeAtNextLogin(v bool) { + x.xxx_hidden_ForceChangeAtNextLogin = v +} + +func (x *CredentialOptions) HasOptions() bool { + if x == nil { + return false + } + return x.xxx_hidden_Options != nil +} + 
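// Illustrative usage sketch, not part of the generated file or this patch: it shows
// how the CredentialOptions oneof accessors above might be exercised from calling
// code. The package alias v2 and the zero-value RandomPassword message are
// assumptions for the example; the real message fields are not shown in this hunk.
//
//	opts := new(v2.CredentialOptions)
//	opts.SetRandomPassword(new(v2.CredentialOptions_RandomPassword))
//	opts.SetForceChangeAtNextLogin(true)
//
//	switch opts.WhichOptions() {
//	case v2.CredentialOptions_RandomPassword_case:
//		_ = opts.GetRandomPassword() // non-nil: the oneof currently holds random_password
//	case v2.CredentialOptions_Options_not_set_case:
//		// no credential option was selected
//	}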
+func (x *CredentialOptions) HasRandomPassword() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Options.(*credentialOptions_RandomPassword_) + return ok +} + +func (x *CredentialOptions) HasNoPassword() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Options.(*credentialOptions_NoPassword_) + return ok +} + +func (x *CredentialOptions) HasSso() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Options.(*credentialOptions_Sso) + return ok +} + +func (x *CredentialOptions) HasEncryptedPassword() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Options.(*credentialOptions_EncryptedPassword_) + return ok +} + +func (x *CredentialOptions) ClearOptions() { + x.xxx_hidden_Options = nil +} + +func (x *CredentialOptions) ClearRandomPassword() { + if _, ok := x.xxx_hidden_Options.(*credentialOptions_RandomPassword_); ok { + x.xxx_hidden_Options = nil + } +} + +func (x *CredentialOptions) ClearNoPassword() { + if _, ok := x.xxx_hidden_Options.(*credentialOptions_NoPassword_); ok { + x.xxx_hidden_Options = nil + } +} + +func (x *CredentialOptions) ClearSso() { + if _, ok := x.xxx_hidden_Options.(*credentialOptions_Sso); ok { + x.xxx_hidden_Options = nil + } +} + +func (x *CredentialOptions) ClearEncryptedPassword() { + if _, ok := x.xxx_hidden_Options.(*credentialOptions_EncryptedPassword_); ok { + x.xxx_hidden_Options = nil + } +} + +const CredentialOptions_Options_not_set_case case_CredentialOptions_Options = 0 +const CredentialOptions_RandomPassword_case case_CredentialOptions_Options = 100 +const CredentialOptions_NoPassword_case case_CredentialOptions_Options = 101 +const CredentialOptions_Sso_case case_CredentialOptions_Options = 102 +const CredentialOptions_EncryptedPassword_case case_CredentialOptions_Options = 103 + +func (x *CredentialOptions) WhichOptions() case_CredentialOptions_Options { + if x == nil { + return CredentialOptions_Options_not_set_case + } + switch x.xxx_hidden_Options.(type) { + case *credentialOptions_RandomPassword_: + return CredentialOptions_RandomPassword_case + case *credentialOptions_NoPassword_: + return CredentialOptions_NoPassword_case + case *credentialOptions_Sso: + return CredentialOptions_Sso_case + case *credentialOptions_EncryptedPassword_: + return CredentialOptions_EncryptedPassword_case + default: + return CredentialOptions_Options_not_set_case + } +} + +type CredentialOptions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Fields of oneof xxx_hidden_Options: + RandomPassword *CredentialOptions_RandomPassword + NoPassword *CredentialOptions_NoPassword + Sso *CredentialOptions_SSO + EncryptedPassword *CredentialOptions_EncryptedPassword + // -- end of xxx_hidden_Options + ForceChangeAtNextLogin bool +} + +func (b0 CredentialOptions_builder) Build() *CredentialOptions { + m0 := &CredentialOptions{} + b, x := &b0, m0 + _, _ = b, x + if b.RandomPassword != nil { + x.xxx_hidden_Options = &credentialOptions_RandomPassword_{b.RandomPassword} + } + if b.NoPassword != nil { + x.xxx_hidden_Options = &credentialOptions_NoPassword_{b.NoPassword} + } + if b.Sso != nil { + x.xxx_hidden_Options = &credentialOptions_Sso{b.Sso} + } + if b.EncryptedPassword != nil { + x.xxx_hidden_Options = &credentialOptions_EncryptedPassword_{b.EncryptedPassword} + } + x.xxx_hidden_ForceChangeAtNextLogin = b.ForceChangeAtNextLogin + return m0 +} + +type case_CredentialOptions_Options protoreflect.FieldNumber + +func (x case_CredentialOptions_Options) String() string { + md := file_c1_connector_v2_resource_proto_msgTypes[12].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isCredentialOptions_Options interface { + isCredentialOptions_Options() +} + +type credentialOptions_RandomPassword_ struct { + RandomPassword *CredentialOptions_RandomPassword `protobuf:"bytes,100,opt,name=random_password,json=randomPassword,proto3,oneof"` +} + +type credentialOptions_NoPassword_ struct { + NoPassword *CredentialOptions_NoPassword `protobuf:"bytes,101,opt,name=no_password,json=noPassword,proto3,oneof"` +} + +type credentialOptions_Sso struct { + Sso *CredentialOptions_SSO `protobuf:"bytes,102,opt,name=sso,proto3,oneof"` +} + +type credentialOptions_EncryptedPassword_ struct { + EncryptedPassword *CredentialOptions_EncryptedPassword `protobuf:"bytes,103,opt,name=encrypted_password,json=encryptedPassword,proto3,oneof"` +} + +func (*credentialOptions_RandomPassword_) isCredentialOptions_Options() {} + +func (*credentialOptions_NoPassword_) isCredentialOptions_Options() {} + +func (*credentialOptions_Sso) isCredentialOptions_Options() {} + +func (*credentialOptions_EncryptedPassword_) isCredentialOptions_Options() {} + +// Do not use this in any RPC or any message that is in an RPC. 
+type LocalCredentialOptions struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Options isLocalCredentialOptions_Options `protobuf_oneof:"options"` + xxx_hidden_ForceChangeAtNextLogin bool `protobuf:"varint,1,opt,name=force_change_at_next_login,json=forceChangeAtNextLogin,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LocalCredentialOptions) Reset() { + *x = LocalCredentialOptions{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LocalCredentialOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalCredentialOptions) ProtoMessage() {} + +func (x *LocalCredentialOptions) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *LocalCredentialOptions) GetRandomPassword() *LocalCredentialOptions_RandomPassword { + if x != nil { + if x, ok := x.xxx_hidden_Options.(*localCredentialOptions_RandomPassword_); ok { + return x.RandomPassword + } + } + return nil +} + +func (x *LocalCredentialOptions) GetNoPassword() *LocalCredentialOptions_NoPassword { + if x != nil { + if x, ok := x.xxx_hidden_Options.(*localCredentialOptions_NoPassword_); ok { + return x.NoPassword + } + } + return nil +} + +func (x *LocalCredentialOptions) GetSso() *LocalCredentialOptions_SSO { + if x != nil { + if x, ok := x.xxx_hidden_Options.(*localCredentialOptions_Sso); ok { + return x.Sso + } + } + return nil +} + +func (x *LocalCredentialOptions) GetPlaintextPassword() *LocalCredentialOptions_PlaintextPassword { + if x != nil { + if x, ok := x.xxx_hidden_Options.(*localCredentialOptions_PlaintextPassword_); ok { + return x.PlaintextPassword + } + } + return nil +} + +func (x *LocalCredentialOptions) GetForceChangeAtNextLogin() bool { + if x != nil { + return x.xxx_hidden_ForceChangeAtNextLogin + } + return false +} + +func (x *LocalCredentialOptions) SetRandomPassword(v *LocalCredentialOptions_RandomPassword) { + if v == nil { + x.xxx_hidden_Options = nil + return + } + x.xxx_hidden_Options = &localCredentialOptions_RandomPassword_{v} +} + +func (x *LocalCredentialOptions) SetNoPassword(v *LocalCredentialOptions_NoPassword) { + if v == nil { + x.xxx_hidden_Options = nil + return + } + x.xxx_hidden_Options = &localCredentialOptions_NoPassword_{v} +} + +func (x *LocalCredentialOptions) SetSso(v *LocalCredentialOptions_SSO) { + if v == nil { + x.xxx_hidden_Options = nil + return + } + x.xxx_hidden_Options = &localCredentialOptions_Sso{v} +} + +func (x *LocalCredentialOptions) SetPlaintextPassword(v *LocalCredentialOptions_PlaintextPassword) { + if v == nil { + x.xxx_hidden_Options = nil + return + } + x.xxx_hidden_Options = &localCredentialOptions_PlaintextPassword_{v} +} + +func (x *LocalCredentialOptions) SetForceChangeAtNextLogin(v bool) { + x.xxx_hidden_ForceChangeAtNextLogin = v +} + +func (x *LocalCredentialOptions) HasOptions() bool { + if x == nil { + return false + } + return x.xxx_hidden_Options != nil +} + +func (x *LocalCredentialOptions) HasRandomPassword() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Options.(*localCredentialOptions_RandomPassword_) + return ok +} + +func (x *LocalCredentialOptions) HasNoPassword() bool { + if x == nil { + return false + 
} + _, ok := x.xxx_hidden_Options.(*localCredentialOptions_NoPassword_) + return ok +} + +func (x *LocalCredentialOptions) HasSso() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Options.(*localCredentialOptions_Sso) + return ok +} + +func (x *LocalCredentialOptions) HasPlaintextPassword() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Options.(*localCredentialOptions_PlaintextPassword_) + return ok +} + +func (x *LocalCredentialOptions) ClearOptions() { + x.xxx_hidden_Options = nil +} + +func (x *LocalCredentialOptions) ClearRandomPassword() { + if _, ok := x.xxx_hidden_Options.(*localCredentialOptions_RandomPassword_); ok { + x.xxx_hidden_Options = nil + } +} + +func (x *LocalCredentialOptions) ClearNoPassword() { + if _, ok := x.xxx_hidden_Options.(*localCredentialOptions_NoPassword_); ok { + x.xxx_hidden_Options = nil + } +} + +func (x *LocalCredentialOptions) ClearSso() { + if _, ok := x.xxx_hidden_Options.(*localCredentialOptions_Sso); ok { + x.xxx_hidden_Options = nil + } +} + +func (x *LocalCredentialOptions) ClearPlaintextPassword() { + if _, ok := x.xxx_hidden_Options.(*localCredentialOptions_PlaintextPassword_); ok { + x.xxx_hidden_Options = nil + } +} + +const LocalCredentialOptions_Options_not_set_case case_LocalCredentialOptions_Options = 0 +const LocalCredentialOptions_RandomPassword_case case_LocalCredentialOptions_Options = 100 +const LocalCredentialOptions_NoPassword_case case_LocalCredentialOptions_Options = 101 +const LocalCredentialOptions_Sso_case case_LocalCredentialOptions_Options = 102 +const LocalCredentialOptions_PlaintextPassword_case case_LocalCredentialOptions_Options = 103 + +func (x *LocalCredentialOptions) WhichOptions() case_LocalCredentialOptions_Options { + if x == nil { + return LocalCredentialOptions_Options_not_set_case + } + switch x.xxx_hidden_Options.(type) { + case *localCredentialOptions_RandomPassword_: + return LocalCredentialOptions_RandomPassword_case + case *localCredentialOptions_NoPassword_: + return LocalCredentialOptions_NoPassword_case + case *localCredentialOptions_Sso: + return LocalCredentialOptions_Sso_case + case *localCredentialOptions_PlaintextPassword_: + return LocalCredentialOptions_PlaintextPassword_case + default: + return LocalCredentialOptions_Options_not_set_case + } +} + +type LocalCredentialOptions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Fields of oneof xxx_hidden_Options: + RandomPassword *LocalCredentialOptions_RandomPassword + NoPassword *LocalCredentialOptions_NoPassword + Sso *LocalCredentialOptions_SSO + PlaintextPassword *LocalCredentialOptions_PlaintextPassword + // -- end of xxx_hidden_Options + ForceChangeAtNextLogin bool +} + +func (b0 LocalCredentialOptions_builder) Build() *LocalCredentialOptions { + m0 := &LocalCredentialOptions{} + b, x := &b0, m0 + _, _ = b, x + if b.RandomPassword != nil { + x.xxx_hidden_Options = &localCredentialOptions_RandomPassword_{b.RandomPassword} + } + if b.NoPassword != nil { + x.xxx_hidden_Options = &localCredentialOptions_NoPassword_{b.NoPassword} + } + if b.Sso != nil { + x.xxx_hidden_Options = &localCredentialOptions_Sso{b.Sso} + } + if b.PlaintextPassword != nil { + x.xxx_hidden_Options = &localCredentialOptions_PlaintextPassword_{b.PlaintextPassword} + } + x.xxx_hidden_ForceChangeAtNextLogin = b.ForceChangeAtNextLogin + return m0 +} + +type case_LocalCredentialOptions_Options protoreflect.FieldNumber + +func (x case_LocalCredentialOptions_Options) String() string { + md := file_c1_connector_v2_resource_proto_msgTypes[13].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isLocalCredentialOptions_Options interface { + isLocalCredentialOptions_Options() +} + +type localCredentialOptions_RandomPassword_ struct { + RandomPassword *LocalCredentialOptions_RandomPassword `protobuf:"bytes,100,opt,name=random_password,json=randomPassword,proto3,oneof"` +} + +type localCredentialOptions_NoPassword_ struct { + NoPassword *LocalCredentialOptions_NoPassword `protobuf:"bytes,101,opt,name=no_password,json=noPassword,proto3,oneof"` +} + +type localCredentialOptions_Sso struct { + Sso *LocalCredentialOptions_SSO `protobuf:"bytes,102,opt,name=sso,proto3,oneof"` +} + +type localCredentialOptions_PlaintextPassword_ struct { + PlaintextPassword *LocalCredentialOptions_PlaintextPassword `protobuf:"bytes,103,opt,name=plaintext_password,json=plaintextPassword,proto3,oneof"` +} + +func (*localCredentialOptions_RandomPassword_) isLocalCredentialOptions_Options() {} + +func (*localCredentialOptions_NoPassword_) isLocalCredentialOptions_Options() {} + +func (*localCredentialOptions_Sso) isLocalCredentialOptions_Options() {} + +func (*localCredentialOptions_PlaintextPassword_) isLocalCredentialOptions_Options() {} + +type PasswordConstraint struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_CharSet string `protobuf:"bytes,1,opt,name=char_set,json=charSet,proto3"` + xxx_hidden_MinCount uint32 `protobuf:"varint,2,opt,name=min_count,json=minCount,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PasswordConstraint) Reset() { + *x = PasswordConstraint{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PasswordConstraint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PasswordConstraint) ProtoMessage() {} + +func (x *PasswordConstraint) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *PasswordConstraint) GetCharSet() string { + if x != nil { + return x.xxx_hidden_CharSet + } + return 
"" +} + +func (x *PasswordConstraint) GetMinCount() uint32 { + if x != nil { + return x.xxx_hidden_MinCount + } + return 0 +} + +func (x *PasswordConstraint) SetCharSet(v string) { + x.xxx_hidden_CharSet = v +} + +func (x *PasswordConstraint) SetMinCount(v uint32) { + x.xxx_hidden_MinCount = v +} + +type PasswordConstraint_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + CharSet string + MinCount uint32 +} + +func (b0 PasswordConstraint_builder) Build() *PasswordConstraint { + m0 := &PasswordConstraint{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_CharSet = b.CharSet + x.xxx_hidden_MinCount = b.MinCount + return m0 +} + +type CreateAccountRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_AccountInfo *AccountInfo `protobuf:"bytes,1,opt,name=account_info,json=accountInfo,proto3"` + xxx_hidden_CredentialOptions *CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3"` + xxx_hidden_EncryptionConfigs *[]*EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateAccountRequest) Reset() { + *x = CreateAccountRequest{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateAccountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAccountRequest) ProtoMessage() {} + +func (x *CreateAccountRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateAccountRequest) GetAccountInfo() *AccountInfo { + if x != nil { + return x.xxx_hidden_AccountInfo + } + return nil +} + +func (x *CreateAccountRequest) GetCredentialOptions() *CredentialOptions { + if x != nil { + return x.xxx_hidden_CredentialOptions + } + return nil +} + +func (x *CreateAccountRequest) GetEncryptionConfigs() []*EncryptionConfig { + if x != nil { + if x.xxx_hidden_EncryptionConfigs != nil { + return *x.xxx_hidden_EncryptionConfigs + } + } + return nil +} + +func (x *CreateAccountRequest) SetAccountInfo(v *AccountInfo) { + x.xxx_hidden_AccountInfo = v +} + +func (x *CreateAccountRequest) SetCredentialOptions(v *CredentialOptions) { + x.xxx_hidden_CredentialOptions = v +} + +func (x *CreateAccountRequest) SetEncryptionConfigs(v []*EncryptionConfig) { + x.xxx_hidden_EncryptionConfigs = &v +} + +func (x *CreateAccountRequest) HasAccountInfo() bool { + if x == nil { + return false + } + return x.xxx_hidden_AccountInfo != nil +} + +func (x *CreateAccountRequest) HasCredentialOptions() bool { + if x == nil { + return false + } + return x.xxx_hidden_CredentialOptions != nil +} + +func (x *CreateAccountRequest) ClearAccountInfo() { + x.xxx_hidden_AccountInfo = nil +} + +func (x *CreateAccountRequest) ClearCredentialOptions() { + x.xxx_hidden_CredentialOptions = nil +} + +type CreateAccountRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + AccountInfo *AccountInfo + CredentialOptions *CredentialOptions + EncryptionConfigs []*EncryptionConfig +} + +func (b0 CreateAccountRequest_builder) Build() *CreateAccountRequest { + m0 := &CreateAccountRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_AccountInfo = b.AccountInfo + x.xxx_hidden_CredentialOptions = b.CredentialOptions + x.xxx_hidden_EncryptionConfigs = &b.EncryptionConfigs + return m0 +} + +type CreateAccountResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Result isCreateAccountResponse_Result `protobuf_oneof:"result"` + xxx_hidden_EncryptedData *[]*EncryptedData `protobuf:"bytes,2,rep,name=encrypted_data,json=encryptedData,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateAccountResponse) Reset() { + *x = CreateAccountResponse{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateAccountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAccountResponse) ProtoMessage() {} + +func (x *CreateAccountResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateAccountResponse) GetSuccess() *CreateAccountResponse_SuccessResult { + if x != nil { + if x, ok := x.xxx_hidden_Result.(*createAccountResponse_Success); ok { + return x.Success + } + } + return nil +} + +func (x *CreateAccountResponse) GetActionRequired() *CreateAccountResponse_ActionRequiredResult { + if x != nil { + if x, ok := x.xxx_hidden_Result.(*createAccountResponse_ActionRequired); ok { + return x.ActionRequired + } + } + return nil +} + +func (x *CreateAccountResponse) GetAlreadyExists() *CreateAccountResponse_AlreadyExistsResult { + if x != nil { + if x, ok := x.xxx_hidden_Result.(*createAccountResponse_AlreadyExists); ok { + return x.AlreadyExists + } + } + return nil +} + +func (x *CreateAccountResponse) GetInProgress() *CreateAccountResponse_InProgressResult { + if x != nil { + if x, ok := x.xxx_hidden_Result.(*createAccountResponse_InProgress); ok { + return x.InProgress + } + } + return nil +} + +func (x *CreateAccountResponse) GetEncryptedData() []*EncryptedData { + if x != nil { + if x.xxx_hidden_EncryptedData != nil { + return *x.xxx_hidden_EncryptedData + } + } + return nil +} + +func (x *CreateAccountResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *CreateAccountResponse) SetSuccess(v *CreateAccountResponse_SuccessResult) { + if v == nil { + x.xxx_hidden_Result = nil + return + } + x.xxx_hidden_Result = &createAccountResponse_Success{v} +} + +func (x *CreateAccountResponse) SetActionRequired(v *CreateAccountResponse_ActionRequiredResult) { + if v == nil { + x.xxx_hidden_Result = nil + return + } + x.xxx_hidden_Result = &createAccountResponse_ActionRequired{v} +} + +func (x *CreateAccountResponse) SetAlreadyExists(v *CreateAccountResponse_AlreadyExistsResult) { + if v == nil { + x.xxx_hidden_Result = nil + return + } + x.xxx_hidden_Result = &createAccountResponse_AlreadyExists{v} +} + +func (x *CreateAccountResponse) 
SetInProgress(v *CreateAccountResponse_InProgressResult) { + if v == nil { + x.xxx_hidden_Result = nil + return + } + x.xxx_hidden_Result = &createAccountResponse_InProgress{v} +} + +func (x *CreateAccountResponse) SetEncryptedData(v []*EncryptedData) { + x.xxx_hidden_EncryptedData = &v +} + +func (x *CreateAccountResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *CreateAccountResponse) HasResult() bool { + if x == nil { + return false + } + return x.xxx_hidden_Result != nil +} + +func (x *CreateAccountResponse) HasSuccess() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Result.(*createAccountResponse_Success) + return ok +} + +func (x *CreateAccountResponse) HasActionRequired() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Result.(*createAccountResponse_ActionRequired) + return ok +} + +func (x *CreateAccountResponse) HasAlreadyExists() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Result.(*createAccountResponse_AlreadyExists) + return ok +} + +func (x *CreateAccountResponse) HasInProgress() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Result.(*createAccountResponse_InProgress) + return ok +} + +func (x *CreateAccountResponse) ClearResult() { + x.xxx_hidden_Result = nil +} + +func (x *CreateAccountResponse) ClearSuccess() { + if _, ok := x.xxx_hidden_Result.(*createAccountResponse_Success); ok { + x.xxx_hidden_Result = nil + } +} + +func (x *CreateAccountResponse) ClearActionRequired() { + if _, ok := x.xxx_hidden_Result.(*createAccountResponse_ActionRequired); ok { + x.xxx_hidden_Result = nil + } +} + +func (x *CreateAccountResponse) ClearAlreadyExists() { + if _, ok := x.xxx_hidden_Result.(*createAccountResponse_AlreadyExists); ok { + x.xxx_hidden_Result = nil + } +} + +func (x *CreateAccountResponse) ClearInProgress() { + if _, ok := x.xxx_hidden_Result.(*createAccountResponse_InProgress); ok { + x.xxx_hidden_Result = nil + } +} + +const CreateAccountResponse_Result_not_set_case case_CreateAccountResponse_Result = 0 +const CreateAccountResponse_Success_case case_CreateAccountResponse_Result = 100 +const CreateAccountResponse_ActionRequired_case case_CreateAccountResponse_Result = 101 +const CreateAccountResponse_AlreadyExists_case case_CreateAccountResponse_Result = 102 +const CreateAccountResponse_InProgress_case case_CreateAccountResponse_Result = 103 + +func (x *CreateAccountResponse) WhichResult() case_CreateAccountResponse_Result { + if x == nil { + return CreateAccountResponse_Result_not_set_case + } + switch x.xxx_hidden_Result.(type) { + case *createAccountResponse_Success: + return CreateAccountResponse_Success_case + case *createAccountResponse_ActionRequired: + return CreateAccountResponse_ActionRequired_case + case *createAccountResponse_AlreadyExists: + return CreateAccountResponse_AlreadyExists_case + case *createAccountResponse_InProgress: + return CreateAccountResponse_InProgress_case + default: + return CreateAccountResponse_Result_not_set_case + } +} + +type CreateAccountResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Fields of oneof xxx_hidden_Result: + Success *CreateAccountResponse_SuccessResult + ActionRequired *CreateAccountResponse_ActionRequiredResult + AlreadyExists *CreateAccountResponse_AlreadyExistsResult + InProgress *CreateAccountResponse_InProgressResult + // -- end of xxx_hidden_Result + EncryptedData []*EncryptedData + Annotations []*anypb.Any +} + +func (b0 CreateAccountResponse_builder) Build() *CreateAccountResponse { + m0 := &CreateAccountResponse{} + b, x := &b0, m0 + _, _ = b, x + if b.Success != nil { + x.xxx_hidden_Result = &createAccountResponse_Success{b.Success} + } + if b.ActionRequired != nil { + x.xxx_hidden_Result = &createAccountResponse_ActionRequired{b.ActionRequired} + } + if b.AlreadyExists != nil { + x.xxx_hidden_Result = &createAccountResponse_AlreadyExists{b.AlreadyExists} + } + if b.InProgress != nil { + x.xxx_hidden_Result = &createAccountResponse_InProgress{b.InProgress} + } + x.xxx_hidden_EncryptedData = &b.EncryptedData + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type case_CreateAccountResponse_Result protoreflect.FieldNumber + +func (x case_CreateAccountResponse_Result) String() string { + md := file_c1_connector_v2_resource_proto_msgTypes[16].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isCreateAccountResponse_Result interface { + isCreateAccountResponse_Result() +} + +type createAccountResponse_Success struct { + Success *CreateAccountResponse_SuccessResult `protobuf:"bytes,100,opt,name=success,proto3,oneof"` +} + +type createAccountResponse_ActionRequired struct { + ActionRequired *CreateAccountResponse_ActionRequiredResult `protobuf:"bytes,101,opt,name=action_required,json=actionRequired,proto3,oneof"` +} + +type createAccountResponse_AlreadyExists struct { + AlreadyExists *CreateAccountResponse_AlreadyExistsResult `protobuf:"bytes,102,opt,name=already_exists,json=alreadyExists,proto3,oneof"` +} + +type createAccountResponse_InProgress struct { + InProgress *CreateAccountResponse_InProgressResult `protobuf:"bytes,103,opt,name=in_progress,json=inProgress,proto3,oneof"` +} + +func (*createAccountResponse_Success) isCreateAccountResponse_Result() {} + +func (*createAccountResponse_ActionRequired) isCreateAccountResponse_Result() {} + +func (*createAccountResponse_AlreadyExists) isCreateAccountResponse_Result() {} + +func (*createAccountResponse_InProgress) isCreateAccountResponse_Result() {} + +type EncryptedData struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Provider string `protobuf:"bytes,1,opt,name=provider,proto3"` + xxx_hidden_KeyId string `protobuf:"bytes,2,opt,name=key_id,json=keyId,proto3"` + xxx_hidden_Name string `protobuf:"bytes,3,opt,name=name,proto3"` + xxx_hidden_Description string `protobuf:"bytes,4,opt,name=description,proto3"` + xxx_hidden_Schema string `protobuf:"bytes,5,opt,name=schema,proto3"` + xxx_hidden_EncryptedBytes []byte `protobuf:"bytes,6,opt,name=encrypted_bytes,json=encryptedBytes,proto3"` + xxx_hidden_KeyIds []string `protobuf:"bytes,7,rep,name=key_ids,json=keyIds,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EncryptedData) Reset() { + *x = EncryptedData{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EncryptedData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncryptedData) ProtoMessage() {} + +func 
(x *EncryptedData) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EncryptedData) GetProvider() string { + if x != nil { + return x.xxx_hidden_Provider + } + return "" +} + +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. +func (x *EncryptedData) GetKeyId() string { + if x != nil { + return x.xxx_hidden_KeyId + } + return "" +} + +func (x *EncryptedData) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *EncryptedData) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *EncryptedData) GetSchema() string { + if x != nil { + return x.xxx_hidden_Schema + } + return "" +} + +func (x *EncryptedData) GetEncryptedBytes() []byte { + if x != nil { + return x.xxx_hidden_EncryptedBytes + } + return nil +} + +func (x *EncryptedData) GetKeyIds() []string { + if x != nil { + return x.xxx_hidden_KeyIds + } + return nil +} + +func (x *EncryptedData) SetProvider(v string) { + x.xxx_hidden_Provider = v +} + +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. +func (x *EncryptedData) SetKeyId(v string) { + x.xxx_hidden_KeyId = v +} + +func (x *EncryptedData) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *EncryptedData) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *EncryptedData) SetSchema(v string) { + x.xxx_hidden_Schema = v +} + +func (x *EncryptedData) SetEncryptedBytes(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_EncryptedBytes = v +} + +func (x *EncryptedData) SetKeyIds(v []string) { + x.xxx_hidden_KeyIds = v +} + +type EncryptedData_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Provider string + // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. 
+ KeyId string + Name string + Description string + Schema string + EncryptedBytes []byte + KeyIds []string +} + +func (b0 EncryptedData_builder) Build() *EncryptedData { + m0 := &EncryptedData{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Provider = b.Provider + x.xxx_hidden_KeyId = b.KeyId + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Schema = b.Schema + x.xxx_hidden_EncryptedBytes = b.EncryptedBytes + x.xxx_hidden_KeyIds = b.KeyIds + return m0 +} + +type PlaintextData struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3"` + xxx_hidden_Schema string `protobuf:"bytes,3,opt,name=schema,proto3"` + xxx_hidden_Bytes []byte `protobuf:"bytes,4,opt,name=bytes,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PlaintextData) Reset() { + *x = PlaintextData{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PlaintextData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlaintextData) ProtoMessage() {} + +func (x *PlaintextData) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *PlaintextData) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *PlaintextData) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *PlaintextData) GetSchema() string { + if x != nil { + return x.xxx_hidden_Schema + } + return "" +} + +func (x *PlaintextData) GetBytes() []byte { + if x != nil { + return x.xxx_hidden_Bytes + } + return nil +} + +func (x *PlaintextData) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *PlaintextData) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *PlaintextData) SetSchema(v string) { + x.xxx_hidden_Schema = v +} + +func (x *PlaintextData) SetBytes(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Bytes = v +} + +type PlaintextData_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Description string + Schema string + Bytes []byte +} + +func (b0 PlaintextData_builder) Build() *PlaintextData { + m0 := &PlaintextData{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Schema = b.Schema + x.xxx_hidden_Bytes = b.Bytes + return m0 +} + +type EncryptionConfig struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Principal *Resource `protobuf:"bytes,1,opt,name=principal,proto3"` + xxx_hidden_Provider string `protobuf:"bytes,2,opt,name=provider,proto3"` + xxx_hidden_KeyId string `protobuf:"bytes,3,opt,name=key_id,json=keyId,proto3"` + xxx_hidden_Config isEncryptionConfig_Config `protobuf_oneof:"config"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EncryptionConfig) Reset() { + *x = EncryptionConfig{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EncryptionConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncryptionConfig) ProtoMessage() {} + +func (x *EncryptionConfig) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EncryptionConfig) GetPrincipal() *Resource { + if x != nil { + return x.xxx_hidden_Principal + } + return nil +} + +func (x *EncryptionConfig) GetProvider() string { + if x != nil { + return x.xxx_hidden_Provider + } + return "" +} + +func (x *EncryptionConfig) GetKeyId() string { + if x != nil { + return x.xxx_hidden_KeyId + } + return "" +} + +func (x *EncryptionConfig) GetJwkPublicKeyConfig() *EncryptionConfig_JWKPublicKeyConfig { + if x != nil { + if x, ok := x.xxx_hidden_Config.(*encryptionConfig_JwkPublicKeyConfig); ok { + return x.JwkPublicKeyConfig + } + } + return nil +} + +func (x *EncryptionConfig) SetPrincipal(v *Resource) { + x.xxx_hidden_Principal = v +} + +func (x *EncryptionConfig) SetProvider(v string) { + x.xxx_hidden_Provider = v +} + +func (x *EncryptionConfig) SetKeyId(v string) { + x.xxx_hidden_KeyId = v +} + +func (x *EncryptionConfig) SetJwkPublicKeyConfig(v *EncryptionConfig_JWKPublicKeyConfig) { + if v == nil { + x.xxx_hidden_Config = nil + return + } + x.xxx_hidden_Config = &encryptionConfig_JwkPublicKeyConfig{v} +} + +func (x *EncryptionConfig) HasPrincipal() bool { + if x == nil { + return false + } + return x.xxx_hidden_Principal != nil +} + +func (x *EncryptionConfig) HasConfig() bool { + if x == nil { + return false + } + return x.xxx_hidden_Config != nil +} + +func (x *EncryptionConfig) HasJwkPublicKeyConfig() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Config.(*encryptionConfig_JwkPublicKeyConfig) + return ok +} + +func (x *EncryptionConfig) ClearPrincipal() { + x.xxx_hidden_Principal = nil +} + +func (x *EncryptionConfig) ClearConfig() { + x.xxx_hidden_Config = nil +} + +func (x *EncryptionConfig) ClearJwkPublicKeyConfig() { + if _, ok := x.xxx_hidden_Config.(*encryptionConfig_JwkPublicKeyConfig); ok { + x.xxx_hidden_Config = nil + } +} + +const EncryptionConfig_Config_not_set_case case_EncryptionConfig_Config = 0 +const EncryptionConfig_JwkPublicKeyConfig_case case_EncryptionConfig_Config = 100 + +func (x *EncryptionConfig) WhichConfig() case_EncryptionConfig_Config { + if x == nil { 
+ return EncryptionConfig_Config_not_set_case + } + switch x.xxx_hidden_Config.(type) { + case *encryptionConfig_JwkPublicKeyConfig: + return EncryptionConfig_JwkPublicKeyConfig_case + default: + return EncryptionConfig_Config_not_set_case + } +} + +type EncryptionConfig_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Principal *Resource + Provider string + KeyId string + // Fields of oneof xxx_hidden_Config: + JwkPublicKeyConfig *EncryptionConfig_JWKPublicKeyConfig + // -- end of xxx_hidden_Config +} + +func (b0 EncryptionConfig_builder) Build() *EncryptionConfig { + m0 := &EncryptionConfig{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Principal = b.Principal + x.xxx_hidden_Provider = b.Provider + x.xxx_hidden_KeyId = b.KeyId + if b.JwkPublicKeyConfig != nil { + x.xxx_hidden_Config = &encryptionConfig_JwkPublicKeyConfig{b.JwkPublicKeyConfig} + } + return m0 +} + +type case_EncryptionConfig_Config protoreflect.FieldNumber + +func (x case_EncryptionConfig_Config) String() string { + md := file_c1_connector_v2_resource_proto_msgTypes[19].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isEncryptionConfig_Config interface { + isEncryptionConfig_Config() +} + +type encryptionConfig_JwkPublicKeyConfig struct { + JwkPublicKeyConfig *EncryptionConfig_JWKPublicKeyConfig `protobuf:"bytes,100,opt,name=jwk_public_key_config,json=jwkPublicKeyConfig,proto3,oneof"` +} + +func (*encryptionConfig_JwkPublicKeyConfig) isEncryptionConfig_Config() {} + +type ResourceId struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3"` + xxx_hidden_Resource string `protobuf:"bytes,2,opt,name=resource,proto3"` + xxx_hidden_BatonResource bool `protobuf:"varint,3,opt,name=baton_resource,json=batonResource,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceId) Reset() { + *x = ResourceId{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceId) ProtoMessage() {} + +func (x *ResourceId) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceId) GetResourceType() string { + if x != nil { + return x.xxx_hidden_ResourceType + } + return "" +} + +func (x *ResourceId) GetResource() string { + if x != nil { + return x.xxx_hidden_Resource + } + return "" +} + +func (x *ResourceId) GetBatonResource() bool { + if x != nil { + return x.xxx_hidden_BatonResource + } + return false +} + +func (x *ResourceId) SetResourceType(v string) { + x.xxx_hidden_ResourceType = v +} + +func (x *ResourceId) SetResource(v string) { + x.xxx_hidden_Resource = v +} + +func (x *ResourceId) SetBatonResource(v bool) { + x.xxx_hidden_BatonResource = v +} + +type ResourceId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceType string + Resource string + BatonResource bool +} + +func (b0 ResourceId_builder) Build() *ResourceId { + m0 := &ResourceId{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceType = b.ResourceType + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_BatonResource = b.BatonResource + return m0 +} + +type Resource struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id *ResourceId `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_Description string `protobuf:"bytes,5,opt,name=description,proto3"` + xxx_hidden_BatonResource bool `protobuf:"varint,6,opt,name=baton_resource,json=batonResource,proto3"` + xxx_hidden_ExternalId *ExternalId `protobuf:"bytes,7,opt,name=external_id,json=externalId,proto3"` + xxx_hidden_CreationSource Resource_CreationSource `protobuf:"varint,8,opt,name=creation_source,json=creationSource,proto3,enum=c1.connector.v2.Resource_CreationSource"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Resource) Reset() { + *x = Resource{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Resource) GetId() *ResourceId { + if x != nil { + return x.xxx_hidden_Id + } + return nil +} + +func (x *Resource) GetParentResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ParentResourceId + } + return nil +} + +func (x *Resource) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *Resource) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Resource) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Resource) GetBatonResource() bool { + if x != nil { + return x.xxx_hidden_BatonResource + } + return false +} + +func (x *Resource) GetExternalId() *ExternalId { + if x != nil { + return x.xxx_hidden_ExternalId + } + return nil +} + +func (x *Resource) GetCreationSource() Resource_CreationSource { + if x != nil { + return x.xxx_hidden_CreationSource + } + return Resource_CREATION_SOURCE_UNSPECIFIED +} + +func (x *Resource) SetId(v *ResourceId) { + x.xxx_hidden_Id = v +} + +func (x *Resource) SetParentResourceId(v *ResourceId) { + x.xxx_hidden_ParentResourceId = v +} + +func (x *Resource) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *Resource) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Resource) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Resource) SetBatonResource(v bool) { + x.xxx_hidden_BatonResource = v +} + +func (x *Resource) SetExternalId(v *ExternalId) { + 
x.xxx_hidden_ExternalId = v +} + +func (x *Resource) SetCreationSource(v Resource_CreationSource) { + x.xxx_hidden_CreationSource = v +} + +func (x *Resource) HasId() bool { + if x == nil { + return false + } + return x.xxx_hidden_Id != nil +} + +func (x *Resource) HasParentResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ParentResourceId != nil +} + +func (x *Resource) HasExternalId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ExternalId != nil +} + +func (x *Resource) ClearId() { + x.xxx_hidden_Id = nil +} + +func (x *Resource) ClearParentResourceId() { + x.xxx_hidden_ParentResourceId = nil +} + +func (x *Resource) ClearExternalId() { + x.xxx_hidden_ExternalId = nil +} + +type Resource_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id *ResourceId + ParentResourceId *ResourceId + DisplayName string + Annotations []*anypb.Any + Description string + BatonResource bool + ExternalId *ExternalId + CreationSource Resource_CreationSource +} + +func (b0 Resource_builder) Build() *Resource { + m0 := &Resource{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_ParentResourceId = b.ParentResourceId + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Description = b.Description + x.xxx_hidden_BatonResource = b.BatonResource + x.xxx_hidden_ExternalId = b.ExternalId + x.xxx_hidden_CreationSource = b.CreationSource + return m0 +} + +type ResourcesServiceListResourcesRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3"` + xxx_hidden_ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,5,rep,name=annotations,proto3"` + xxx_hidden_ActiveSyncId string `protobuf:"bytes,6,opt,name=active_sync_id,json=activeSyncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourcesServiceListResourcesRequest) Reset() { + *x = ResourcesServiceListResourcesRequest{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourcesServiceListResourcesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourcesServiceListResourcesRequest) ProtoMessage() {} + +func (x *ResourcesServiceListResourcesRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourcesServiceListResourcesRequest) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *ResourcesServiceListResourcesRequest) GetParentResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ParentResourceId + } + return nil +} + +func (x *ResourcesServiceListResourcesRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x 
*ResourcesServiceListResourcesRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *ResourcesServiceListResourcesRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourcesServiceListResourcesRequest) GetActiveSyncId() string { + if x != nil { + return x.xxx_hidden_ActiveSyncId + } + return "" +} + +func (x *ResourcesServiceListResourcesRequest) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +func (x *ResourcesServiceListResourcesRequest) SetParentResourceId(v *ResourceId) { + x.xxx_hidden_ParentResourceId = v +} + +func (x *ResourcesServiceListResourcesRequest) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *ResourcesServiceListResourcesRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *ResourcesServiceListResourcesRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ResourcesServiceListResourcesRequest) SetActiveSyncId(v string) { + x.xxx_hidden_ActiveSyncId = v +} + +func (x *ResourcesServiceListResourcesRequest) HasParentResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ParentResourceId != nil +} + +func (x *ResourcesServiceListResourcesRequest) ClearParentResourceId() { + x.xxx_hidden_ParentResourceId = nil +} + +type ResourcesServiceListResourcesRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceTypeId string + ParentResourceId *ResourceId + PageSize uint32 + PageToken string + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 ResourcesServiceListResourcesRequest_builder) Build() *ResourcesServiceListResourcesRequest { + m0 := &ResourcesServiceListResourcesRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + x.xxx_hidden_ParentResourceId = b.ParentResourceId + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ActiveSyncId = b.ActiveSyncId + return m0 +} + +type ResourcesServiceListResourcesResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*Resource `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourcesServiceListResourcesResponse) Reset() { + *x = ResourcesServiceListResourcesResponse{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourcesServiceListResourcesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourcesServiceListResourcesResponse) ProtoMessage() {} + +func (x *ResourcesServiceListResourcesResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourcesServiceListResourcesResponse) GetList() []*Resource { + if x != nil { + if x.xxx_hidden_List != nil { + return 
*x.xxx_hidden_List + } + } + return nil +} + +func (x *ResourcesServiceListResourcesResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *ResourcesServiceListResourcesResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourcesServiceListResourcesResponse) SetList(v []*Resource) { + x.xxx_hidden_List = &v +} + +func (x *ResourcesServiceListResourcesResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +func (x *ResourcesServiceListResourcesResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type ResourcesServiceListResourcesResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*Resource + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 ResourcesServiceListResourcesResponse_builder) Build() *ResourcesServiceListResourcesResponse { + m0 := &ResourcesServiceListResourcesResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_NextPageToken = b.NextPageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ResourceGetterServiceGetResourceRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + xxx_hidden_ActiveSyncId string `protobuf:"bytes,4,opt,name=active_sync_id,json=activeSyncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceGetterServiceGetResourceRequest) Reset() { + *x = ResourceGetterServiceGetResourceRequest{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceGetterServiceGetResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceGetterServiceGetResourceRequest) ProtoMessage() {} + +func (x *ResourceGetterServiceGetResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceGetterServiceGetResourceRequest) GetResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *ResourceGetterServiceGetResourceRequest) GetParentResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ParentResourceId + } + return nil +} + +func (x *ResourceGetterServiceGetResourceRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourceGetterServiceGetResourceRequest) GetActiveSyncId() string { + if x != nil { + return x.xxx_hidden_ActiveSyncId + } + return "" +} + +func (x *ResourceGetterServiceGetResourceRequest) SetResourceId(v *ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *ResourceGetterServiceGetResourceRequest) SetParentResourceId(v *ResourceId) { + x.xxx_hidden_ParentResourceId = v +} 
+ +func (x *ResourceGetterServiceGetResourceRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ResourceGetterServiceGetResourceRequest) SetActiveSyncId(v string) { + x.xxx_hidden_ActiveSyncId = v +} + +func (x *ResourceGetterServiceGetResourceRequest) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *ResourceGetterServiceGetResourceRequest) HasParentResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ParentResourceId != nil +} + +func (x *ResourceGetterServiceGetResourceRequest) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +func (x *ResourceGetterServiceGetResourceRequest) ClearParentResourceId() { + x.xxx_hidden_ParentResourceId = nil +} + +type ResourceGetterServiceGetResourceRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *ResourceId + ParentResourceId *ResourceId + Annotations []*anypb.Any + ActiveSyncId string +} + +func (b0 ResourceGetterServiceGetResourceRequest_builder) Build() *ResourceGetterServiceGetResourceRequest { + m0 := &ResourceGetterServiceGetResourceRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_ParentResourceId = b.ParentResourceId + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ActiveSyncId = b.ActiveSyncId + return m0 +} + +type ResourceGetterServiceGetResourceResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceGetterServiceGetResourceResponse) Reset() { + *x = ResourceGetterServiceGetResourceResponse{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceGetterServiceGetResourceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceGetterServiceGetResourceResponse) ProtoMessage() {} + +func (x *ResourceGetterServiceGetResourceResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceGetterServiceGetResourceResponse) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *ResourceGetterServiceGetResourceResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourceGetterServiceGetResourceResponse) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *ResourceGetterServiceGetResourceResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ResourceGetterServiceGetResourceResponse) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *ResourceGetterServiceGetResourceResponse) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type ResourceGetterServiceGetResourceResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + Annotations []*anypb.Any +} + +func (b0 ResourceGetterServiceGetResourceResponse_builder) Build() *ResourceGetterServiceGetResourceResponse { + m0 := &ResourceGetterServiceGetResourceResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ExternalId struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_Link string `protobuf:"bytes,2,opt,name=link,proto3"` + xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalId) Reset() { + *x = ExternalId{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalId) ProtoMessage() {} + +func (x *ExternalId) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalId) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *ExternalId) GetLink() string { + if x != nil { + return x.xxx_hidden_Link + } + return "" +} + +func (x *ExternalId) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *ExternalId) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *ExternalId) SetLink(v string) { + x.xxx_hidden_Link = v +} + +func (x *ExternalId) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +type ExternalId_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + Link string + Description string +} + +func (b0 ExternalId_builder) Build() *ExternalId { + m0 := &ExternalId{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Link = b.Link + x.xxx_hidden_Description = b.Description + return m0 +} + +type AccountInfo_Email struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Address string `protobuf:"bytes,1,opt,name=address,proto3"` + xxx_hidden_IsPrimary bool `protobuf:"varint,2,opt,name=is_primary,json=isPrimary,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AccountInfo_Email) Reset() { + *x = AccountInfo_Email{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AccountInfo_Email) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccountInfo_Email) ProtoMessage() {} + +func (x *AccountInfo_Email) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *AccountInfo_Email) GetAddress() string { + if x != nil { + return x.xxx_hidden_Address + } + return "" +} + +func (x *AccountInfo_Email) GetIsPrimary() bool { + if x != nil { + return x.xxx_hidden_IsPrimary + } + return false +} + +func (x *AccountInfo_Email) SetAddress(v string) { + x.xxx_hidden_Address = v +} + +func (x *AccountInfo_Email) SetIsPrimary(v bool) { + x.xxx_hidden_IsPrimary = v +} + +type AccountInfo_Email_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Address string + // Indicates if this is the user's primary email. Only one entry can be marked as primary. 
+ IsPrimary bool +} + +func (b0 AccountInfo_Email_builder) Build() *AccountInfo_Email { + m0 := &AccountInfo_Email{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Address = b.Address + x.xxx_hidden_IsPrimary = b.IsPrimary + return m0 +} + +type CredentialOptions_RandomPassword struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Length int64 `protobuf:"varint,1,opt,name=length,proto3"` + xxx_hidden_Constraints *[]*PasswordConstraint `protobuf:"bytes,2,rep,name=constraints,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CredentialOptions_RandomPassword) Reset() { + *x = CredentialOptions_RandomPassword{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CredentialOptions_RandomPassword) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialOptions_RandomPassword) ProtoMessage() {} + +func (x *CredentialOptions_RandomPassword) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CredentialOptions_RandomPassword) GetLength() int64 { + if x != nil { + return x.xxx_hidden_Length + } + return 0 +} + +func (x *CredentialOptions_RandomPassword) GetConstraints() []*PasswordConstraint { + if x != nil { + if x.xxx_hidden_Constraints != nil { + return *x.xxx_hidden_Constraints + } + } + return nil +} + +func (x *CredentialOptions_RandomPassword) SetLength(v int64) { + x.xxx_hidden_Length = v +} + +func (x *CredentialOptions_RandomPassword) SetConstraints(v []*PasswordConstraint) { + x.xxx_hidden_Constraints = &v +} + +type CredentialOptions_RandomPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Length int64 + Constraints []*PasswordConstraint +} + +func (b0 CredentialOptions_RandomPassword_builder) Build() *CredentialOptions_RandomPassword { + m0 := &CredentialOptions_RandomPassword{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Length = b.Length + x.xxx_hidden_Constraints = &b.Constraints + return m0 +} + +type CredentialOptions_NoPassword struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CredentialOptions_NoPassword) Reset() { + *x = CredentialOptions_NoPassword{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CredentialOptions_NoPassword) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialOptions_NoPassword) ProtoMessage() {} + +func (x *CredentialOptions_NoPassword) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type CredentialOptions_NoPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 CredentialOptions_NoPassword_builder) Build() *CredentialOptions_NoPassword { + m0 := &CredentialOptions_NoPassword{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type CredentialOptions_SSO struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SsoProvider string `protobuf:"bytes,1,opt,name=sso_provider,json=ssoProvider,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CredentialOptions_SSO) Reset() { + *x = CredentialOptions_SSO{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CredentialOptions_SSO) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialOptions_SSO) ProtoMessage() {} + +func (x *CredentialOptions_SSO) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CredentialOptions_SSO) GetSsoProvider() string { + if x != nil { + return x.xxx_hidden_SsoProvider + } + return "" +} + +func (x *CredentialOptions_SSO) SetSsoProvider(v string) { + x.xxx_hidden_SsoProvider = v +} + +type CredentialOptions_SSO_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SsoProvider string +} + +func (b0 CredentialOptions_SSO_builder) Build() *CredentialOptions_SSO { + m0 := &CredentialOptions_SSO{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SsoProvider = b.SsoProvider + return m0 +} + +type CredentialOptions_EncryptedPassword struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_EncryptedPasswords *[]*EncryptedData `protobuf:"bytes,1,rep,name=encrypted_passwords,json=encryptedPasswords,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CredentialOptions_EncryptedPassword) Reset() { + *x = CredentialOptions_EncryptedPassword{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CredentialOptions_EncryptedPassword) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialOptions_EncryptedPassword) ProtoMessage() {} + +func (x *CredentialOptions_EncryptedPassword) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CredentialOptions_EncryptedPassword) GetEncryptedPasswords() []*EncryptedData { + if x != nil { + if x.xxx_hidden_EncryptedPasswords != nil { + return *x.xxx_hidden_EncryptedPasswords + } + } + return nil +} + +func (x *CredentialOptions_EncryptedPassword) SetEncryptedPasswords(v []*EncryptedData) { + x.xxx_hidden_EncryptedPasswords = &v +} + +type CredentialOptions_EncryptedPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + EncryptedPasswords []*EncryptedData +} + +func (b0 CredentialOptions_EncryptedPassword_builder) Build() *CredentialOptions_EncryptedPassword { + m0 := &CredentialOptions_EncryptedPassword{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_EncryptedPasswords = &b.EncryptedPasswords + return m0 +} + +type LocalCredentialOptions_RandomPassword struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Length int64 `protobuf:"varint,1,opt,name=length,proto3"` + xxx_hidden_Constraints *[]*PasswordConstraint `protobuf:"bytes,2,rep,name=constraints,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LocalCredentialOptions_RandomPassword) Reset() { + *x = LocalCredentialOptions_RandomPassword{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LocalCredentialOptions_RandomPassword) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalCredentialOptions_RandomPassword) ProtoMessage() {} + +func (x *LocalCredentialOptions_RandomPassword) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *LocalCredentialOptions_RandomPassword) GetLength() int64 { + if x != nil { + return x.xxx_hidden_Length + } + return 0 +} + +func (x *LocalCredentialOptions_RandomPassword) GetConstraints() []*PasswordConstraint { + if x != nil { + if x.xxx_hidden_Constraints != nil { + return *x.xxx_hidden_Constraints + } + } + return nil +} + +func (x *LocalCredentialOptions_RandomPassword) SetLength(v int64) { + x.xxx_hidden_Length = v +} + +func (x *LocalCredentialOptions_RandomPassword) SetConstraints(v []*PasswordConstraint) { + x.xxx_hidden_Constraints = &v +} + +type LocalCredentialOptions_RandomPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Length int64 + Constraints []*PasswordConstraint +} + +func (b0 LocalCredentialOptions_RandomPassword_builder) Build() *LocalCredentialOptions_RandomPassword { + m0 := &LocalCredentialOptions_RandomPassword{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Length = b.Length + x.xxx_hidden_Constraints = &b.Constraints + return m0 +} + +type LocalCredentialOptions_NoPassword struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LocalCredentialOptions_NoPassword) Reset() { + *x = LocalCredentialOptions_NoPassword{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LocalCredentialOptions_NoPassword) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalCredentialOptions_NoPassword) ProtoMessage() {} + +func (x *LocalCredentialOptions_NoPassword) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type LocalCredentialOptions_NoPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 LocalCredentialOptions_NoPassword_builder) Build() *LocalCredentialOptions_NoPassword { + m0 := &LocalCredentialOptions_NoPassword{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type LocalCredentialOptions_SSO struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SsoProvider string `protobuf:"bytes,1,opt,name=sso_provider,json=ssoProvider,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LocalCredentialOptions_SSO) Reset() { + *x = LocalCredentialOptions_SSO{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LocalCredentialOptions_SSO) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalCredentialOptions_SSO) ProtoMessage() {} + +func (x *LocalCredentialOptions_SSO) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[34] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *LocalCredentialOptions_SSO) GetSsoProvider() string { + if x != nil { + return x.xxx_hidden_SsoProvider + } + return "" +} + +func (x *LocalCredentialOptions_SSO) SetSsoProvider(v string) { + x.xxx_hidden_SsoProvider = v +} + +type LocalCredentialOptions_SSO_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SsoProvider string +} + +func (b0 LocalCredentialOptions_SSO_builder) Build() *LocalCredentialOptions_SSO { + m0 := &LocalCredentialOptions_SSO{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SsoProvider = b.SsoProvider + return m0 +} + +type LocalCredentialOptions_PlaintextPassword struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_PlaintextPassword string `protobuf:"bytes,1,opt,name=plaintext_password,json=plaintextPassword,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LocalCredentialOptions_PlaintextPassword) Reset() { + *x = LocalCredentialOptions_PlaintextPassword{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LocalCredentialOptions_PlaintextPassword) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalCredentialOptions_PlaintextPassword) ProtoMessage() {} + +func (x *LocalCredentialOptions_PlaintextPassword) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *LocalCredentialOptions_PlaintextPassword) GetPlaintextPassword() string { + if x != nil { + return x.xxx_hidden_PlaintextPassword + } + return "" +} + +func (x *LocalCredentialOptions_PlaintextPassword) SetPlaintextPassword(v string) { + x.xxx_hidden_PlaintextPassword = v +} + +type LocalCredentialOptions_PlaintextPassword_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + PlaintextPassword string +} + +func (b0 LocalCredentialOptions_PlaintextPassword_builder) Build() *LocalCredentialOptions_PlaintextPassword { + m0 := &LocalCredentialOptions_PlaintextPassword{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_PlaintextPassword = b.PlaintextPassword + return m0 +} + +type CreateAccountResponse_SuccessResult struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_IsCreateAccountResult bool `protobuf:"varint,2,opt,name=is_create_account_result,json=isCreateAccountResult,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateAccountResponse_SuccessResult) Reset() { + *x = CreateAccountResponse_SuccessResult{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateAccountResponse_SuccessResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAccountResponse_SuccessResult) ProtoMessage() {} + +func (x *CreateAccountResponse_SuccessResult) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[36] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateAccountResponse_SuccessResult) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *CreateAccountResponse_SuccessResult) GetIsCreateAccountResult() bool { + if x != nil { + return x.xxx_hidden_IsCreateAccountResult + } + return false +} + +func (x *CreateAccountResponse_SuccessResult) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *CreateAccountResponse_SuccessResult) SetIsCreateAccountResult(v bool) { + x.xxx_hidden_IsCreateAccountResult = v +} + +func (x *CreateAccountResponse_SuccessResult) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *CreateAccountResponse_SuccessResult) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type CreateAccountResponse_SuccessResult_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + IsCreateAccountResult bool +} + +func (b0 CreateAccountResponse_SuccessResult_builder) Build() *CreateAccountResponse_SuccessResult { + m0 := &CreateAccountResponse_SuccessResult{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_IsCreateAccountResult = b.IsCreateAccountResult + return m0 +} + +type CreateAccountResponse_ActionRequiredResult struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_Message string `protobuf:"bytes,2,opt,name=message,proto3"` + xxx_hidden_IsCreateAccountResult bool `protobuf:"varint,3,opt,name=is_create_account_result,json=isCreateAccountResult,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateAccountResponse_ActionRequiredResult) Reset() { + *x = CreateAccountResponse_ActionRequiredResult{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateAccountResponse_ActionRequiredResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAccountResponse_ActionRequiredResult) ProtoMessage() {} + +func (x *CreateAccountResponse_ActionRequiredResult) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[37] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateAccountResponse_ActionRequiredResult) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *CreateAccountResponse_ActionRequiredResult) GetMessage() string { + if x != nil { + return x.xxx_hidden_Message + } + return "" +} + +func (x *CreateAccountResponse_ActionRequiredResult) GetIsCreateAccountResult() bool { + if x != nil { + return x.xxx_hidden_IsCreateAccountResult + } + return false +} + +func (x *CreateAccountResponse_ActionRequiredResult) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *CreateAccountResponse_ActionRequiredResult) SetMessage(v string) { + x.xxx_hidden_Message = v +} + +func (x *CreateAccountResponse_ActionRequiredResult) SetIsCreateAccountResult(v bool) { + x.xxx_hidden_IsCreateAccountResult = v +} + +func (x *CreateAccountResponse_ActionRequiredResult) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *CreateAccountResponse_ActionRequiredResult) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type CreateAccountResponse_ActionRequiredResult_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + Message string + IsCreateAccountResult bool +} + +func (b0 CreateAccountResponse_ActionRequiredResult_builder) Build() *CreateAccountResponse_ActionRequiredResult { + m0 := &CreateAccountResponse_ActionRequiredResult{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_Message = b.Message + x.xxx_hidden_IsCreateAccountResult = b.IsCreateAccountResult + return m0 +} + +type CreateAccountResponse_AlreadyExistsResult struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_IsCreateAccountResult bool `protobuf:"varint,2,opt,name=is_create_account_result,json=isCreateAccountResult,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateAccountResponse_AlreadyExistsResult) Reset() { + *x = CreateAccountResponse_AlreadyExistsResult{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateAccountResponse_AlreadyExistsResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAccountResponse_AlreadyExistsResult) ProtoMessage() {} + +func (x *CreateAccountResponse_AlreadyExistsResult) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[38] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateAccountResponse_AlreadyExistsResult) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *CreateAccountResponse_AlreadyExistsResult) GetIsCreateAccountResult() bool { + if x != nil { + return x.xxx_hidden_IsCreateAccountResult + } + return false +} + +func (x *CreateAccountResponse_AlreadyExistsResult) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *CreateAccountResponse_AlreadyExistsResult) SetIsCreateAccountResult(v bool) { + x.xxx_hidden_IsCreateAccountResult = v +} + +func (x *CreateAccountResponse_AlreadyExistsResult) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *CreateAccountResponse_AlreadyExistsResult) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type CreateAccountResponse_AlreadyExistsResult_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + IsCreateAccountResult bool +} + +func (b0 CreateAccountResponse_AlreadyExistsResult_builder) Build() *CreateAccountResponse_AlreadyExistsResult { + m0 := &CreateAccountResponse_AlreadyExistsResult{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_IsCreateAccountResult = b.IsCreateAccountResult + return m0 +} + +type CreateAccountResponse_InProgressResult struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_IsCreateAccountResult bool `protobuf:"varint,2,opt,name=is_create_account_result,json=isCreateAccountResult,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateAccountResponse_InProgressResult) Reset() { + *x = CreateAccountResponse_InProgressResult{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateAccountResponse_InProgressResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateAccountResponse_InProgressResult) ProtoMessage() {} + +func (x *CreateAccountResponse_InProgressResult) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[39] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *CreateAccountResponse_InProgressResult) GetResource() *Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *CreateAccountResponse_InProgressResult) GetIsCreateAccountResult() bool { + if x != nil { + return x.xxx_hidden_IsCreateAccountResult + } + return false +} + +func (x *CreateAccountResponse_InProgressResult) SetResource(v *Resource) { + x.xxx_hidden_Resource = v +} + +func (x *CreateAccountResponse_InProgressResult) SetIsCreateAccountResult(v bool) { + x.xxx_hidden_IsCreateAccountResult = v +} + +func (x *CreateAccountResponse_InProgressResult) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *CreateAccountResponse_InProgressResult) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type CreateAccountResponse_InProgressResult_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *Resource + IsCreateAccountResult bool +} + +func (b0 CreateAccountResponse_InProgressResult_builder) Build() *CreateAccountResponse_InProgressResult { + m0 := &CreateAccountResponse_InProgressResult{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + x.xxx_hidden_IsCreateAccountResult = b.IsCreateAccountResult + return m0 +} + +type EncryptionConfig_JWKPublicKeyConfig struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_PubKey []byte `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EncryptionConfig_JWKPublicKeyConfig) Reset() { + *x = EncryptionConfig_JWKPublicKeyConfig{} + mi := &file_c1_connector_v2_resource_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EncryptionConfig_JWKPublicKeyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncryptionConfig_JWKPublicKeyConfig) ProtoMessage() {} + +func (x *EncryptionConfig_JWKPublicKeyConfig) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_resource_proto_msgTypes[40] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EncryptionConfig_JWKPublicKeyConfig) GetPubKey() []byte { + if x != nil { + return x.xxx_hidden_PubKey + } + return nil +} + +func (x *EncryptionConfig_JWKPublicKeyConfig) SetPubKey(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_PubKey = v +} + +type EncryptionConfig_JWKPublicKeyConfig_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + PubKey []byte +} + +func (b0 EncryptionConfig_JWKPublicKeyConfig_builder) Build() *EncryptionConfig_JWKPublicKeyConfig { + m0 := &EncryptionConfig_JWKPublicKeyConfig{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_PubKey = b.PubKey + return m0 +} + +var File_c1_connector_v2_resource_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_resource_proto_rawDesc = "" + + "\n" + + "\x1ec1/connector/v2/resource.proto\x12\x0fc1.connector.v2\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17validate/validate.proto\"\xd1\x03\n" + + "\fResourceType\x12\x1a\n" + + "\x02id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x120\n" + + "\fdisplay_name\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\vdisplayName\x12L\n" + + "\x06traits\x18\x03 \x03(\x0e2#.c1.connector.v2.ResourceType.TraitB\x0f\xfaB\f\x92\x01\t\x18\x01\"\x05\x82\x01\x02\x10\x01R\x06traits\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + + "\vdescription\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\vdescription\x12-\n" + + "\x12sourced_externally\x18\x06 \x01(\bR\x11sourcedExternally\"\x8c\x01\n" + + "\x05Trait\x12\x15\n" + + "\x11TRAIT_UNSPECIFIED\x10\x00\x12\x0e\n" + + "\n" + + "TRAIT_USER\x10\x01\x12\x0f\n" + + "\vTRAIT_GROUP\x10\x02\x12\x0e\n" + + "\n" + + "TRAIT_ROLE\x10\x03\x12\r\n" + + "\tTRAIT_APP\x10\x04\x12\x10\n" + + "\fTRAIT_SECRET\x10\x05\x12\x1a\n" + + "\x16TRAIT_SECURITY_INSIGHT\x10\x06\"\xa6\x02\n" + + ",ResourceTypesServiceListResourceTypesRequest\x121\n" + + "\x06parent\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\x06parent\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xd2\x01\n" + + "-ResourceTypesServiceListResourceTypesResponse\x121\n" + + "\x04list\x18\x01 \x03(\v2\x1d.c1.connector.v2.ResourceTypeR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"N\n" + + "\x15CreateResourceRequest\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\"\x85\x01\n" + + "\x16CreateResourceResponse\x123\n" + + "\acreated\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\acreated\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa0\x01\n" + + "\x15DeleteResourceRequest\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\"P\n" + + "\x16DeleteResourceResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa2\x01\n" + + "\x17DeleteResourceV2Request\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\"R\n" + + "\x18DeleteResourceV2Response\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xfc\x01\n" + + "\x17RotateCredentialRequest\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + 
"resourceId\x12Q\n" + + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\"\xd7\x01\n" + + "\x18RotateCredentialResponse\x12E\n" + + "\x0eencrypted_data\x18\x01 \x03(\v2\x1e.c1.connector.v2.EncryptedDataR\rencryptedData\x12<\n" + + "\vresource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x82\x02\n" + + "\vAccountInfo\x12:\n" + + "\x06emails\x18\x01 \x03(\v2\".c1.connector.v2.AccountInfo.EmailR\x06emails\x12\x14\n" + + "\x05login\x18\x02 \x01(\tR\x05login\x12#\n" + + "\rlogin_aliases\x18\x03 \x03(\tR\floginAliases\x121\n" + + "\aprofile\x18\x04 \x01(\v2\x17.google.protobuf.StructR\aprofile\x1aI\n" + + "\x05Email\x12!\n" + + "\aaddress\x18\x01 \x01(\tB\a\xfaB\x04r\x02`\x01R\aaddress\x12\x1d\n" + + "\n" + + "is_primary\x18\x02 \x01(\bR\tisPrimary\"\xd1\x05\n" + + "\x11CredentialOptions\x12\\\n" + + "\x0frandom_password\x18d \x01(\v21.c1.connector.v2.CredentialOptions.RandomPasswordH\x00R\x0erandomPassword\x12P\n" + + "\vno_password\x18e \x01(\v2-.c1.connector.v2.CredentialOptions.NoPasswordH\x00R\n" + + "noPassword\x12:\n" + + "\x03sso\x18f \x01(\v2&.c1.connector.v2.CredentialOptions.SSOH\x00R\x03sso\x12e\n" + + "\x12encrypted_password\x18g \x01(\v24.c1.connector.v2.CredentialOptions.EncryptedPasswordH\x00R\x11encryptedPassword\x12:\n" + + "\x1aforce_change_at_next_login\x18\x01 \x01(\bR\x16forceChangeAtNextLogin\x1az\n" + + "\x0eRandomPassword\x12!\n" + + "\x06length\x18\x01 \x01(\x03B\t\xfaB\x06\"\x04\x18@(\bR\x06length\x12E\n" + + "\vconstraints\x18\x02 \x03(\v2#.c1.connector.v2.PasswordConstraintR\vconstraints\x1a\f\n" + + "\n" + + "NoPassword\x1a(\n" + + "\x03SSO\x12!\n" + + "\fsso_provider\x18\x01 \x01(\tR\vssoProvider\x1an\n" + + "\x11EncryptedPassword\x12Y\n" + + "\x13encrypted_passwords\x18\x01 \x03(\v2\x1e.c1.connector.v2.EncryptedDataB\b\xfaB\x05\x92\x01\x02\b\x01R\x12encryptedPasswordsB\t\n" + + "\aoptions\"\xbe\x05\n" + + "\x16LocalCredentialOptions\x12a\n" + + "\x0frandom_password\x18d \x01(\v26.c1.connector.v2.LocalCredentialOptions.RandomPasswordH\x00R\x0erandomPassword\x12U\n" + + "\vno_password\x18e \x01(\v22.c1.connector.v2.LocalCredentialOptions.NoPasswordH\x00R\n" + + "noPassword\x12?\n" + + "\x03sso\x18f \x01(\v2+.c1.connector.v2.LocalCredentialOptions.SSOH\x00R\x03sso\x12j\n" + + "\x12plaintext_password\x18g \x01(\v29.c1.connector.v2.LocalCredentialOptions.PlaintextPasswordH\x00R\x11plaintextPassword\x12:\n" + + "\x1aforce_change_at_next_login\x18\x01 \x01(\bR\x16forceChangeAtNextLogin\x1az\n" + + "\x0eRandomPassword\x12!\n" + + "\x06length\x18\x01 \x01(\x03B\t\xfaB\x06\"\x04\x18@(\bR\x06length\x12E\n" + + "\vconstraints\x18\x02 \x03(\v2#.c1.connector.v2.PasswordConstraintR\vconstraints\x1a\f\n" + + "\n" + + "NoPassword\x1a(\n" + + "\x03SSO\x12!\n" + + "\fsso_provider\x18\x01 \x01(\tR\vssoProvider\x1aB\n" + + "\x11PlaintextPassword\x12-\n" + + "\x12plaintext_password\x18\x01 \x01(\tR\x11plaintextPasswordB\t\n" + + "\aoptions\"L\n" + + "\x12PasswordConstraint\x12\x19\n" + + "\bchar_set\x18\x01 \x01(\tR\acharSet\x12\x1b\n" + + "\tmin_count\x18\x02 \x01(\rR\bminCount\"\xfc\x01\n" + + "\x14CreateAccountRequest\x12?\n" + + "\faccount_info\x18\x01 \x01(\v2\x1c.c1.connector.v2.AccountInfoR\vaccountInfo\x12Q\n" + + "\x12credential_options\x18\x02 
\x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\"\xcc\b\n" + + "\x15CreateAccountResponse\x12P\n" + + "\asuccess\x18d \x01(\v24.c1.connector.v2.CreateAccountResponse.SuccessResultH\x00R\asuccess\x12f\n" + + "\x0faction_required\x18e \x01(\v2;.c1.connector.v2.CreateAccountResponse.ActionRequiredResultH\x00R\x0eactionRequired\x12c\n" + + "\x0ealready_exists\x18f \x01(\v2:.c1.connector.v2.CreateAccountResponse.AlreadyExistsResultH\x00R\ralreadyExists\x12Z\n" + + "\vin_progress\x18g \x01(\v27.c1.connector.v2.CreateAccountResponse.InProgressResultH\x00R\n" + + "inProgress\x12E\n" + + "\x0eencrypted_data\x18\x02 \x03(\v2\x1e.c1.connector.v2.EncryptedDataR\rencryptedData\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\x7f\n" + + "\rSuccessResult\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x127\n" + + "\x18is_create_account_result\x18\x02 \x01(\bR\x15isCreateAccountResult\x1a\xa0\x01\n" + + "\x14ActionRequiredResult\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x127\n" + + "\x18is_create_account_result\x18\x03 \x01(\bR\x15isCreateAccountResult\x1a\x85\x01\n" + + "\x13AlreadyExistsResult\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x127\n" + + "\x18is_create_account_result\x18\x02 \x01(\bR\x15isCreateAccountResult\x1a\x82\x01\n" + + "\x10InProgressResult\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x127\n" + + "\x18is_create_account_result\x18\x02 \x01(\bR\x15isCreateAccountResultB\b\n" + + "\x06result\"\xd6\x01\n" + + "\rEncryptedData\x12\x1a\n" + + "\bprovider\x18\x01 \x01(\tR\bprovider\x12\x19\n" + + "\x06key_id\x18\x02 \x01(\tB\x02\x18\x01R\x05keyId\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12 \n" + + "\vdescription\x18\x04 \x01(\tR\vdescription\x12\x16\n" + + "\x06schema\x18\x05 \x01(\tR\x06schema\x12'\n" + + "\x0fencrypted_bytes\x18\x06 \x01(\fR\x0eencryptedBytes\x12\x17\n" + + "\akey_ids\x18\a \x03(\tR\x06keyIds\"s\n" + + "\rPlaintextData\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12 \n" + + "\vdescription\x18\x02 \x01(\tR\vdescription\x12\x16\n" + + "\x06schema\x18\x03 \x01(\tR\x06schema\x12\x14\n" + + "\x05bytes\x18\x04 \x01(\fR\x05bytes\"\xa2\x02\n" + + "\x10EncryptionConfig\x127\n" + + "\tprincipal\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\tprincipal\x12\x1a\n" + + "\bprovider\x18\x02 \x01(\tR\bprovider\x12\x15\n" + + "\x06key_id\x18\x03 \x01(\tR\x05keyId\x12i\n" + + "\x15jwk_public_key_config\x18d \x01(\v24.c1.connector.v2.EncryptionConfig.JWKPublicKeyConfigH\x00R\x12jwkPublicKeyConfig\x1a-\n" + + "\x12JWKPublicKeyConfig\x12\x17\n" + + "\apub_key\x18\x01 \x01(\fR\x06pubKeyB\b\n" + + "\x06config\"\x8c\x01\n" + + "\n" + + "ResourceId\x12/\n" + + "\rresource_type\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\fresourceType\x12&\n" + + "\bresource\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\bresource\x12%\n" + + "\x0ebaton_resource\x18\x03 \x01(\bR\rbatonResource\"\xf0\x04\n" + + "\bResource\x12+\n" + + "\x02id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x02id\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\x120\n" + + "\fdisplay_name\x18\x03 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\vdisplayName\x126\n" + + "\vannotations\x18\x04 
\x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + + "\vdescription\x18\x05 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\vdescription\x12%\n" + + "\x0ebaton_resource\x18\x06 \x01(\bR\rbatonResource\x12<\n" + + "\vexternal_id\x18\a \x01(\v2\x1b.c1.connector.v2.ExternalIdR\n" + + "externalId\x12Q\n" + + "\x0fcreation_source\x18\b \x01(\x0e2(.c1.connector.v2.Resource.CreationSourceR\x0ecreationSource\"\x98\x01\n" + + "\x0eCreationSource\x12\x1f\n" + + "\x1bCREATION_SOURCE_UNSPECIFIED\x10\x00\x12,\n" + + "(CREATION_SOURCE_CONNECTOR_LIST_RESOURCES\x10\x01\x127\n" + + "3CREATION_SOURCE_CONNECTOR_LIST_GRANTS_PRINCIPAL_JIT\x10\x02\"\xf6\x02\n" + + "$ResourcesServiceListResourcesRequest\x124\n" + + "\x10resource_type_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\x0eresourceTypeId\x12S\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x00R\x10parentResourceId\x12'\n" + + "\tpage_size\x18\x03 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + + "\n" + + "page_token\x18\x04 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x05 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x06 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xc6\x01\n" + + "%ResourcesServiceListResourcesResponse\x12-\n" + + "\x04list\x18\x01 \x03(\v2\x19.c1.connector.v2.ResourceR\x04list\x126\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa9\x02\n" + + "'ResourceGetterServiceGetResourceRequest\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12S\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x00R\x10parentResourceId\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + + "\x0eactive_sync_id\x18\x04 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\x99\x01\n" + + "(ResourceGetterServiceGetResourceResponse\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"R\n" + + "\n" + + "ExternalId\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + + "\x04link\x18\x02 \x01(\tR\x04link\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription2\xab\x01\n" + + "\x14ResourceTypesService\x12\x92\x01\n" + + "\x11ListResourceTypes\x12=.c1.connector.v2.ResourceTypesServiceListResourceTypesRequest\x1a>.c1.connector.v2.ResourceTypesServiceListResourceTypesResponse2\x92\x01\n" + + "\x10ResourcesService\x12~\n" + + "\rListResources\x125.c1.connector.v2.ResourcesServiceListResourcesRequest\x1a6.c1.connector.v2.ResourcesServiceListResourcesResponse2\x9c\x01\n" + + "\x15ResourceGetterService\x12\x82\x01\n" + + "\vGetResource\x128.c1.connector.v2.ResourceGetterServiceGetResourceRequest\x1a9.c1.connector.v2.ResourceGetterServiceGetResourceResponse2\xde\x01\n" + + "\x16ResourceManagerService\x12a\n" + + "\x0eCreateResource\x12&.c1.connector.v2.CreateResourceRequest\x1a'.c1.connector.v2.CreateResourceResponse\x12a\n" + + "\x0eDeleteResource\x12&.c1.connector.v2.DeleteResourceRequest\x1a'.c1.connector.v2.DeleteResourceResponse2\x81\x01\n" + + "\x16ResourceDeleterService\x12g\n" + + 
"\x10DeleteResourceV2\x12(.c1.connector.v2.DeleteResourceV2Request\x1a).c1.connector.v2.DeleteResourceV2Response2\x83\x01\n" + + "\x18CredentialManagerService\x12g\n" + + "\x10RotateCredential\x12(.c1.connector.v2.RotateCredentialRequest\x1a).c1.connector.v2.RotateCredentialResponse2w\n" + + "\x15AccountManagerService\x12^\n" + + "\rCreateAccount\x12%.c1.connector.v2.CreateAccountRequest\x1a&.c1.connector.v2.CreateAccountResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_c1_connector_v2_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 41) +var file_c1_connector_v2_resource_proto_goTypes = []any{ + (ResourceType_Trait)(0), // 0: c1.connector.v2.ResourceType.Trait + (Resource_CreationSource)(0), // 1: c1.connector.v2.Resource.CreationSource + (*ResourceType)(nil), // 2: c1.connector.v2.ResourceType + (*ResourceTypesServiceListResourceTypesRequest)(nil), // 3: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest + (*ResourceTypesServiceListResourceTypesResponse)(nil), // 4: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse + (*CreateResourceRequest)(nil), // 5: c1.connector.v2.CreateResourceRequest + (*CreateResourceResponse)(nil), // 6: c1.connector.v2.CreateResourceResponse + (*DeleteResourceRequest)(nil), // 7: c1.connector.v2.DeleteResourceRequest + (*DeleteResourceResponse)(nil), // 8: c1.connector.v2.DeleteResourceResponse + (*DeleteResourceV2Request)(nil), // 9: c1.connector.v2.DeleteResourceV2Request + (*DeleteResourceV2Response)(nil), // 10: c1.connector.v2.DeleteResourceV2Response + (*RotateCredentialRequest)(nil), // 11: c1.connector.v2.RotateCredentialRequest + (*RotateCredentialResponse)(nil), // 12: c1.connector.v2.RotateCredentialResponse + (*AccountInfo)(nil), // 13: c1.connector.v2.AccountInfo + (*CredentialOptions)(nil), // 14: c1.connector.v2.CredentialOptions + (*LocalCredentialOptions)(nil), // 15: c1.connector.v2.LocalCredentialOptions + (*PasswordConstraint)(nil), // 16: c1.connector.v2.PasswordConstraint + (*CreateAccountRequest)(nil), // 17: c1.connector.v2.CreateAccountRequest + (*CreateAccountResponse)(nil), // 18: c1.connector.v2.CreateAccountResponse + (*EncryptedData)(nil), // 19: c1.connector.v2.EncryptedData + (*PlaintextData)(nil), // 20: c1.connector.v2.PlaintextData + (*EncryptionConfig)(nil), // 21: c1.connector.v2.EncryptionConfig + (*ResourceId)(nil), // 22: c1.connector.v2.ResourceId + (*Resource)(nil), // 23: c1.connector.v2.Resource + (*ResourcesServiceListResourcesRequest)(nil), // 24: c1.connector.v2.ResourcesServiceListResourcesRequest + (*ResourcesServiceListResourcesResponse)(nil), // 25: c1.connector.v2.ResourcesServiceListResourcesResponse + (*ResourceGetterServiceGetResourceRequest)(nil), // 26: c1.connector.v2.ResourceGetterServiceGetResourceRequest + (*ResourceGetterServiceGetResourceResponse)(nil), // 27: c1.connector.v2.ResourceGetterServiceGetResourceResponse + (*ExternalId)(nil), // 28: c1.connector.v2.ExternalId + (*AccountInfo_Email)(nil), // 29: c1.connector.v2.AccountInfo.Email + (*CredentialOptions_RandomPassword)(nil), // 30: c1.connector.v2.CredentialOptions.RandomPassword + (*CredentialOptions_NoPassword)(nil), // 31: c1.connector.v2.CredentialOptions.NoPassword + (*CredentialOptions_SSO)(nil), // 32: c1.connector.v2.CredentialOptions.SSO + (*CredentialOptions_EncryptedPassword)(nil), // 33: c1.connector.v2.CredentialOptions.EncryptedPassword + 
(*LocalCredentialOptions_RandomPassword)(nil), // 34: c1.connector.v2.LocalCredentialOptions.RandomPassword + (*LocalCredentialOptions_NoPassword)(nil), // 35: c1.connector.v2.LocalCredentialOptions.NoPassword + (*LocalCredentialOptions_SSO)(nil), // 36: c1.connector.v2.LocalCredentialOptions.SSO + (*LocalCredentialOptions_PlaintextPassword)(nil), // 37: c1.connector.v2.LocalCredentialOptions.PlaintextPassword + (*CreateAccountResponse_SuccessResult)(nil), // 38: c1.connector.v2.CreateAccountResponse.SuccessResult + (*CreateAccountResponse_ActionRequiredResult)(nil), // 39: c1.connector.v2.CreateAccountResponse.ActionRequiredResult + (*CreateAccountResponse_AlreadyExistsResult)(nil), // 40: c1.connector.v2.CreateAccountResponse.AlreadyExistsResult + (*CreateAccountResponse_InProgressResult)(nil), // 41: c1.connector.v2.CreateAccountResponse.InProgressResult + (*EncryptionConfig_JWKPublicKeyConfig)(nil), // 42: c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig + (*anypb.Any)(nil), // 43: google.protobuf.Any + (*structpb.Struct)(nil), // 44: google.protobuf.Struct +} +var file_c1_connector_v2_resource_proto_depIdxs = []int32{ + 0, // 0: c1.connector.v2.ResourceType.traits:type_name -> c1.connector.v2.ResourceType.Trait + 43, // 1: c1.connector.v2.ResourceType.annotations:type_name -> google.protobuf.Any + 23, // 2: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest.parent:type_name -> c1.connector.v2.Resource + 43, // 3: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest.annotations:type_name -> google.protobuf.Any + 2, // 4: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse.list:type_name -> c1.connector.v2.ResourceType + 43, // 5: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse.annotations:type_name -> google.protobuf.Any + 23, // 6: c1.connector.v2.CreateResourceRequest.resource:type_name -> c1.connector.v2.Resource + 23, // 7: c1.connector.v2.CreateResourceResponse.created:type_name -> c1.connector.v2.Resource + 43, // 8: c1.connector.v2.CreateResourceResponse.annotations:type_name -> google.protobuf.Any + 22, // 9: c1.connector.v2.DeleteResourceRequest.resource_id:type_name -> c1.connector.v2.ResourceId + 22, // 10: c1.connector.v2.DeleteResourceRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 11: c1.connector.v2.DeleteResourceResponse.annotations:type_name -> google.protobuf.Any + 22, // 12: c1.connector.v2.DeleteResourceV2Request.resource_id:type_name -> c1.connector.v2.ResourceId + 22, // 13: c1.connector.v2.DeleteResourceV2Request.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 14: c1.connector.v2.DeleteResourceV2Response.annotations:type_name -> google.protobuf.Any + 22, // 15: c1.connector.v2.RotateCredentialRequest.resource_id:type_name -> c1.connector.v2.ResourceId + 14, // 16: c1.connector.v2.RotateCredentialRequest.credential_options:type_name -> c1.connector.v2.CredentialOptions + 21, // 17: c1.connector.v2.RotateCredentialRequest.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 19, // 18: c1.connector.v2.RotateCredentialResponse.encrypted_data:type_name -> c1.connector.v2.EncryptedData + 22, // 19: c1.connector.v2.RotateCredentialResponse.resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 20: c1.connector.v2.RotateCredentialResponse.annotations:type_name -> google.protobuf.Any + 29, // 21: c1.connector.v2.AccountInfo.emails:type_name -> c1.connector.v2.AccountInfo.Email + 44, // 22: c1.connector.v2.AccountInfo.profile:type_name -> google.protobuf.Struct 
+ 30, // 23: c1.connector.v2.CredentialOptions.random_password:type_name -> c1.connector.v2.CredentialOptions.RandomPassword + 31, // 24: c1.connector.v2.CredentialOptions.no_password:type_name -> c1.connector.v2.CredentialOptions.NoPassword + 32, // 25: c1.connector.v2.CredentialOptions.sso:type_name -> c1.connector.v2.CredentialOptions.SSO + 33, // 26: c1.connector.v2.CredentialOptions.encrypted_password:type_name -> c1.connector.v2.CredentialOptions.EncryptedPassword + 34, // 27: c1.connector.v2.LocalCredentialOptions.random_password:type_name -> c1.connector.v2.LocalCredentialOptions.RandomPassword + 35, // 28: c1.connector.v2.LocalCredentialOptions.no_password:type_name -> c1.connector.v2.LocalCredentialOptions.NoPassword + 36, // 29: c1.connector.v2.LocalCredentialOptions.sso:type_name -> c1.connector.v2.LocalCredentialOptions.SSO + 37, // 30: c1.connector.v2.LocalCredentialOptions.plaintext_password:type_name -> c1.connector.v2.LocalCredentialOptions.PlaintextPassword + 13, // 31: c1.connector.v2.CreateAccountRequest.account_info:type_name -> c1.connector.v2.AccountInfo + 14, // 32: c1.connector.v2.CreateAccountRequest.credential_options:type_name -> c1.connector.v2.CredentialOptions + 21, // 33: c1.connector.v2.CreateAccountRequest.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 38, // 34: c1.connector.v2.CreateAccountResponse.success:type_name -> c1.connector.v2.CreateAccountResponse.SuccessResult + 39, // 35: c1.connector.v2.CreateAccountResponse.action_required:type_name -> c1.connector.v2.CreateAccountResponse.ActionRequiredResult + 40, // 36: c1.connector.v2.CreateAccountResponse.already_exists:type_name -> c1.connector.v2.CreateAccountResponse.AlreadyExistsResult + 41, // 37: c1.connector.v2.CreateAccountResponse.in_progress:type_name -> c1.connector.v2.CreateAccountResponse.InProgressResult + 19, // 38: c1.connector.v2.CreateAccountResponse.encrypted_data:type_name -> c1.connector.v2.EncryptedData + 43, // 39: c1.connector.v2.CreateAccountResponse.annotations:type_name -> google.protobuf.Any + 23, // 40: c1.connector.v2.EncryptionConfig.principal:type_name -> c1.connector.v2.Resource + 42, // 41: c1.connector.v2.EncryptionConfig.jwk_public_key_config:type_name -> c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig + 22, // 42: c1.connector.v2.Resource.id:type_name -> c1.connector.v2.ResourceId + 22, // 43: c1.connector.v2.Resource.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 44: c1.connector.v2.Resource.annotations:type_name -> google.protobuf.Any + 28, // 45: c1.connector.v2.Resource.external_id:type_name -> c1.connector.v2.ExternalId + 1, // 46: c1.connector.v2.Resource.creation_source:type_name -> c1.connector.v2.Resource.CreationSource + 22, // 47: c1.connector.v2.ResourcesServiceListResourcesRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 48: c1.connector.v2.ResourcesServiceListResourcesRequest.annotations:type_name -> google.protobuf.Any + 23, // 49: c1.connector.v2.ResourcesServiceListResourcesResponse.list:type_name -> c1.connector.v2.Resource + 43, // 50: c1.connector.v2.ResourcesServiceListResourcesResponse.annotations:type_name -> google.protobuf.Any + 22, // 51: c1.connector.v2.ResourceGetterServiceGetResourceRequest.resource_id:type_name -> c1.connector.v2.ResourceId + 22, // 52: c1.connector.v2.ResourceGetterServiceGetResourceRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 43, // 53: c1.connector.v2.ResourceGetterServiceGetResourceRequest.annotations:type_name -> 
google.protobuf.Any + 23, // 54: c1.connector.v2.ResourceGetterServiceGetResourceResponse.resource:type_name -> c1.connector.v2.Resource + 43, // 55: c1.connector.v2.ResourceGetterServiceGetResourceResponse.annotations:type_name -> google.protobuf.Any + 16, // 56: c1.connector.v2.CredentialOptions.RandomPassword.constraints:type_name -> c1.connector.v2.PasswordConstraint + 19, // 57: c1.connector.v2.CredentialOptions.EncryptedPassword.encrypted_passwords:type_name -> c1.connector.v2.EncryptedData + 16, // 58: c1.connector.v2.LocalCredentialOptions.RandomPassword.constraints:type_name -> c1.connector.v2.PasswordConstraint + 23, // 59: c1.connector.v2.CreateAccountResponse.SuccessResult.resource:type_name -> c1.connector.v2.Resource + 23, // 60: c1.connector.v2.CreateAccountResponse.ActionRequiredResult.resource:type_name -> c1.connector.v2.Resource + 23, // 61: c1.connector.v2.CreateAccountResponse.AlreadyExistsResult.resource:type_name -> c1.connector.v2.Resource + 23, // 62: c1.connector.v2.CreateAccountResponse.InProgressResult.resource:type_name -> c1.connector.v2.Resource + 3, // 63: c1.connector.v2.ResourceTypesService.ListResourceTypes:input_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesRequest + 24, // 64: c1.connector.v2.ResourcesService.ListResources:input_type -> c1.connector.v2.ResourcesServiceListResourcesRequest + 26, // 65: c1.connector.v2.ResourceGetterService.GetResource:input_type -> c1.connector.v2.ResourceGetterServiceGetResourceRequest + 5, // 66: c1.connector.v2.ResourceManagerService.CreateResource:input_type -> c1.connector.v2.CreateResourceRequest + 7, // 67: c1.connector.v2.ResourceManagerService.DeleteResource:input_type -> c1.connector.v2.DeleteResourceRequest + 9, // 68: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:input_type -> c1.connector.v2.DeleteResourceV2Request + 11, // 69: c1.connector.v2.CredentialManagerService.RotateCredential:input_type -> c1.connector.v2.RotateCredentialRequest + 17, // 70: c1.connector.v2.AccountManagerService.CreateAccount:input_type -> c1.connector.v2.CreateAccountRequest + 4, // 71: c1.connector.v2.ResourceTypesService.ListResourceTypes:output_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesResponse + 25, // 72: c1.connector.v2.ResourcesService.ListResources:output_type -> c1.connector.v2.ResourcesServiceListResourcesResponse + 27, // 73: c1.connector.v2.ResourceGetterService.GetResource:output_type -> c1.connector.v2.ResourceGetterServiceGetResourceResponse + 6, // 74: c1.connector.v2.ResourceManagerService.CreateResource:output_type -> c1.connector.v2.CreateResourceResponse + 8, // 75: c1.connector.v2.ResourceManagerService.DeleteResource:output_type -> c1.connector.v2.DeleteResourceResponse + 10, // 76: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:output_type -> c1.connector.v2.DeleteResourceV2Response + 12, // 77: c1.connector.v2.CredentialManagerService.RotateCredential:output_type -> c1.connector.v2.RotateCredentialResponse + 18, // 78: c1.connector.v2.AccountManagerService.CreateAccount:output_type -> c1.connector.v2.CreateAccountResponse + 71, // [71:79] is the sub-list for method output_type + 63, // [63:71] is the sub-list for method input_type + 63, // [63:63] is the sub-list for extension type_name + 63, // [63:63] is the sub-list for extension extendee + 0, // [0:63] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_resource_proto_init() } +func file_c1_connector_v2_resource_proto_init() { + if File_c1_connector_v2_resource_proto != nil { + 
return + } + file_c1_connector_v2_resource_proto_msgTypes[12].OneofWrappers = []any{ + (*credentialOptions_RandomPassword_)(nil), + (*credentialOptions_NoPassword_)(nil), + (*credentialOptions_Sso)(nil), + (*credentialOptions_EncryptedPassword_)(nil), + } + file_c1_connector_v2_resource_proto_msgTypes[13].OneofWrappers = []any{ + (*localCredentialOptions_RandomPassword_)(nil), + (*localCredentialOptions_NoPassword_)(nil), + (*localCredentialOptions_Sso)(nil), + (*localCredentialOptions_PlaintextPassword_)(nil), + } + file_c1_connector_v2_resource_proto_msgTypes[16].OneofWrappers = []any{ + (*createAccountResponse_Success)(nil), + (*createAccountResponse_ActionRequired)(nil), + (*createAccountResponse_AlreadyExists)(nil), + (*createAccountResponse_InProgress)(nil), + } + file_c1_connector_v2_resource_proto_msgTypes[19].OneofWrappers = []any{ + (*encryptionConfig_JwkPublicKeyConfig)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_resource_proto_rawDesc), len(file_c1_connector_v2_resource_proto_rawDesc)), + NumEnums: 2, + NumMessages: 41, + NumExtensions: 0, + NumServices: 7, + }, + GoTypes: file_c1_connector_v2_resource_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_resource_proto_depIdxs, + EnumInfos: file_c1_connector_v2_resource_proto_enumTypes, + MessageInfos: file_c1_connector_v2_resource_proto_msgTypes, + }.Build() + File_c1_connector_v2_resource_proto = out.File + file_c1_connector_v2_resource_proto_goTypes = nil + file_c1_connector_v2_resource_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.go index f659e0d8..a13a28a6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector/v2/ticket.proto +//go:build !protoopaque + package v2 import ( @@ -14,7 +16,6 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -26,7 +27,7 @@ const ( ) type TicketSchema struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` Types []*TicketType `protobuf:"bytes,3,rep,name=types,proto3" json:"types,omitempty"` @@ -62,11 +63,6 @@ func (x *TicketSchema) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketSchema.ProtoReflect.Descriptor instead. 
-func (*TicketSchema) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{0} -} - func (x *TicketSchema) GetId() string { if x != nil { return x.Id @@ -109,8 +105,56 @@ func (x *TicketSchema) GetAnnotations() []*anypb.Any { return nil } +func (x *TicketSchema) SetId(v string) { + x.Id = v +} + +func (x *TicketSchema) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *TicketSchema) SetTypes(v []*TicketType) { + x.Types = v +} + +func (x *TicketSchema) SetStatuses(v []*TicketStatus) { + x.Statuses = v +} + +func (x *TicketSchema) SetCustomFields(v map[string]*TicketCustomField) { + x.CustomFields = v +} + +func (x *TicketSchema) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type TicketSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + DisplayName string + Types []*TicketType + Statuses []*TicketStatus + CustomFields map[string]*TicketCustomField + Annotations []*anypb.Any +} + +func (b0 TicketSchema_builder) Build() *TicketSchema { + m0 := &TicketSchema{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.DisplayName = b.DisplayName + x.Types = b.Types + x.Statuses = b.Statuses + x.CustomFields = b.CustomFields + x.Annotations = b.Annotations + return m0 +} + type TicketCustomField struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` Required bool `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` @@ -156,11 +200,6 @@ func (x *TicketCustomField) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomField.ProtoReflect.Descriptor instead. 
-func (*TicketCustomField) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{1} -} - func (x *TicketCustomField) GetId() string { if x != nil { return x.Id @@ -277,6 +316,338 @@ func (x *TicketCustomField) GetAnnotations() []*anypb.Any { return nil } +func (x *TicketCustomField) SetId(v string) { + x.Id = v +} + +func (x *TicketCustomField) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *TicketCustomField) SetRequired(v bool) { + x.Required = v +} + +func (x *TicketCustomField) SetStringValue(v *TicketCustomFieldStringValue) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_StringValue{v} +} + +func (x *TicketCustomField) SetStringValues(v *TicketCustomFieldStringValues) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_StringValues{v} +} + +func (x *TicketCustomField) SetBoolValue(v *TicketCustomFieldBoolValue) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_BoolValue{v} +} + +func (x *TicketCustomField) SetTimestampValue(v *TicketCustomFieldTimestampValue) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_TimestampValue{v} +} + +func (x *TicketCustomField) SetPickStringValue(v *TicketCustomFieldPickStringValue) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_PickStringValue{v} +} + +func (x *TicketCustomField) SetPickMultipleStringValues(v *TicketCustomFieldPickMultipleStringValues) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_PickMultipleStringValues{v} +} + +func (x *TicketCustomField) SetPickObjectValue(v *TicketCustomFieldPickObjectValue) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_PickObjectValue{v} +} + +func (x *TicketCustomField) SetPickMultipleObjectValues(v *TicketCustomFieldPickMultipleObjectValues) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_PickMultipleObjectValues{v} +} + +func (x *TicketCustomField) SetNumberValue(v *TicketCustomFieldNumberValue) { + if v == nil { + x.Value = nil + return + } + x.Value = &TicketCustomField_NumberValue{v} +} + +func (x *TicketCustomField) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *TicketCustomField) HasValue() bool { + if x == nil { + return false + } + return x.Value != nil +} + +func (x *TicketCustomField) HasStringValue() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_StringValue) + return ok +} + +func (x *TicketCustomField) HasStringValues() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_StringValues) + return ok +} + +func (x *TicketCustomField) HasBoolValue() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_BoolValue) + return ok +} + +func (x *TicketCustomField) HasTimestampValue() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_TimestampValue) + return ok +} + +func (x *TicketCustomField) HasPickStringValue() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_PickStringValue) + return ok +} + +func (x *TicketCustomField) HasPickMultipleStringValues() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_PickMultipleStringValues) + return ok +} + +func (x *TicketCustomField) HasPickObjectValue() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_PickObjectValue) + return ok +} + +func (x 
*TicketCustomField) HasPickMultipleObjectValues() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_PickMultipleObjectValues) + return ok +} + +func (x *TicketCustomField) HasNumberValue() bool { + if x == nil { + return false + } + _, ok := x.Value.(*TicketCustomField_NumberValue) + return ok +} + +func (x *TicketCustomField) ClearValue() { + x.Value = nil +} + +func (x *TicketCustomField) ClearStringValue() { + if _, ok := x.Value.(*TicketCustomField_StringValue); ok { + x.Value = nil + } +} + +func (x *TicketCustomField) ClearStringValues() { + if _, ok := x.Value.(*TicketCustomField_StringValues); ok { + x.Value = nil + } +} + +func (x *TicketCustomField) ClearBoolValue() { + if _, ok := x.Value.(*TicketCustomField_BoolValue); ok { + x.Value = nil + } +} + +func (x *TicketCustomField) ClearTimestampValue() { + if _, ok := x.Value.(*TicketCustomField_TimestampValue); ok { + x.Value = nil + } +} + +func (x *TicketCustomField) ClearPickStringValue() { + if _, ok := x.Value.(*TicketCustomField_PickStringValue); ok { + x.Value = nil + } +} + +func (x *TicketCustomField) ClearPickMultipleStringValues() { + if _, ok := x.Value.(*TicketCustomField_PickMultipleStringValues); ok { + x.Value = nil + } +} + +func (x *TicketCustomField) ClearPickObjectValue() { + if _, ok := x.Value.(*TicketCustomField_PickObjectValue); ok { + x.Value = nil + } +} + +func (x *TicketCustomField) ClearPickMultipleObjectValues() { + if _, ok := x.Value.(*TicketCustomField_PickMultipleObjectValues); ok { + x.Value = nil + } +} + +func (x *TicketCustomField) ClearNumberValue() { + if _, ok := x.Value.(*TicketCustomField_NumberValue); ok { + x.Value = nil + } +} + +const TicketCustomField_Value_not_set_case case_TicketCustomField_Value = 0 +const TicketCustomField_StringValue_case case_TicketCustomField_Value = 100 +const TicketCustomField_StringValues_case case_TicketCustomField_Value = 101 +const TicketCustomField_BoolValue_case case_TicketCustomField_Value = 102 +const TicketCustomField_TimestampValue_case case_TicketCustomField_Value = 103 +const TicketCustomField_PickStringValue_case case_TicketCustomField_Value = 104 +const TicketCustomField_PickMultipleStringValues_case case_TicketCustomField_Value = 105 +const TicketCustomField_PickObjectValue_case case_TicketCustomField_Value = 106 +const TicketCustomField_PickMultipleObjectValues_case case_TicketCustomField_Value = 107 +const TicketCustomField_NumberValue_case case_TicketCustomField_Value = 108 + +func (x *TicketCustomField) WhichValue() case_TicketCustomField_Value { + if x == nil { + return TicketCustomField_Value_not_set_case + } + switch x.Value.(type) { + case *TicketCustomField_StringValue: + return TicketCustomField_StringValue_case + case *TicketCustomField_StringValues: + return TicketCustomField_StringValues_case + case *TicketCustomField_BoolValue: + return TicketCustomField_BoolValue_case + case *TicketCustomField_TimestampValue: + return TicketCustomField_TimestampValue_case + case *TicketCustomField_PickStringValue: + return TicketCustomField_PickStringValue_case + case *TicketCustomField_PickMultipleStringValues: + return TicketCustomField_PickMultipleStringValues_case + case *TicketCustomField_PickObjectValue: + return TicketCustomField_PickObjectValue_case + case *TicketCustomField_PickMultipleObjectValues: + return TicketCustomField_PickMultipleObjectValues_case + case *TicketCustomField_NumberValue: + return TicketCustomField_NumberValue_case + default: + return TicketCustomField_Value_not_set_case + } +} + 
+type TicketCustomField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + DisplayName string + Required bool + // Fields of oneof Value: + StringValue *TicketCustomFieldStringValue + StringValues *TicketCustomFieldStringValues + BoolValue *TicketCustomFieldBoolValue + TimestampValue *TicketCustomFieldTimestampValue + PickStringValue *TicketCustomFieldPickStringValue + PickMultipleStringValues *TicketCustomFieldPickMultipleStringValues + PickObjectValue *TicketCustomFieldPickObjectValue + PickMultipleObjectValues *TicketCustomFieldPickMultipleObjectValues + NumberValue *TicketCustomFieldNumberValue + // -- end of Value + Annotations []*anypb.Any +} + +func (b0 TicketCustomField_builder) Build() *TicketCustomField { + m0 := &TicketCustomField{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.DisplayName = b.DisplayName + x.Required = b.Required + if b.StringValue != nil { + x.Value = &TicketCustomField_StringValue{b.StringValue} + } + if b.StringValues != nil { + x.Value = &TicketCustomField_StringValues{b.StringValues} + } + if b.BoolValue != nil { + x.Value = &TicketCustomField_BoolValue{b.BoolValue} + } + if b.TimestampValue != nil { + x.Value = &TicketCustomField_TimestampValue{b.TimestampValue} + } + if b.PickStringValue != nil { + x.Value = &TicketCustomField_PickStringValue{b.PickStringValue} + } + if b.PickMultipleStringValues != nil { + x.Value = &TicketCustomField_PickMultipleStringValues{b.PickMultipleStringValues} + } + if b.PickObjectValue != nil { + x.Value = &TicketCustomField_PickObjectValue{b.PickObjectValue} + } + if b.PickMultipleObjectValues != nil { + x.Value = &TicketCustomField_PickMultipleObjectValues{b.PickMultipleObjectValues} + } + if b.NumberValue != nil { + x.Value = &TicketCustomField_NumberValue{b.NumberValue} + } + x.Annotations = b.Annotations + return m0 +} + +type case_TicketCustomField_Value protoreflect.FieldNumber + +func (x case_TicketCustomField_Value) String() string { + md := file_c1_connector_v2_ticket_proto_msgTypes[1].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isTicketCustomField_Value interface { isTicketCustomField_Value() } @@ -336,7 +707,7 @@ func (*TicketCustomField_PickMultipleObjectValues) isTicketCustomField_Value() { func (*TicketCustomField_NumberValue) isTicketCustomField_Value() {} type TicketCustomFieldStringValue struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` DefaultValue string `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` unknownFields protoimpl.UnknownFields @@ -368,11 +739,6 @@ func (x *TicketCustomFieldStringValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldStringValue.ProtoReflect.Descriptor instead. 
-func (*TicketCustomFieldStringValue) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{2} -} - func (x *TicketCustomFieldStringValue) GetValue() string { if x != nil { return x.Value @@ -387,8 +753,32 @@ func (x *TicketCustomFieldStringValue) GetDefaultValue() string { return "" } +func (x *TicketCustomFieldStringValue) SetValue(v string) { + x.Value = v +} + +func (x *TicketCustomFieldStringValue) SetDefaultValue(v string) { + x.DefaultValue = v +} + +type TicketCustomFieldStringValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value string + DefaultValue string +} + +func (b0 TicketCustomFieldStringValue_builder) Build() *TicketCustomFieldStringValue { + m0 := &TicketCustomFieldStringValue{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + x.DefaultValue = b.DefaultValue + return m0 +} + type TicketCustomFieldStringValues struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` DefaultValues []string `protobuf:"bytes,2,rep,name=default_values,json=defaultValues,proto3" json:"default_values,omitempty"` unknownFields protoimpl.UnknownFields @@ -420,11 +810,6 @@ func (x *TicketCustomFieldStringValues) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldStringValues.ProtoReflect.Descriptor instead. -func (*TicketCustomFieldStringValues) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{3} -} - func (x *TicketCustomFieldStringValues) GetValues() []string { if x != nil { return x.Values @@ -439,8 +824,32 @@ func (x *TicketCustomFieldStringValues) GetDefaultValues() []string { return nil } +func (x *TicketCustomFieldStringValues) SetValues(v []string) { + x.Values = v +} + +func (x *TicketCustomFieldStringValues) SetDefaultValues(v []string) { + x.DefaultValues = v +} + +type TicketCustomFieldStringValues_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Values []string + DefaultValues []string +} + +func (b0 TicketCustomFieldStringValues_builder) Build() *TicketCustomFieldStringValues { + m0 := &TicketCustomFieldStringValues{} + b, x := &b0, m0 + _, _ = b, x + x.Values = b.Values + x.DefaultValues = b.DefaultValues + return m0 +} + type TicketCustomFieldBoolValue struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -471,11 +880,6 @@ func (x *TicketCustomFieldBoolValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldBoolValue.ProtoReflect.Descriptor instead. -func (*TicketCustomFieldBoolValue) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{4} -} - func (x *TicketCustomFieldBoolValue) GetValue() bool { if x != nil { return x.Value @@ -483,8 +887,26 @@ func (x *TicketCustomFieldBoolValue) GetValue() bool { return false } +func (x *TicketCustomFieldBoolValue) SetValue(v bool) { + x.Value = v +} + +type TicketCustomFieldBoolValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Value bool +} + +func (b0 TicketCustomFieldBoolValue_builder) Build() *TicketCustomFieldBoolValue { + m0 := &TicketCustomFieldBoolValue{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + return m0 +} + type TicketCustomFieldNumberValue struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Value *wrapperspb.FloatValue `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` DefaultValue *wrapperspb.FloatValue `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` unknownFields protoimpl.UnknownFields @@ -516,11 +938,6 @@ func (x *TicketCustomFieldNumberValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldNumberValue.ProtoReflect.Descriptor instead. -func (*TicketCustomFieldNumberValue) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{5} -} - func (x *TicketCustomFieldNumberValue) GetValue() *wrapperspb.FloatValue { if x != nil { return x.Value @@ -535,8 +952,54 @@ func (x *TicketCustomFieldNumberValue) GetDefaultValue() *wrapperspb.FloatValue return nil } +func (x *TicketCustomFieldNumberValue) SetValue(v *wrapperspb.FloatValue) { + x.Value = v +} + +func (x *TicketCustomFieldNumberValue) SetDefaultValue(v *wrapperspb.FloatValue) { + x.DefaultValue = v +} + +func (x *TicketCustomFieldNumberValue) HasValue() bool { + if x == nil { + return false + } + return x.Value != nil +} + +func (x *TicketCustomFieldNumberValue) HasDefaultValue() bool { + if x == nil { + return false + } + return x.DefaultValue != nil +} + +func (x *TicketCustomFieldNumberValue) ClearValue() { + x.Value = nil +} + +func (x *TicketCustomFieldNumberValue) ClearDefaultValue() { + x.DefaultValue = nil +} + +type TicketCustomFieldNumberValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value *wrapperspb.FloatValue + DefaultValue *wrapperspb.FloatValue +} + +func (b0 TicketCustomFieldNumberValue_builder) Build() *TicketCustomFieldNumberValue { + m0 := &TicketCustomFieldNumberValue{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + x.DefaultValue = b.DefaultValue + return m0 +} + type TicketCustomFieldTimestampValue struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Value *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` DefaultValue *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` unknownFields protoimpl.UnknownFields @@ -568,11 +1031,6 @@ func (x *TicketCustomFieldTimestampValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldTimestampValue.ProtoReflect.Descriptor instead. 
-func (*TicketCustomFieldTimestampValue) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{6} -} - func (x *TicketCustomFieldTimestampValue) GetValue() *timestamppb.Timestamp { if x != nil { return x.Value @@ -587,8 +1045,54 @@ func (x *TicketCustomFieldTimestampValue) GetDefaultValue() *timestamppb.Timesta return nil } +func (x *TicketCustomFieldTimestampValue) SetValue(v *timestamppb.Timestamp) { + x.Value = v +} + +func (x *TicketCustomFieldTimestampValue) SetDefaultValue(v *timestamppb.Timestamp) { + x.DefaultValue = v +} + +func (x *TicketCustomFieldTimestampValue) HasValue() bool { + if x == nil { + return false + } + return x.Value != nil +} + +func (x *TicketCustomFieldTimestampValue) HasDefaultValue() bool { + if x == nil { + return false + } + return x.DefaultValue != nil +} + +func (x *TicketCustomFieldTimestampValue) ClearValue() { + x.Value = nil +} + +func (x *TicketCustomFieldTimestampValue) ClearDefaultValue() { + x.DefaultValue = nil +} + +type TicketCustomFieldTimestampValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value *timestamppb.Timestamp + DefaultValue *timestamppb.Timestamp +} + +func (b0 TicketCustomFieldTimestampValue_builder) Build() *TicketCustomFieldTimestampValue { + m0 := &TicketCustomFieldTimestampValue{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + x.DefaultValue = b.DefaultValue + return m0 +} + type TicketCustomFieldPickStringValue struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` AllowedValues []string `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3" json:"allowed_values,omitempty"` DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` @@ -621,11 +1125,6 @@ func (x *TicketCustomFieldPickStringValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldPickStringValue.ProtoReflect.Descriptor instead. -func (*TicketCustomFieldPickStringValue) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{7} -} - func (x *TicketCustomFieldPickStringValue) GetValue() string { if x != nil { return x.Value @@ -647,8 +1146,38 @@ func (x *TicketCustomFieldPickStringValue) GetDefaultValue() string { return "" } +func (x *TicketCustomFieldPickStringValue) SetValue(v string) { + x.Value = v +} + +func (x *TicketCustomFieldPickStringValue) SetAllowedValues(v []string) { + x.AllowedValues = v +} + +func (x *TicketCustomFieldPickStringValue) SetDefaultValue(v string) { + x.DefaultValue = v +} + +type TicketCustomFieldPickStringValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Value string + AllowedValues []string + DefaultValue string +} + +func (b0 TicketCustomFieldPickStringValue_builder) Build() *TicketCustomFieldPickStringValue { + m0 := &TicketCustomFieldPickStringValue{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + x.AllowedValues = b.AllowedValues + x.DefaultValue = b.DefaultValue + return m0 +} + type TicketCustomFieldPickMultipleStringValues struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` AllowedValues []string `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3" json:"allowed_values,omitempty"` DefaultValues []string `protobuf:"bytes,3,rep,name=default_values,json=defaultValues,proto3" json:"default_values,omitempty"` @@ -681,11 +1210,6 @@ func (x *TicketCustomFieldPickMultipleStringValues) ProtoReflect() protoreflect. return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldPickMultipleStringValues.ProtoReflect.Descriptor instead. -func (*TicketCustomFieldPickMultipleStringValues) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{8} -} - func (x *TicketCustomFieldPickMultipleStringValues) GetValues() []string { if x != nil { return x.Values @@ -707,8 +1231,38 @@ func (x *TicketCustomFieldPickMultipleStringValues) GetDefaultValues() []string return nil } +func (x *TicketCustomFieldPickMultipleStringValues) SetValues(v []string) { + x.Values = v +} + +func (x *TicketCustomFieldPickMultipleStringValues) SetAllowedValues(v []string) { + x.AllowedValues = v +} + +func (x *TicketCustomFieldPickMultipleStringValues) SetDefaultValues(v []string) { + x.DefaultValues = v +} + +type TicketCustomFieldPickMultipleStringValues_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Values []string + AllowedValues []string + DefaultValues []string +} + +func (b0 TicketCustomFieldPickMultipleStringValues_builder) Build() *TicketCustomFieldPickMultipleStringValues { + m0 := &TicketCustomFieldPickMultipleStringValues{} + b, x := &b0, m0 + _, _ = b, x + x.Values = b.Values + x.AllowedValues = b.AllowedValues + x.DefaultValues = b.DefaultValues + return m0 +} + type TicketCustomFieldPickObjectValue struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Value *TicketCustomFieldObjectValue `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` AllowedValues []*TicketCustomFieldObjectValue `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3" json:"allowed_values,omitempty"` DefaultValue *TicketCustomFieldObjectValue `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` @@ -741,11 +1295,6 @@ func (x *TicketCustomFieldPickObjectValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldPickObjectValue.ProtoReflect.Descriptor instead. 
-func (*TicketCustomFieldPickObjectValue) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{9} -} - func (x *TicketCustomFieldPickObjectValue) GetValue() *TicketCustomFieldObjectValue { if x != nil { return x.Value @@ -767,8 +1316,60 @@ func (x *TicketCustomFieldPickObjectValue) GetDefaultValue() *TicketCustomFieldO return nil } +func (x *TicketCustomFieldPickObjectValue) SetValue(v *TicketCustomFieldObjectValue) { + x.Value = v +} + +func (x *TicketCustomFieldPickObjectValue) SetAllowedValues(v []*TicketCustomFieldObjectValue) { + x.AllowedValues = v +} + +func (x *TicketCustomFieldPickObjectValue) SetDefaultValue(v *TicketCustomFieldObjectValue) { + x.DefaultValue = v +} + +func (x *TicketCustomFieldPickObjectValue) HasValue() bool { + if x == nil { + return false + } + return x.Value != nil +} + +func (x *TicketCustomFieldPickObjectValue) HasDefaultValue() bool { + if x == nil { + return false + } + return x.DefaultValue != nil +} + +func (x *TicketCustomFieldPickObjectValue) ClearValue() { + x.Value = nil +} + +func (x *TicketCustomFieldPickObjectValue) ClearDefaultValue() { + x.DefaultValue = nil +} + +type TicketCustomFieldPickObjectValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value *TicketCustomFieldObjectValue + AllowedValues []*TicketCustomFieldObjectValue + DefaultValue *TicketCustomFieldObjectValue +} + +func (b0 TicketCustomFieldPickObjectValue_builder) Build() *TicketCustomFieldPickObjectValue { + m0 := &TicketCustomFieldPickObjectValue{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + x.AllowedValues = b.AllowedValues + x.DefaultValue = b.DefaultValue + return m0 +} + type TicketCustomFieldPickMultipleObjectValues struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Values []*TicketCustomFieldObjectValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` AllowedValues []*TicketCustomFieldObjectValue `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3" json:"allowed_values,omitempty"` DefaultValues []*TicketCustomFieldObjectValue `protobuf:"bytes,3,rep,name=default_values,json=defaultValues,proto3" json:"default_values,omitempty"` @@ -801,11 +1402,6 @@ func (x *TicketCustomFieldPickMultipleObjectValues) ProtoReflect() protoreflect. return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldPickMultipleObjectValues.ProtoReflect.Descriptor instead. -func (*TicketCustomFieldPickMultipleObjectValues) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{10} -} - func (x *TicketCustomFieldPickMultipleObjectValues) GetValues() []*TicketCustomFieldObjectValue { if x != nil { return x.Values @@ -827,8 +1423,38 @@ func (x *TicketCustomFieldPickMultipleObjectValues) GetDefaultValues() []*Ticket return nil } +func (x *TicketCustomFieldPickMultipleObjectValues) SetValues(v []*TicketCustomFieldObjectValue) { + x.Values = v +} + +func (x *TicketCustomFieldPickMultipleObjectValues) SetAllowedValues(v []*TicketCustomFieldObjectValue) { + x.AllowedValues = v +} + +func (x *TicketCustomFieldPickMultipleObjectValues) SetDefaultValues(v []*TicketCustomFieldObjectValue) { + x.DefaultValues = v +} + +type TicketCustomFieldPickMultipleObjectValues_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Values []*TicketCustomFieldObjectValue + AllowedValues []*TicketCustomFieldObjectValue + DefaultValues []*TicketCustomFieldObjectValue +} + +func (b0 TicketCustomFieldPickMultipleObjectValues_builder) Build() *TicketCustomFieldPickMultipleObjectValues { + m0 := &TicketCustomFieldPickMultipleObjectValues{} + b, x := &b0, m0 + _, _ = b, x + x.Values = b.Values + x.AllowedValues = b.AllowedValues + x.DefaultValues = b.DefaultValues + return m0 +} + type TicketCustomFieldObjectValue struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` unknownFields protoimpl.UnknownFields @@ -860,11 +1486,6 @@ func (x *TicketCustomFieldObjectValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketCustomFieldObjectValue.ProtoReflect.Descriptor instead. -func (*TicketCustomFieldObjectValue) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{11} -} - func (x *TicketCustomFieldObjectValue) GetId() string { if x != nil { return x.Id @@ -879,8 +1500,32 @@ func (x *TicketCustomFieldObjectValue) GetDisplayName() string { return "" } +func (x *TicketCustomFieldObjectValue) SetId(v string) { + x.Id = v +} + +func (x *TicketCustomFieldObjectValue) SetDisplayName(v string) { + x.DisplayName = v +} + +type TicketCustomFieldObjectValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + DisplayName string +} + +func (b0 TicketCustomFieldObjectValue_builder) Build() *TicketCustomFieldObjectValue { + m0 := &TicketCustomFieldObjectValue{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.DisplayName = b.DisplayName + return m0 +} + type TicketStatus struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` unknownFields protoimpl.UnknownFields @@ -912,11 +1557,6 @@ func (x *TicketStatus) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketStatus.ProtoReflect.Descriptor instead. -func (*TicketStatus) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{12} -} - func (x *TicketStatus) GetId() string { if x != nil { return x.Id @@ -931,8 +1571,32 @@ func (x *TicketStatus) GetDisplayName() string { return "" } +func (x *TicketStatus) SetId(v string) { + x.Id = v +} + +func (x *TicketStatus) SetDisplayName(v string) { + x.DisplayName = v +} + +type TicketStatus_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + DisplayName string +} + +func (b0 TicketStatus_builder) Build() *TicketStatus { + m0 := &TicketStatus{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.DisplayName = b.DisplayName + return m0 +} + type TicketsServiceGetTicketSchemaRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -964,11 +1628,6 @@ func (x *TicketsServiceGetTicketSchemaRequest) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceGetTicketSchemaRequest.ProtoReflect.Descriptor instead. -func (*TicketsServiceGetTicketSchemaRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{13} -} - func (x *TicketsServiceGetTicketSchemaRequest) GetId() string { if x != nil { return x.Id @@ -983,8 +1642,32 @@ func (x *TicketsServiceGetTicketSchemaRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *TicketsServiceGetTicketSchemaRequest) SetId(v string) { + x.Id = v +} + +func (x *TicketsServiceGetTicketSchemaRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type TicketsServiceGetTicketSchemaRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + Annotations []*anypb.Any +} + +func (b0 TicketsServiceGetTicketSchemaRequest_builder) Build() *TicketsServiceGetTicketSchemaRequest { + m0 := &TicketsServiceGetTicketSchemaRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.Annotations = b.Annotations + return m0 +} + type TicketsServiceGetTicketSchemaResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Schema *TicketSchema `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -1016,11 +1699,6 @@ func (x *TicketsServiceGetTicketSchemaResponse) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceGetTicketSchemaResponse.ProtoReflect.Descriptor instead. 
-func (*TicketsServiceGetTicketSchemaResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{14} -} - func (x *TicketsServiceGetTicketSchemaResponse) GetSchema() *TicketSchema { if x != nil { return x.Schema @@ -1028,15 +1706,50 @@ func (x *TicketsServiceGetTicketSchemaResponse) GetSchema() *TicketSchema { return nil } -func (x *TicketsServiceGetTicketSchemaResponse) GetAnnotations() []*anypb.Any { - if x != nil { - return x.Annotations - } - return nil +func (x *TicketsServiceGetTicketSchemaResponse) GetAnnotations() []*anypb.Any { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *TicketsServiceGetTicketSchemaResponse) SetSchema(v *TicketSchema) { + x.Schema = v +} + +func (x *TicketsServiceGetTicketSchemaResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *TicketsServiceGetTicketSchemaResponse) HasSchema() bool { + if x == nil { + return false + } + return x.Schema != nil +} + +func (x *TicketsServiceGetTicketSchemaResponse) ClearSchema() { + x.Schema = nil +} + +type TicketsServiceGetTicketSchemaResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Schema *TicketSchema + Annotations []*anypb.Any +} + +func (b0 TicketsServiceGetTicketSchemaResponse_builder) Build() *TicketsServiceGetTicketSchemaResponse { + m0 := &TicketsServiceGetTicketSchemaResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Schema = b.Schema + x.Annotations = b.Annotations + return m0 } type TicketsServiceListTicketSchemasRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` PageSize uint32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -1069,11 +1782,6 @@ func (x *TicketsServiceListTicketSchemasRequest) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceListTicketSchemasRequest.ProtoReflect.Descriptor instead. -func (*TicketsServiceListTicketSchemasRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{15} -} - func (x *TicketsServiceListTicketSchemasRequest) GetPageSize() uint32 { if x != nil { return x.PageSize @@ -1095,8 +1803,38 @@ func (x *TicketsServiceListTicketSchemasRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *TicketsServiceListTicketSchemasRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *TicketsServiceListTicketSchemasRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *TicketsServiceListTicketSchemasRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type TicketsServiceListTicketSchemasRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + PageSize uint32 + PageToken string + Annotations []*anypb.Any +} + +func (b0 TicketsServiceListTicketSchemasRequest_builder) Build() *TicketsServiceListTicketSchemasRequest { + m0 := &TicketsServiceListTicketSchemasRequest{} + b, x := &b0, m0 + _, _ = b, x + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + return m0 +} + type TicketsServiceListTicketSchemasResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` List []*TicketSchema `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -1129,11 +1867,6 @@ func (x *TicketsServiceListTicketSchemasResponse) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceListTicketSchemasResponse.ProtoReflect.Descriptor instead. -func (*TicketsServiceListTicketSchemasResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{16} -} - func (x *TicketsServiceListTicketSchemasResponse) GetList() []*TicketSchema { if x != nil { return x.List @@ -1155,8 +1888,38 @@ func (x *TicketsServiceListTicketSchemasResponse) GetAnnotations() []*anypb.Any return nil } +func (x *TicketsServiceListTicketSchemasResponse) SetList(v []*TicketSchema) { + x.List = v +} + +func (x *TicketsServiceListTicketSchemasResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} + +func (x *TicketsServiceListTicketSchemasResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type TicketsServiceListTicketSchemasResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*TicketSchema + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 TicketsServiceListTicketSchemasResponse_builder) Build() *TicketsServiceListTicketSchemasResponse { + m0 := &TicketsServiceListTicketSchemasResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.NextPageToken = b.NextPageToken + x.Annotations = b.Annotations + return m0 +} + type Ticket struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` @@ -1200,11 +1963,6 @@ func (x *Ticket) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Ticket.ProtoReflect.Descriptor instead. 
-func (*Ticket) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{17} -} - func (x *Ticket) GetId() string { if x != nil { return x.Id @@ -1303,8 +2061,181 @@ func (x *Ticket) GetRequestedFor() *Resource { return nil } +func (x *Ticket) SetId(v string) { + x.Id = v +} + +func (x *Ticket) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *Ticket) SetDescription(v string) { + x.Description = v +} + +func (x *Ticket) SetAssignees(v []*Resource) { + x.Assignees = v +} + +func (x *Ticket) SetReporter(v *Resource) { + x.Reporter = v +} + +func (x *Ticket) SetStatus(v *TicketStatus) { + x.Status = v +} + +func (x *Ticket) SetType(v *TicketType) { + x.Type = v +} + +func (x *Ticket) SetLabels(v []string) { + x.Labels = v +} + +func (x *Ticket) SetUrl(v string) { + x.Url = v +} + +func (x *Ticket) SetCustomFields(v map[string]*TicketCustomField) { + x.CustomFields = v +} + +func (x *Ticket) SetCreatedAt(v *timestamppb.Timestamp) { + x.CreatedAt = v +} + +func (x *Ticket) SetUpdatedAt(v *timestamppb.Timestamp) { + x.UpdatedAt = v +} + +func (x *Ticket) SetCompletedAt(v *timestamppb.Timestamp) { + x.CompletedAt = v +} + +func (x *Ticket) SetRequestedFor(v *Resource) { + x.RequestedFor = v +} + +func (x *Ticket) HasReporter() bool { + if x == nil { + return false + } + return x.Reporter != nil +} + +func (x *Ticket) HasStatus() bool { + if x == nil { + return false + } + return x.Status != nil +} + +func (x *Ticket) HasType() bool { + if x == nil { + return false + } + return x.Type != nil +} + +func (x *Ticket) HasCreatedAt() bool { + if x == nil { + return false + } + return x.CreatedAt != nil +} + +func (x *Ticket) HasUpdatedAt() bool { + if x == nil { + return false + } + return x.UpdatedAt != nil +} + +func (x *Ticket) HasCompletedAt() bool { + if x == nil { + return false + } + return x.CompletedAt != nil +} + +func (x *Ticket) HasRequestedFor() bool { + if x == nil { + return false + } + return x.RequestedFor != nil +} + +func (x *Ticket) ClearReporter() { + x.Reporter = nil +} + +func (x *Ticket) ClearStatus() { + x.Status = nil +} + +func (x *Ticket) ClearType() { + x.Type = nil +} + +func (x *Ticket) ClearCreatedAt() { + x.CreatedAt = nil +} + +func (x *Ticket) ClearUpdatedAt() { + x.UpdatedAt = nil +} + +func (x *Ticket) ClearCompletedAt() { + x.CompletedAt = nil +} + +func (x *Ticket) ClearRequestedFor() { + x.RequestedFor = nil +} + +type Ticket_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + DisplayName string + Description string + Assignees []*Resource + Reporter *Resource + Status *TicketStatus + Type *TicketType + Labels []string + Url string + CustomFields map[string]*TicketCustomField + CreatedAt *timestamppb.Timestamp + UpdatedAt *timestamppb.Timestamp + CompletedAt *timestamppb.Timestamp + RequestedFor *Resource +} + +func (b0 Ticket_builder) Build() *Ticket { + m0 := &Ticket{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.DisplayName = b.DisplayName + x.Description = b.Description + x.Assignees = b.Assignees + x.Reporter = b.Reporter + x.Status = b.Status + x.Type = b.Type + x.Labels = b.Labels + x.Url = b.Url + x.CustomFields = b.CustomFields + x.CreatedAt = b.CreatedAt + x.UpdatedAt = b.UpdatedAt + x.CompletedAt = b.CompletedAt + x.RequestedFor = b.RequestedFor + return m0 +} + type TicketType struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` unknownFields protoimpl.UnknownFields @@ -1336,11 +2267,6 @@ func (x *TicketType) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketType.ProtoReflect.Descriptor instead. -func (*TicketType) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{18} -} - func (x *TicketType) GetId() string { if x != nil { return x.Id @@ -1355,8 +2281,32 @@ func (x *TicketType) GetDisplayName() string { return "" } +func (x *TicketType) SetId(v string) { + x.Id = v +} + +func (x *TicketType) SetDisplayName(v string) { + x.DisplayName = v +} + +type TicketType_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + DisplayName string +} + +func (b0 TicketType_builder) Build() *TicketType { + m0 := &TicketType{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.DisplayName = b.DisplayName + return m0 +} + type TicketRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` Status *TicketStatus `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` @@ -1393,11 +2343,6 @@ func (x *TicketRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketRequest.ProtoReflect.Descriptor instead. 
-func (*TicketRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{19} -} - func (x *TicketRequest) GetDisplayName() string { if x != nil { return x.DisplayName @@ -1447,8 +2392,95 @@ func (x *TicketRequest) GetRequestedFor() *Resource { return nil } +func (x *TicketRequest) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *TicketRequest) SetDescription(v string) { + x.Description = v +} + +func (x *TicketRequest) SetStatus(v *TicketStatus) { + x.Status = v +} + +func (x *TicketRequest) SetType(v *TicketType) { + x.Type = v +} + +func (x *TicketRequest) SetLabels(v []string) { + x.Labels = v +} + +func (x *TicketRequest) SetCustomFields(v map[string]*TicketCustomField) { + x.CustomFields = v +} + +func (x *TicketRequest) SetRequestedFor(v *Resource) { + x.RequestedFor = v +} + +func (x *TicketRequest) HasStatus() bool { + if x == nil { + return false + } + return x.Status != nil +} + +func (x *TicketRequest) HasType() bool { + if x == nil { + return false + } + return x.Type != nil +} + +func (x *TicketRequest) HasRequestedFor() bool { + if x == nil { + return false + } + return x.RequestedFor != nil +} + +func (x *TicketRequest) ClearStatus() { + x.Status = nil +} + +func (x *TicketRequest) ClearType() { + x.Type = nil +} + +func (x *TicketRequest) ClearRequestedFor() { + x.RequestedFor = nil +} + +type TicketRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DisplayName string + Description string + Status *TicketStatus + Type *TicketType + Labels []string + CustomFields map[string]*TicketCustomField + RequestedFor *Resource +} + +func (b0 TicketRequest_builder) Build() *TicketRequest { + m0 := &TicketRequest{} + b, x := &b0, m0 + _, _ = b, x + x.DisplayName = b.DisplayName + x.Description = b.Description + x.Status = b.Status + x.Type = b.Type + x.Labels = b.Labels + x.CustomFields = b.CustomFields + x.RequestedFor = b.RequestedFor + return m0 +} + type TicketsServiceCreateTicketRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Request *TicketRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` Schema *TicketSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,8,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -1481,11 +2513,6 @@ func (x *TicketsServiceCreateTicketRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceCreateTicketRequest.ProtoReflect.Descriptor instead. 
-func (*TicketsServiceCreateTicketRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{20} -} - func (x *TicketsServiceCreateTicketRequest) GetRequest() *TicketRequest { if x != nil { return x.Request @@ -1507,9 +2534,61 @@ func (x *TicketsServiceCreateTicketRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *TicketsServiceCreateTicketRequest) SetRequest(v *TicketRequest) { + x.Request = v +} + +func (x *TicketsServiceCreateTicketRequest) SetSchema(v *TicketSchema) { + x.Schema = v +} + +func (x *TicketsServiceCreateTicketRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *TicketsServiceCreateTicketRequest) HasRequest() bool { + if x == nil { + return false + } + return x.Request != nil +} + +func (x *TicketsServiceCreateTicketRequest) HasSchema() bool { + if x == nil { + return false + } + return x.Schema != nil +} + +func (x *TicketsServiceCreateTicketRequest) ClearRequest() { + x.Request = nil +} + +func (x *TicketsServiceCreateTicketRequest) ClearSchema() { + x.Schema = nil +} + +type TicketsServiceCreateTicketRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Request *TicketRequest + Schema *TicketSchema + Annotations []*anypb.Any +} + +func (b0 TicketsServiceCreateTicketRequest_builder) Build() *TicketsServiceCreateTicketRequest { + m0 := &TicketsServiceCreateTicketRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Request = b.Request + x.Schema = b.Schema + x.Annotations = b.Annotations + return m0 +} + // TODO(lauren) maybe the error should be a separate proto so we can store retryable error type TicketsServiceCreateTicketResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Ticket *Ticket `protobuf:"bytes,1,opt,name=ticket,proto3" json:"ticket,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` @@ -1542,11 +2621,6 @@ func (x *TicketsServiceCreateTicketResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceCreateTicketResponse.ProtoReflect.Descriptor instead. -func (*TicketsServiceCreateTicketResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{21} -} - func (x *TicketsServiceCreateTicketResponse) GetTicket() *Ticket { if x != nil { return x.Ticket @@ -1568,8 +2642,49 @@ func (x *TicketsServiceCreateTicketResponse) GetError() string { return "" } +func (x *TicketsServiceCreateTicketResponse) SetTicket(v *Ticket) { + x.Ticket = v +} + +func (x *TicketsServiceCreateTicketResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *TicketsServiceCreateTicketResponse) SetError(v string) { + x.Error = v +} + +func (x *TicketsServiceCreateTicketResponse) HasTicket() bool { + if x == nil { + return false + } + return x.Ticket != nil +} + +func (x *TicketsServiceCreateTicketResponse) ClearTicket() { + x.Ticket = nil +} + +type TicketsServiceCreateTicketResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Ticket *Ticket + Annotations []*anypb.Any + Error string +} + +func (b0 TicketsServiceCreateTicketResponse_builder) Build() *TicketsServiceCreateTicketResponse { + m0 := &TicketsServiceCreateTicketResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Ticket = b.Ticket + x.Annotations = b.Annotations + x.Error = b.Error + return m0 +} + type TicketsServiceGetTicketRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -1601,11 +2716,6 @@ func (x *TicketsServiceGetTicketRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceGetTicketRequest.ProtoReflect.Descriptor instead. -func (*TicketsServiceGetTicketRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{22} -} - func (x *TicketsServiceGetTicketRequest) GetId() string { if x != nil { return x.Id @@ -1620,8 +2730,32 @@ func (x *TicketsServiceGetTicketRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *TicketsServiceGetTicketRequest) SetId(v string) { + x.Id = v +} + +func (x *TicketsServiceGetTicketRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type TicketsServiceGetTicketRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + Annotations []*anypb.Any +} + +func (b0 TicketsServiceGetTicketRequest_builder) Build() *TicketsServiceGetTicketRequest { + m0 := &TicketsServiceGetTicketRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.Annotations = b.Annotations + return m0 +} + type TicketsServiceGetTicketResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Ticket *Ticket `protobuf:"bytes,1,opt,name=ticket,proto3" json:"ticket,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` @@ -1654,11 +2788,6 @@ func (x *TicketsServiceGetTicketResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceGetTicketResponse.ProtoReflect.Descriptor instead. -func (*TicketsServiceGetTicketResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{23} -} - func (x *TicketsServiceGetTicketResponse) GetTicket() *Ticket { if x != nil { return x.Ticket @@ -1680,8 +2809,49 @@ func (x *TicketsServiceGetTicketResponse) GetError() string { return "" } +func (x *TicketsServiceGetTicketResponse) SetTicket(v *Ticket) { + x.Ticket = v +} + +func (x *TicketsServiceGetTicketResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *TicketsServiceGetTicketResponse) SetError(v string) { + x.Error = v +} + +func (x *TicketsServiceGetTicketResponse) HasTicket() bool { + if x == nil { + return false + } + return x.Ticket != nil +} + +func (x *TicketsServiceGetTicketResponse) ClearTicket() { + x.Ticket = nil +} + +type TicketsServiceGetTicketResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Ticket *Ticket + Annotations []*anypb.Any + Error string +} + +func (b0 TicketsServiceGetTicketResponse_builder) Build() *TicketsServiceGetTicketResponse { + m0 := &TicketsServiceGetTicketResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Ticket = b.Ticket + x.Annotations = b.Annotations + x.Error = b.Error + return m0 +} + type TicketsServiceBulkCreateTicketsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` TicketRequests []*TicketsServiceCreateTicketRequest `protobuf:"bytes,1,rep,name=ticket_requests,json=ticketRequests,proto3" json:"ticket_requests,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1712,11 +2882,6 @@ func (x *TicketsServiceBulkCreateTicketsRequest) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceBulkCreateTicketsRequest.ProtoReflect.Descriptor instead. -func (*TicketsServiceBulkCreateTicketsRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{24} -} - func (x *TicketsServiceBulkCreateTicketsRequest) GetTicketRequests() []*TicketsServiceCreateTicketRequest { if x != nil { return x.TicketRequests @@ -1724,8 +2889,26 @@ func (x *TicketsServiceBulkCreateTicketsRequest) GetTicketRequests() []*TicketsS return nil } +func (x *TicketsServiceBulkCreateTicketsRequest) SetTicketRequests(v []*TicketsServiceCreateTicketRequest) { + x.TicketRequests = v +} + +type TicketsServiceBulkCreateTicketsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + TicketRequests []*TicketsServiceCreateTicketRequest +} + +func (b0 TicketsServiceBulkCreateTicketsRequest_builder) Build() *TicketsServiceBulkCreateTicketsRequest { + m0 := &TicketsServiceBulkCreateTicketsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.TicketRequests = b.TicketRequests + return m0 +} + type TicketsServiceBulkCreateTicketsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Tickets []*TicketsServiceCreateTicketResponse `protobuf:"bytes,1,rep,name=tickets,proto3" json:"tickets,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1756,11 +2939,6 @@ func (x *TicketsServiceBulkCreateTicketsResponse) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceBulkCreateTicketsResponse.ProtoReflect.Descriptor instead. -func (*TicketsServiceBulkCreateTicketsResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{25} -} - func (x *TicketsServiceBulkCreateTicketsResponse) GetTickets() []*TicketsServiceCreateTicketResponse { if x != nil { return x.Tickets @@ -1768,8 +2946,26 @@ func (x *TicketsServiceBulkCreateTicketsResponse) GetTickets() []*TicketsService return nil } +func (x *TicketsServiceBulkCreateTicketsResponse) SetTickets(v []*TicketsServiceCreateTicketResponse) { + x.Tickets = v +} + +type TicketsServiceBulkCreateTicketsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Tickets []*TicketsServiceCreateTicketResponse +} + +func (b0 TicketsServiceBulkCreateTicketsResponse_builder) Build() *TicketsServiceBulkCreateTicketsResponse { + m0 := &TicketsServiceBulkCreateTicketsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Tickets = b.Tickets + return m0 +} + type TicketsServiceBulkGetTicketsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` TicketRequests []*TicketsServiceGetTicketRequest `protobuf:"bytes,1,rep,name=ticket_requests,json=ticketRequests,proto3" json:"ticket_requests,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1800,11 +2996,6 @@ func (x *TicketsServiceBulkGetTicketsRequest) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceBulkGetTicketsRequest.ProtoReflect.Descriptor instead. -func (*TicketsServiceBulkGetTicketsRequest) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{26} -} - func (x *TicketsServiceBulkGetTicketsRequest) GetTicketRequests() []*TicketsServiceGetTicketRequest { if x != nil { return x.TicketRequests @@ -1812,8 +3003,26 @@ func (x *TicketsServiceBulkGetTicketsRequest) GetTicketRequests() []*TicketsServ return nil } +func (x *TicketsServiceBulkGetTicketsRequest) SetTicketRequests(v []*TicketsServiceGetTicketRequest) { + x.TicketRequests = v +} + +type TicketsServiceBulkGetTicketsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + TicketRequests []*TicketsServiceGetTicketRequest +} + +func (b0 TicketsServiceBulkGetTicketsRequest_builder) Build() *TicketsServiceBulkGetTicketsRequest { + m0 := &TicketsServiceBulkGetTicketsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.TicketRequests = b.TicketRequests + return m0 +} + type TicketsServiceBulkGetTicketsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Tickets []*TicketsServiceGetTicketResponse `protobuf:"bytes,1,rep,name=tickets,proto3" json:"tickets,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1844,11 +3053,6 @@ func (x *TicketsServiceBulkGetTicketsResponse) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use TicketsServiceBulkGetTicketsResponse.ProtoReflect.Descriptor instead. 
-func (*TicketsServiceBulkGetTicketsResponse) Descriptor() ([]byte, []int) { - return file_c1_connector_v2_ticket_proto_rawDescGZIP(), []int{27} -} - func (x *TicketsServiceBulkGetTicketsResponse) GetTickets() []*TicketsServiceGetTicketResponse { if x != nil { return x.Tickets @@ -1856,471 +3060,176 @@ func (x *TicketsServiceBulkGetTicketsResponse) GetTickets() []*TicketsServiceGet return nil } -var File_c1_connector_v2_ticket_proto protoreflect.FileDescriptor +func (x *TicketsServiceBulkGetTicketsResponse) SetTickets(v []*TicketsServiceGetTicketResponse) { + x.Tickets = v +} -var file_c1_connector_v2_ticket_proto_rawDesc = string([]byte{ - 0x0a, 0x1c, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x1a, - 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, - 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, 0x03, 0x0a, 0x0c, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x36, 0x0a, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x63, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x89, 0x08, 0x0a, 0x11, 0x54, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x52, - 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x64, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, - 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, - 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x5b, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 
0x70, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x5f, 0x0a, 0x11, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x50, 0x69, 0x63, 0x6b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x69, 0x63, 0x6b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x7b, 0x0a, 0x1b, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x6d, 0x75, - 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x73, 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x69, - 0x63, 0x6b, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x48, 0x00, 0x52, 0x18, 0x70, 0x69, 0x63, 0x6b, 0x4d, 0x75, - 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x11, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x69, 0x63, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x48, 0x00, 0x52, 0x0f, 0x70, 0x69, 0x63, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x7b, 0x0a, 0x1b, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x69, 0x63, 0x6b, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x73, 0x48, 0x00, 0x52, 0x18, 0x70, 0x69, 0x63, 0x6b, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x12, 0x52, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0b, 0x61, 
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x1c, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x22, 0x5e, 0x0a, 0x1d, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x22, 0x32, 0x0a, 0x1a, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x1c, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x40, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x94, 0x01, 0x0a, 0x1f, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x30, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0x84, 0x01, 0x0a, 0x20, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 
0x64, 0x50, 0x69, 0x63, 0x6b, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x91, 0x01, 0x0a, 0x29, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x69, - 0x63, 0x6b, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x02, 0x0a, - 0x20, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x69, 0x63, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x54, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, - 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0d, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x22, 0x9e, 0x02, 0x0a, 0x29, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x69, 0x63, 0x6b, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, - 0x6c, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 
0x6c, 0x75, 0x65, 0x73, 0x12, 0x45, - 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x22, 0x51, 0x0a, 0x1c, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x41, 0x0a, 0x0c, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x6e, 0x0a, 0x24, 0x54, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x25, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 
0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0a, - 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, 0x40, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2c, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, - 0x01, 0x28, 0x80, 0x20, 0xd0, 0x01, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xcb, 0x01, 0x0a, 0x27, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, - 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x20, 0xd0, 0x01, - 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x06, 0x0a, 0x06, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, - 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x09, 0x61, 0x73, 0x73, 0x69, - 0x67, 0x6e, 
0x65, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x65, - 0x73, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, - 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x0d, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x61, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, - 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, - 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, - 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x1a, - 0x63, 0x0a, 0x11, 0x43, 0x75, 0x73, 
0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x0a, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, - 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xd0, 0x03, 0x0a, 0x0d, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, - 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x55, 0x0a, 0x0d, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, - 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, - 0x46, 0x6f, 0x72, 0x1a, 0x63, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcc, 0x01, 0x0a, 0x21, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, - 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, - 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa3, 0x01, 0x0a, 0x22, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, - 0x0a, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, - 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x68, 0x0a, - 0x1e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, - 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x1f, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x74, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 
0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x36, 0x0a, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x85, 0x01, 0x0a, 0x26, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x75, 0x6c, - 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x0f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x52, 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x22, 0x78, 0x0a, 0x27, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x42, 0x75, 0x6c, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, - 0x07, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x52, 0x07, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x22, 0x7f, 0x0a, 0x23, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x75, - 0x6c, 0x6b, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x0f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0e, 0x74, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x72, 0x0a, - 0x24, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, - 0x75, 0x6c, 0x6b, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x07, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x53, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x32, 0x8d, 0x06, 0x0a, 0x0e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x32, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, - 0x09, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2f, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x86, 0x01, - 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x12, 0x37, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, - 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x35, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x86, 0x01, 0x0a, 0x11, 0x42, 0x75, - 0x6c, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, - 0x37, 0x2e, 0x63, 0x31, 0x2e, 0x63, 
0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x42, 0x75, 0x6c, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x75, 0x6c, 0x6b, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x7d, 0x0a, 0x0e, 0x42, 0x75, 0x6c, 0x6b, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x12, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x75, 0x6c, 0x6b, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x75, 0x6c, 0x6b, 0x47, - 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, - 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -}) - -var ( - file_c1_connector_v2_ticket_proto_rawDescOnce sync.Once - file_c1_connector_v2_ticket_proto_rawDescData []byte -) +type TicketsServiceBulkGetTicketsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Tickets []*TicketsServiceGetTicketResponse +} -func file_c1_connector_v2_ticket_proto_rawDescGZIP() []byte { - file_c1_connector_v2_ticket_proto_rawDescOnce.Do(func() { - file_c1_connector_v2_ticket_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_ticket_proto_rawDesc), len(file_c1_connector_v2_ticket_proto_rawDesc))) - }) - return file_c1_connector_v2_ticket_proto_rawDescData +func (b0 TicketsServiceBulkGetTicketsResponse_builder) Build() *TicketsServiceBulkGetTicketsResponse { + m0 := &TicketsServiceBulkGetTicketsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Tickets = b.Tickets + return m0 } +var File_c1_connector_v2_ticket_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_ticket_proto_rawDesc = "" + + "\n" + + "\x1cc1/connector/v2/ticket.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17validate/validate.proto\"\xa2\x03\n" + + "\fTicketSchema\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x121\n" + + "\x05types\x18\x03 \x03(\v2\x1b.c1.connector.v2.TicketTypeR\x05types\x129\n" + + "\bstatuses\x18\x04 \x03(\v2\x1d.c1.connector.v2.TicketStatusR\bstatuses\x12T\n" + + "\rcustom_fields\x18\x05 \x03(\v2/.c1.connector.v2.TicketSchema.CustomFieldsEntryR\fcustomFields\x126\n" + + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1ac\n" + + "\x11CustomFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x128\n" + + "\x05value\x18\x02 \x01(\v2\".c1.connector.v2.TicketCustomFieldR\x05value:\x028\x01\"\x89\b\n" + + "\x11TicketCustomField\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x1a\n" + + "\brequired\x18\x03 \x01(\bR\brequired\x12R\n" + + "\fstring_value\x18d \x01(\v2-.c1.connector.v2.TicketCustomFieldStringValueH\x00R\vstringValue\x12U\n" + + "\rstring_values\x18e \x01(\v2..c1.connector.v2.TicketCustomFieldStringValuesH\x00R\fstringValues\x12L\n" + + "\n" + + "bool_value\x18f \x01(\v2+.c1.connector.v2.TicketCustomFieldBoolValueH\x00R\tboolValue\x12[\n" + + "\x0ftimestamp_value\x18g \x01(\v20.c1.connector.v2.TicketCustomFieldTimestampValueH\x00R\x0etimestampValue\x12_\n" + + "\x11pick_string_value\x18h \x01(\v21.c1.connector.v2.TicketCustomFieldPickStringValueH\x00R\x0fpickStringValue\x12{\n" + + "\x1bpick_multiple_string_values\x18i \x01(\v2:.c1.connector.v2.TicketCustomFieldPickMultipleStringValuesH\x00R\x18pickMultipleStringValues\x12_\n" + + "\x11pick_object_value\x18j \x01(\v21.c1.connector.v2.TicketCustomFieldPickObjectValueH\x00R\x0fpickObjectValue\x12{\n" + + "\x1bpick_multiple_object_values\x18k \x01(\v2:.c1.connector.v2.TicketCustomFieldPickMultipleObjectValuesH\x00R\x18pickMultipleObjectValues\x12R\n" + + "\fnumber_value\x18l \x01(\v2-.c1.connector.v2.TicketCustomFieldNumberValueH\x00R\vnumberValue\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotationsB\a\n" + + "\x05value\"Y\n" + + "\x1cTicketCustomFieldStringValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\x12#\n" + + "\rdefault_value\x18\x02 \x01(\tR\fdefaultValue\"^\n" + + "\x1dTicketCustomFieldStringValues\x12\x16\n" + + "\x06values\x18\x01 \x03(\tR\x06values\x12%\n" + + "\x0edefault_values\x18\x02 \x03(\tR\rdefaultValues\"2\n" + + "\x1aTicketCustomFieldBoolValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\bR\x05value\"\x93\x01\n" + + 
"\x1cTicketCustomFieldNumberValue\x121\n" + + "\x05value\x18\x01 \x01(\v2\x1b.google.protobuf.FloatValueR\x05value\x12@\n" + + "\rdefault_value\x18\x02 \x01(\v2\x1b.google.protobuf.FloatValueR\fdefaultValue\"\x94\x01\n" + + "\x1fTicketCustomFieldTimestampValue\x120\n" + + "\x05value\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\x05value\x12?\n" + + "\rdefault_value\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\fdefaultValue\"\x84\x01\n" + + " TicketCustomFieldPickStringValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\x12%\n" + + "\x0eallowed_values\x18\x02 \x03(\tR\rallowedValues\x12#\n" + + "\rdefault_value\x18\x03 \x01(\tR\fdefaultValue\"\x91\x01\n" + + ")TicketCustomFieldPickMultipleStringValues\x12\x16\n" + + "\x06values\x18\x01 \x03(\tR\x06values\x12%\n" + + "\x0eallowed_values\x18\x02 \x03(\tR\rallowedValues\x12%\n" + + "\x0edefault_values\x18\x03 \x03(\tR\rdefaultValues\"\x91\x02\n" + + " TicketCustomFieldPickObjectValue\x12C\n" + + "\x05value\x18\x01 \x01(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\x05value\x12T\n" + + "\x0eallowed_values\x18\x02 \x03(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\rallowedValues\x12R\n" + + "\rdefault_value\x18\x03 \x01(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\fdefaultValue\"\x9e\x02\n" + + ")TicketCustomFieldPickMultipleObjectValues\x12E\n" + + "\x06values\x18\x01 \x03(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\x06values\x12T\n" + + "\x0eallowed_values\x18\x02 \x03(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\rallowedValues\x12T\n" + + "\x0edefault_values\x18\x03 \x03(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\rdefaultValues\"Q\n" + + "\x1cTicketCustomFieldObjectValue\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\"A\n" + + "\fTicketStatus\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\"n\n" + + "$TicketsServiceGetTicketSchemaRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x96\x01\n" + + "%TicketsServiceGetTicketSchemaResponse\x125\n" + + "\x06schema\x18\x01 \x01(\v2\x1d.c1.connector.v2.TicketSchemaR\x06schema\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xb7\x01\n" + + "&TicketsServiceListTicketSchemasRequest\x12'\n" + + "\tpage_size\x18\x01 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12,\n" + + "\n" + + "page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xcb\x01\n" + + "'TicketsServiceListTicketSchemasResponse\x121\n" + + "\x04list\x18\x01 \x03(\v2\x1d.c1.connector.v2.TicketSchemaR\x04list\x125\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x89\x06\n" + + "\x06Ticket\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x127\n" + + "\tassignees\x18\x04 \x03(\v2\x19.c1.connector.v2.ResourceR\tassignees\x125\n" + + "\breporter\x18\x05 \x01(\v2\x19.c1.connector.v2.ResourceR\breporter\x125\n" + + "\x06status\x18\a \x01(\v2\x1d.c1.connector.v2.TicketStatusR\x06status\x12/\n" + + "\x04type\x18\b \x01(\v2\x1b.c1.connector.v2.TicketTypeR\x04type\x12\x16\n" + + "\x06labels\x18\t 
\x03(\tR\x06labels\x12\x10\n" + + "\x03url\x18\n" + + " \x01(\tR\x03url\x12N\n" + + "\rcustom_fields\x18\v \x03(\v2).c1.connector.v2.Ticket.CustomFieldsEntryR\fcustomFields\x129\n" + + "\n" + + "created_at\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x129\n" + + "\n" + + "updated_at\x18\r \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAt\x12=\n" + + "\fcompleted_at\x18\x0e \x01(\v2\x1a.google.protobuf.TimestampR\vcompletedAt\x12>\n" + + "\rrequested_for\x18\x0f \x01(\v2\x19.c1.connector.v2.ResourceR\frequestedFor\x1ac\n" + + "\x11CustomFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x128\n" + + "\x05value\x18\x02 \x01(\v2\".c1.connector.v2.TicketCustomFieldR\x05value:\x028\x01\"?\n" + + "\n" + + "TicketType\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\"\xd0\x03\n" + + "\rTicketRequest\x12!\n" + + "\fdisplay_name\x18\x01 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x02 \x01(\tR\vdescription\x125\n" + + "\x06status\x18\x03 \x01(\v2\x1d.c1.connector.v2.TicketStatusR\x06status\x12/\n" + + "\x04type\x18\x04 \x01(\v2\x1b.c1.connector.v2.TicketTypeR\x04type\x12\x16\n" + + "\x06labels\x18\x05 \x03(\tR\x06labels\x12U\n" + + "\rcustom_fields\x18\x06 \x03(\v20.c1.connector.v2.TicketRequest.CustomFieldsEntryR\fcustomFields\x12>\n" + + "\rrequested_for\x18\a \x01(\v2\x19.c1.connector.v2.ResourceR\frequestedFor\x1ac\n" + + "\x11CustomFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x128\n" + + "\x05value\x18\x02 \x01(\v2\".c1.connector.v2.TicketCustomFieldR\x05value:\x028\x01\"\xcc\x01\n" + + "!TicketsServiceCreateTicketRequest\x128\n" + + "\arequest\x18\x01 \x01(\v2\x1e.c1.connector.v2.TicketRequestR\arequest\x125\n" + + "\x06schema\x18\x02 \x01(\v2\x1d.c1.connector.v2.TicketSchemaR\x06schema\x126\n" + + "\vannotations\x18\b \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa3\x01\n" + + "\"TicketsServiceCreateTicketResponse\x12/\n" + + "\x06ticket\x18\x01 \x01(\v2\x17.c1.connector.v2.TicketR\x06ticket\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12\x14\n" + + "\x05error\x18\x03 \x01(\tR\x05error\"h\n" + + "\x1eTicketsServiceGetTicketRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa0\x01\n" + + "\x1fTicketsServiceGetTicketResponse\x12/\n" + + "\x06ticket\x18\x01 \x01(\v2\x17.c1.connector.v2.TicketR\x06ticket\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12\x14\n" + + "\x05error\x18\x03 \x01(\tR\x05error\"\x85\x01\n" + + "&TicketsServiceBulkCreateTicketsRequest\x12[\n" + + "\x0fticket_requests\x18\x01 \x03(\v22.c1.connector.v2.TicketsServiceCreateTicketRequestR\x0eticketRequests\"x\n" + + "'TicketsServiceBulkCreateTicketsResponse\x12M\n" + + "\atickets\x18\x01 \x03(\v23.c1.connector.v2.TicketsServiceCreateTicketResponseR\atickets\"\x7f\n" + + "#TicketsServiceBulkGetTicketsRequest\x12X\n" + + "\x0fticket_requests\x18\x01 \x03(\v2/.c1.connector.v2.TicketsServiceGetTicketRequestR\x0eticketRequests\"r\n" + + "$TicketsServiceBulkGetTicketsResponse\x12J\n" + + "\atickets\x18\x01 \x03(\v20.c1.connector.v2.TicketsServiceGetTicketResponseR\atickets2\x8d\x06\n" + + "\x0eTicketsService\x12w\n" + + "\fCreateTicket\x122.c1.connector.v2.TicketsServiceCreateTicketRequest\x1a3.c1.connector.v2.TicketsServiceCreateTicketResponse\x12n\n" + + 
"\tGetTicket\x12/.c1.connector.v2.TicketsServiceGetTicketRequest\x1a0.c1.connector.v2.TicketsServiceGetTicketResponse\x12\x86\x01\n" + + "\x11ListTicketSchemas\x127.c1.connector.v2.TicketsServiceListTicketSchemasRequest\x1a8.c1.connector.v2.TicketsServiceListTicketSchemasResponse\x12\x80\x01\n" + + "\x0fGetTicketSchema\x125.c1.connector.v2.TicketsServiceGetTicketSchemaRequest\x1a6.c1.connector.v2.TicketsServiceGetTicketSchemaResponse\x12\x86\x01\n" + + "\x11BulkCreateTickets\x127.c1.connector.v2.TicketsServiceBulkCreateTicketsRequest\x1a8.c1.connector.v2.TicketsServiceBulkCreateTicketsResponse\x12}\n" + + "\x0eBulkGetTickets\x124.c1.connector.v2.TicketsServiceBulkGetTicketsRequest\x1a5.c1.connector.v2.TicketsServiceBulkGetTicketsResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + var file_c1_connector_v2_ticket_proto_msgTypes = make([]protoimpl.MessageInfo, 31) var file_c1_connector_v2_ticket_proto_goTypes = []any{ (*TicketSchema)(nil), // 0: c1.connector.v2.TicketSchema diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket_protoopaque.pb.go new file mode 100644 index 00000000..e1916abf --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/ticket_protoopaque.pb.go @@ -0,0 +1,3409 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/ticket.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TicketSchema struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Types *[]*TicketType `protobuf:"bytes,3,rep,name=types,proto3"` + xxx_hidden_Statuses *[]*TicketStatus `protobuf:"bytes,4,rep,name=statuses,proto3"` + xxx_hidden_CustomFields map[string]*TicketCustomField `protobuf:"bytes,5,rep,name=custom_fields,json=customFields,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,6,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketSchema) Reset() { + *x = TicketSchema{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketSchema) ProtoMessage() {} + +func (x *TicketSchema) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketSchema) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *TicketSchema) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *TicketSchema) GetTypes() []*TicketType { + if x != nil { + if x.xxx_hidden_Types != nil { + return *x.xxx_hidden_Types + } + } + return nil +} + +func (x *TicketSchema) GetStatuses() []*TicketStatus { + if x != nil { + if x.xxx_hidden_Statuses != nil { + return *x.xxx_hidden_Statuses + } + } + return nil +} + +func (x *TicketSchema) GetCustomFields() map[string]*TicketCustomField { + if x != nil { + return x.xxx_hidden_CustomFields + } + return nil +} + +func (x *TicketSchema) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketSchema) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *TicketSchema) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *TicketSchema) SetTypes(v []*TicketType) { + x.xxx_hidden_Types = &v +} + +func (x *TicketSchema) SetStatuses(v []*TicketStatus) { + x.xxx_hidden_Statuses = &v +} + +func (x *TicketSchema) SetCustomFields(v map[string]*TicketCustomField) { + x.xxx_hidden_CustomFields = v +} + +func (x *TicketSchema) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type TicketSchema_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + DisplayName string + Types []*TicketType + Statuses []*TicketStatus + CustomFields map[string]*TicketCustomField + Annotations []*anypb.Any +} + +func (b0 TicketSchema_builder) Build() *TicketSchema { + m0 := &TicketSchema{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Types = &b.Types + x.xxx_hidden_Statuses = &b.Statuses + x.xxx_hidden_CustomFields = b.CustomFields + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type TicketCustomField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Required bool `protobuf:"varint,3,opt,name=required,proto3"` + xxx_hidden_Value isTicketCustomField_Value `protobuf_oneof:"value"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomField) Reset() { + *x = TicketCustomField{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomField) ProtoMessage() {} + +func (x *TicketCustomField) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomField) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *TicketCustomField) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *TicketCustomField) GetRequired() bool { + if x != nil { + return x.xxx_hidden_Required + } + return false +} + +func (x *TicketCustomField) GetStringValue() *TicketCustomFieldStringValue { + if x != nil { + if x, ok := x.xxx_hidden_Value.(*ticketCustomField_StringValue); ok { + return x.StringValue + } + } + return nil +} + +func (x *TicketCustomField) GetStringValues() *TicketCustomFieldStringValues { + if x != nil { + if x, ok := x.xxx_hidden_Value.(*ticketCustomField_StringValues); ok { + return x.StringValues + } + } + return nil +} + +func (x *TicketCustomField) GetBoolValue() *TicketCustomFieldBoolValue { + if x != nil { + if x, ok := x.xxx_hidden_Value.(*ticketCustomField_BoolValue); ok { + return x.BoolValue + } + } + return nil +} + +func (x *TicketCustomField) GetTimestampValue() *TicketCustomFieldTimestampValue { + if x != nil { + if x, ok := x.xxx_hidden_Value.(*ticketCustomField_TimestampValue); ok { + return x.TimestampValue + } + } + return nil +} + +func (x *TicketCustomField) GetPickStringValue() *TicketCustomFieldPickStringValue { + if x != nil { + if x, ok := x.xxx_hidden_Value.(*ticketCustomField_PickStringValue); ok { + return x.PickStringValue + } + } + return nil +} + +func (x *TicketCustomField) GetPickMultipleStringValues() *TicketCustomFieldPickMultipleStringValues { + if x != nil { + if x, ok := x.xxx_hidden_Value.(*ticketCustomField_PickMultipleStringValues); ok { + return x.PickMultipleStringValues + } + } + return nil +} + +func (x *TicketCustomField) GetPickObjectValue() *TicketCustomFieldPickObjectValue { + if x != nil { + 
if x, ok := x.xxx_hidden_Value.(*ticketCustomField_PickObjectValue); ok { + return x.PickObjectValue + } + } + return nil +} + +func (x *TicketCustomField) GetPickMultipleObjectValues() *TicketCustomFieldPickMultipleObjectValues { + if x != nil { + if x, ok := x.xxx_hidden_Value.(*ticketCustomField_PickMultipleObjectValues); ok { + return x.PickMultipleObjectValues + } + } + return nil +} + +func (x *TicketCustomField) GetNumberValue() *TicketCustomFieldNumberValue { + if x != nil { + if x, ok := x.xxx_hidden_Value.(*ticketCustomField_NumberValue); ok { + return x.NumberValue + } + } + return nil +} + +func (x *TicketCustomField) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketCustomField) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *TicketCustomField) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *TicketCustomField) SetRequired(v bool) { + x.xxx_hidden_Required = v +} + +func (x *TicketCustomField) SetStringValue(v *TicketCustomFieldStringValue) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_StringValue{v} +} + +func (x *TicketCustomField) SetStringValues(v *TicketCustomFieldStringValues) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_StringValues{v} +} + +func (x *TicketCustomField) SetBoolValue(v *TicketCustomFieldBoolValue) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_BoolValue{v} +} + +func (x *TicketCustomField) SetTimestampValue(v *TicketCustomFieldTimestampValue) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_TimestampValue{v} +} + +func (x *TicketCustomField) SetPickStringValue(v *TicketCustomFieldPickStringValue) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_PickStringValue{v} +} + +func (x *TicketCustomField) SetPickMultipleStringValues(v *TicketCustomFieldPickMultipleStringValues) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_PickMultipleStringValues{v} +} + +func (x *TicketCustomField) SetPickObjectValue(v *TicketCustomFieldPickObjectValue) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_PickObjectValue{v} +} + +func (x *TicketCustomField) SetPickMultipleObjectValues(v *TicketCustomFieldPickMultipleObjectValues) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_PickMultipleObjectValues{v} +} + +func (x *TicketCustomField) SetNumberValue(v *TicketCustomFieldNumberValue) { + if v == nil { + x.xxx_hidden_Value = nil + return + } + x.xxx_hidden_Value = &ticketCustomField_NumberValue{v} +} + +func (x *TicketCustomField) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *TicketCustomField) HasValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_Value != nil +} + +func (x *TicketCustomField) HasStringValue() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_StringValue) + return ok +} + +func (x *TicketCustomField) HasStringValues() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_StringValues) + return ok +} + +func (x *TicketCustomField) HasBoolValue() bool { + if x == nil { + return 
false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_BoolValue) + return ok +} + +func (x *TicketCustomField) HasTimestampValue() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_TimestampValue) + return ok +} + +func (x *TicketCustomField) HasPickStringValue() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_PickStringValue) + return ok +} + +func (x *TicketCustomField) HasPickMultipleStringValues() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_PickMultipleStringValues) + return ok +} + +func (x *TicketCustomField) HasPickObjectValue() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_PickObjectValue) + return ok +} + +func (x *TicketCustomField) HasPickMultipleObjectValues() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_PickMultipleObjectValues) + return ok +} + +func (x *TicketCustomField) HasNumberValue() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Value.(*ticketCustomField_NumberValue) + return ok +} + +func (x *TicketCustomField) ClearValue() { + x.xxx_hidden_Value = nil +} + +func (x *TicketCustomField) ClearStringValue() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_StringValue); ok { + x.xxx_hidden_Value = nil + } +} + +func (x *TicketCustomField) ClearStringValues() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_StringValues); ok { + x.xxx_hidden_Value = nil + } +} + +func (x *TicketCustomField) ClearBoolValue() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_BoolValue); ok { + x.xxx_hidden_Value = nil + } +} + +func (x *TicketCustomField) ClearTimestampValue() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_TimestampValue); ok { + x.xxx_hidden_Value = nil + } +} + +func (x *TicketCustomField) ClearPickStringValue() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_PickStringValue); ok { + x.xxx_hidden_Value = nil + } +} + +func (x *TicketCustomField) ClearPickMultipleStringValues() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_PickMultipleStringValues); ok { + x.xxx_hidden_Value = nil + } +} + +func (x *TicketCustomField) ClearPickObjectValue() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_PickObjectValue); ok { + x.xxx_hidden_Value = nil + } +} + +func (x *TicketCustomField) ClearPickMultipleObjectValues() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_PickMultipleObjectValues); ok { + x.xxx_hidden_Value = nil + } +} + +func (x *TicketCustomField) ClearNumberValue() { + if _, ok := x.xxx_hidden_Value.(*ticketCustomField_NumberValue); ok { + x.xxx_hidden_Value = nil + } +} + +const TicketCustomField_Value_not_set_case case_TicketCustomField_Value = 0 +const TicketCustomField_StringValue_case case_TicketCustomField_Value = 100 +const TicketCustomField_StringValues_case case_TicketCustomField_Value = 101 +const TicketCustomField_BoolValue_case case_TicketCustomField_Value = 102 +const TicketCustomField_TimestampValue_case case_TicketCustomField_Value = 103 +const TicketCustomField_PickStringValue_case case_TicketCustomField_Value = 104 +const TicketCustomField_PickMultipleStringValues_case case_TicketCustomField_Value = 105 +const TicketCustomField_PickObjectValue_case case_TicketCustomField_Value = 106 +const TicketCustomField_PickMultipleObjectValues_case case_TicketCustomField_Value = 107 +const TicketCustomField_NumberValue_case case_TicketCustomField_Value = 108 + +func 
(x *TicketCustomField) WhichValue() case_TicketCustomField_Value { + if x == nil { + return TicketCustomField_Value_not_set_case + } + switch x.xxx_hidden_Value.(type) { + case *ticketCustomField_StringValue: + return TicketCustomField_StringValue_case + case *ticketCustomField_StringValues: + return TicketCustomField_StringValues_case + case *ticketCustomField_BoolValue: + return TicketCustomField_BoolValue_case + case *ticketCustomField_TimestampValue: + return TicketCustomField_TimestampValue_case + case *ticketCustomField_PickStringValue: + return TicketCustomField_PickStringValue_case + case *ticketCustomField_PickMultipleStringValues: + return TicketCustomField_PickMultipleStringValues_case + case *ticketCustomField_PickObjectValue: + return TicketCustomField_PickObjectValue_case + case *ticketCustomField_PickMultipleObjectValues: + return TicketCustomField_PickMultipleObjectValues_case + case *ticketCustomField_NumberValue: + return TicketCustomField_NumberValue_case + default: + return TicketCustomField_Value_not_set_case + } +} + +type TicketCustomField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + DisplayName string + Required bool + // Fields of oneof xxx_hidden_Value: + StringValue *TicketCustomFieldStringValue + StringValues *TicketCustomFieldStringValues + BoolValue *TicketCustomFieldBoolValue + TimestampValue *TicketCustomFieldTimestampValue + PickStringValue *TicketCustomFieldPickStringValue + PickMultipleStringValues *TicketCustomFieldPickMultipleStringValues + PickObjectValue *TicketCustomFieldPickObjectValue + PickMultipleObjectValues *TicketCustomFieldPickMultipleObjectValues + NumberValue *TicketCustomFieldNumberValue + // -- end of xxx_hidden_Value + Annotations []*anypb.Any +} + +func (b0 TicketCustomField_builder) Build() *TicketCustomField { + m0 := &TicketCustomField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Required = b.Required + if b.StringValue != nil { + x.xxx_hidden_Value = &ticketCustomField_StringValue{b.StringValue} + } + if b.StringValues != nil { + x.xxx_hidden_Value = &ticketCustomField_StringValues{b.StringValues} + } + if b.BoolValue != nil { + x.xxx_hidden_Value = &ticketCustomField_BoolValue{b.BoolValue} + } + if b.TimestampValue != nil { + x.xxx_hidden_Value = &ticketCustomField_TimestampValue{b.TimestampValue} + } + if b.PickStringValue != nil { + x.xxx_hidden_Value = &ticketCustomField_PickStringValue{b.PickStringValue} + } + if b.PickMultipleStringValues != nil { + x.xxx_hidden_Value = &ticketCustomField_PickMultipleStringValues{b.PickMultipleStringValues} + } + if b.PickObjectValue != nil { + x.xxx_hidden_Value = &ticketCustomField_PickObjectValue{b.PickObjectValue} + } + if b.PickMultipleObjectValues != nil { + x.xxx_hidden_Value = &ticketCustomField_PickMultipleObjectValues{b.PickMultipleObjectValues} + } + if b.NumberValue != nil { + x.xxx_hidden_Value = &ticketCustomField_NumberValue{b.NumberValue} + } + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type case_TicketCustomField_Value protoreflect.FieldNumber + +func (x case_TicketCustomField_Value) String() string { + md := file_c1_connector_v2_ticket_proto_msgTypes[1].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isTicketCustomField_Value interface { + isTicketCustomField_Value() +} + +type ticketCustomField_StringValue struct { + StringValue 
*TicketCustomFieldStringValue `protobuf:"bytes,100,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type ticketCustomField_StringValues struct { + StringValues *TicketCustomFieldStringValues `protobuf:"bytes,101,opt,name=string_values,json=stringValues,proto3,oneof"` +} + +type ticketCustomField_BoolValue struct { + BoolValue *TicketCustomFieldBoolValue `protobuf:"bytes,102,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type ticketCustomField_TimestampValue struct { + TimestampValue *TicketCustomFieldTimestampValue `protobuf:"bytes,103,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` +} + +type ticketCustomField_PickStringValue struct { + PickStringValue *TicketCustomFieldPickStringValue `protobuf:"bytes,104,opt,name=pick_string_value,json=pickStringValue,proto3,oneof"` +} + +type ticketCustomField_PickMultipleStringValues struct { + PickMultipleStringValues *TicketCustomFieldPickMultipleStringValues `protobuf:"bytes,105,opt,name=pick_multiple_string_values,json=pickMultipleStringValues,proto3,oneof"` +} + +type ticketCustomField_PickObjectValue struct { + PickObjectValue *TicketCustomFieldPickObjectValue `protobuf:"bytes,106,opt,name=pick_object_value,json=pickObjectValue,proto3,oneof"` +} + +type ticketCustomField_PickMultipleObjectValues struct { + PickMultipleObjectValues *TicketCustomFieldPickMultipleObjectValues `protobuf:"bytes,107,opt,name=pick_multiple_object_values,json=pickMultipleObjectValues,proto3,oneof"` +} + +type ticketCustomField_NumberValue struct { + NumberValue *TicketCustomFieldNumberValue `protobuf:"bytes,108,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +func (*ticketCustomField_StringValue) isTicketCustomField_Value() {} + +func (*ticketCustomField_StringValues) isTicketCustomField_Value() {} + +func (*ticketCustomField_BoolValue) isTicketCustomField_Value() {} + +func (*ticketCustomField_TimestampValue) isTicketCustomField_Value() {} + +func (*ticketCustomField_PickStringValue) isTicketCustomField_Value() {} + +func (*ticketCustomField_PickMultipleStringValues) isTicketCustomField_Value() {} + +func (*ticketCustomField_PickObjectValue) isTicketCustomField_Value() {} + +func (*ticketCustomField_PickMultipleObjectValues) isTicketCustomField_Value() {} + +func (*ticketCustomField_NumberValue) isTicketCustomField_Value() {} + +type TicketCustomFieldStringValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value string `protobuf:"bytes,1,opt,name=value,proto3"` + xxx_hidden_DefaultValue string `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldStringValue) Reset() { + *x = TicketCustomFieldStringValue{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldStringValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldStringValue) ProtoMessage() {} + +func (x *TicketCustomFieldStringValue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldStringValue) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *TicketCustomFieldStringValue) 
GetDefaultValue() string { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return "" +} + +func (x *TicketCustomFieldStringValue) SetValue(v string) { + x.xxx_hidden_Value = v +} + +func (x *TicketCustomFieldStringValue) SetDefaultValue(v string) { + x.xxx_hidden_DefaultValue = v +} + +type TicketCustomFieldStringValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value string + DefaultValue string +} + +func (b0 TicketCustomFieldStringValue_builder) Build() *TicketCustomFieldStringValue { + m0 := &TicketCustomFieldStringValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + x.xxx_hidden_DefaultValue = b.DefaultValue + return m0 +} + +type TicketCustomFieldStringValues struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Values []string `protobuf:"bytes,1,rep,name=values,proto3"` + xxx_hidden_DefaultValues []string `protobuf:"bytes,2,rep,name=default_values,json=defaultValues,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldStringValues) Reset() { + *x = TicketCustomFieldStringValues{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldStringValues) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldStringValues) ProtoMessage() {} + +func (x *TicketCustomFieldStringValues) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldStringValues) GetValues() []string { + if x != nil { + return x.xxx_hidden_Values + } + return nil +} + +func (x *TicketCustomFieldStringValues) GetDefaultValues() []string { + if x != nil { + return x.xxx_hidden_DefaultValues + } + return nil +} + +func (x *TicketCustomFieldStringValues) SetValues(v []string) { + x.xxx_hidden_Values = v +} + +func (x *TicketCustomFieldStringValues) SetDefaultValues(v []string) { + x.xxx_hidden_DefaultValues = v +} + +type TicketCustomFieldStringValues_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Values []string + DefaultValues []string +} + +func (b0 TicketCustomFieldStringValues_builder) Build() *TicketCustomFieldStringValues { + m0 := &TicketCustomFieldStringValues{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Values = b.Values + x.xxx_hidden_DefaultValues = b.DefaultValues + return m0 +} + +type TicketCustomFieldBoolValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value bool `protobuf:"varint,1,opt,name=value,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldBoolValue) Reset() { + *x = TicketCustomFieldBoolValue{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldBoolValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldBoolValue) ProtoMessage() {} + +func (x *TicketCustomFieldBoolValue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldBoolValue) GetValue() bool { + if x != nil { + return x.xxx_hidden_Value + } + return false +} + +func (x *TicketCustomFieldBoolValue) SetValue(v bool) { + x.xxx_hidden_Value = v +} + +type TicketCustomFieldBoolValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value bool +} + +func (b0 TicketCustomFieldBoolValue_builder) Build() *TicketCustomFieldBoolValue { + m0 := &TicketCustomFieldBoolValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + return m0 +} + +type TicketCustomFieldNumberValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value *wrapperspb.FloatValue `protobuf:"bytes,1,opt,name=value,proto3"` + xxx_hidden_DefaultValue *wrapperspb.FloatValue `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldNumberValue) Reset() { + *x = TicketCustomFieldNumberValue{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldNumberValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldNumberValue) ProtoMessage() {} + +func (x *TicketCustomFieldNumberValue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldNumberValue) GetValue() *wrapperspb.FloatValue { + if x != nil { + return x.xxx_hidden_Value + } + return nil +} + +func (x *TicketCustomFieldNumberValue) GetDefaultValue() *wrapperspb.FloatValue { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *TicketCustomFieldNumberValue) SetValue(v *wrapperspb.FloatValue) { + x.xxx_hidden_Value = v +} + +func (x *TicketCustomFieldNumberValue) SetDefaultValue(v *wrapperspb.FloatValue) { + x.xxx_hidden_DefaultValue = v +} + +func (x *TicketCustomFieldNumberValue) HasValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_Value != 
nil +} + +func (x *TicketCustomFieldNumberValue) HasDefaultValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_DefaultValue != nil +} + +func (x *TicketCustomFieldNumberValue) ClearValue() { + x.xxx_hidden_Value = nil +} + +func (x *TicketCustomFieldNumberValue) ClearDefaultValue() { + x.xxx_hidden_DefaultValue = nil +} + +type TicketCustomFieldNumberValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Value *wrapperspb.FloatValue + DefaultValue *wrapperspb.FloatValue +} + +func (b0 TicketCustomFieldNumberValue_builder) Build() *TicketCustomFieldNumberValue { + m0 := &TicketCustomFieldNumberValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + x.xxx_hidden_DefaultValue = b.DefaultValue + return m0 +} + +type TicketCustomFieldTimestampValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=value,proto3"` + xxx_hidden_DefaultValue *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldTimestampValue) Reset() { + *x = TicketCustomFieldTimestampValue{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldTimestampValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldTimestampValue) ProtoMessage() {} + +func (x *TicketCustomFieldTimestampValue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldTimestampValue) GetValue() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_Value + } + return nil +} + +func (x *TicketCustomFieldTimestampValue) GetDefaultValue() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *TicketCustomFieldTimestampValue) SetValue(v *timestamppb.Timestamp) { + x.xxx_hidden_Value = v +} + +func (x *TicketCustomFieldTimestampValue) SetDefaultValue(v *timestamppb.Timestamp) { + x.xxx_hidden_DefaultValue = v +} + +func (x *TicketCustomFieldTimestampValue) HasValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_Value != nil +} + +func (x *TicketCustomFieldTimestampValue) HasDefaultValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_DefaultValue != nil +} + +func (x *TicketCustomFieldTimestampValue) ClearValue() { + x.xxx_hidden_Value = nil +} + +func (x *TicketCustomFieldTimestampValue) ClearDefaultValue() { + x.xxx_hidden_DefaultValue = nil +} + +type TicketCustomFieldTimestampValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Value *timestamppb.Timestamp + DefaultValue *timestamppb.Timestamp +} + +func (b0 TicketCustomFieldTimestampValue_builder) Build() *TicketCustomFieldTimestampValue { + m0 := &TicketCustomFieldTimestampValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + x.xxx_hidden_DefaultValue = b.DefaultValue + return m0 +} + +type TicketCustomFieldPickStringValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value string `protobuf:"bytes,1,opt,name=value,proto3"` + xxx_hidden_AllowedValues []string `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3"` + xxx_hidden_DefaultValue string `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldPickStringValue) Reset() { + *x = TicketCustomFieldPickStringValue{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldPickStringValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldPickStringValue) ProtoMessage() {} + +func (x *TicketCustomFieldPickStringValue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldPickStringValue) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *TicketCustomFieldPickStringValue) GetAllowedValues() []string { + if x != nil { + return x.xxx_hidden_AllowedValues + } + return nil +} + +func (x *TicketCustomFieldPickStringValue) GetDefaultValue() string { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return "" +} + +func (x *TicketCustomFieldPickStringValue) SetValue(v string) { + x.xxx_hidden_Value = v +} + +func (x *TicketCustomFieldPickStringValue) SetAllowedValues(v []string) { + x.xxx_hidden_AllowedValues = v +} + +func (x *TicketCustomFieldPickStringValue) SetDefaultValue(v string) { + x.xxx_hidden_DefaultValue = v +} + +type TicketCustomFieldPickStringValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Value string + AllowedValues []string + DefaultValue string +} + +func (b0 TicketCustomFieldPickStringValue_builder) Build() *TicketCustomFieldPickStringValue { + m0 := &TicketCustomFieldPickStringValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + x.xxx_hidden_AllowedValues = b.AllowedValues + x.xxx_hidden_DefaultValue = b.DefaultValue + return m0 +} + +type TicketCustomFieldPickMultipleStringValues struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Values []string `protobuf:"bytes,1,rep,name=values,proto3"` + xxx_hidden_AllowedValues []string `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3"` + xxx_hidden_DefaultValues []string `protobuf:"bytes,3,rep,name=default_values,json=defaultValues,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldPickMultipleStringValues) Reset() { + *x = TicketCustomFieldPickMultipleStringValues{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldPickMultipleStringValues) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldPickMultipleStringValues) ProtoMessage() {} + +func (x *TicketCustomFieldPickMultipleStringValues) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldPickMultipleStringValues) GetValues() []string { + if x != nil { + return x.xxx_hidden_Values + } + return nil +} + +func (x *TicketCustomFieldPickMultipleStringValues) GetAllowedValues() []string { + if x != nil { + return x.xxx_hidden_AllowedValues + } + return nil +} + +func (x *TicketCustomFieldPickMultipleStringValues) GetDefaultValues() []string { + if x != nil { + return x.xxx_hidden_DefaultValues + } + return nil +} + +func (x *TicketCustomFieldPickMultipleStringValues) SetValues(v []string) { + x.xxx_hidden_Values = v +} + +func (x *TicketCustomFieldPickMultipleStringValues) SetAllowedValues(v []string) { + x.xxx_hidden_AllowedValues = v +} + +func (x *TicketCustomFieldPickMultipleStringValues) SetDefaultValues(v []string) { + x.xxx_hidden_DefaultValues = v +} + +type TicketCustomFieldPickMultipleStringValues_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Values []string + AllowedValues []string + DefaultValues []string +} + +func (b0 TicketCustomFieldPickMultipleStringValues_builder) Build() *TicketCustomFieldPickMultipleStringValues { + m0 := &TicketCustomFieldPickMultipleStringValues{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Values = b.Values + x.xxx_hidden_AllowedValues = b.AllowedValues + x.xxx_hidden_DefaultValues = b.DefaultValues + return m0 +} + +type TicketCustomFieldPickObjectValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value *TicketCustomFieldObjectValue `protobuf:"bytes,1,opt,name=value,proto3"` + xxx_hidden_AllowedValues *[]*TicketCustomFieldObjectValue `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3"` + xxx_hidden_DefaultValue *TicketCustomFieldObjectValue `protobuf:"bytes,3,opt,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldPickObjectValue) Reset() { + *x = TicketCustomFieldPickObjectValue{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldPickObjectValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldPickObjectValue) ProtoMessage() {} + +func (x *TicketCustomFieldPickObjectValue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldPickObjectValue) GetValue() *TicketCustomFieldObjectValue { + if x != nil { + return x.xxx_hidden_Value + } + return nil +} + +func (x *TicketCustomFieldPickObjectValue) GetAllowedValues() []*TicketCustomFieldObjectValue { + if x != nil { + if x.xxx_hidden_AllowedValues != nil { + return *x.xxx_hidden_AllowedValues + } + } + return nil +} + +func (x *TicketCustomFieldPickObjectValue) GetDefaultValue() *TicketCustomFieldObjectValue { + if x != nil { + return x.xxx_hidden_DefaultValue + } + return nil +} + +func (x *TicketCustomFieldPickObjectValue) SetValue(v *TicketCustomFieldObjectValue) { + x.xxx_hidden_Value = v +} + +func (x *TicketCustomFieldPickObjectValue) SetAllowedValues(v []*TicketCustomFieldObjectValue) { + x.xxx_hidden_AllowedValues = &v +} + +func (x *TicketCustomFieldPickObjectValue) SetDefaultValue(v *TicketCustomFieldObjectValue) { + x.xxx_hidden_DefaultValue = v +} + +func (x *TicketCustomFieldPickObjectValue) HasValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_Value != nil +} + +func (x *TicketCustomFieldPickObjectValue) HasDefaultValue() bool { + if x == nil { + return false + } + return x.xxx_hidden_DefaultValue != nil +} + +func (x *TicketCustomFieldPickObjectValue) ClearValue() { + x.xxx_hidden_Value = nil +} + +func (x *TicketCustomFieldPickObjectValue) ClearDefaultValue() { + x.xxx_hidden_DefaultValue = nil +} + +type TicketCustomFieldPickObjectValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Value *TicketCustomFieldObjectValue + AllowedValues []*TicketCustomFieldObjectValue + DefaultValue *TicketCustomFieldObjectValue +} + +func (b0 TicketCustomFieldPickObjectValue_builder) Build() *TicketCustomFieldPickObjectValue { + m0 := &TicketCustomFieldPickObjectValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + x.xxx_hidden_AllowedValues = &b.AllowedValues + x.xxx_hidden_DefaultValue = b.DefaultValue + return m0 +} + +type TicketCustomFieldPickMultipleObjectValues struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Values *[]*TicketCustomFieldObjectValue `protobuf:"bytes,1,rep,name=values,proto3"` + xxx_hidden_AllowedValues *[]*TicketCustomFieldObjectValue `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3"` + xxx_hidden_DefaultValues *[]*TicketCustomFieldObjectValue `protobuf:"bytes,3,rep,name=default_values,json=defaultValues,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldPickMultipleObjectValues) Reset() { + *x = TicketCustomFieldPickMultipleObjectValues{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldPickMultipleObjectValues) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldPickMultipleObjectValues) ProtoMessage() {} + +func (x *TicketCustomFieldPickMultipleObjectValues) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldPickMultipleObjectValues) GetValues() []*TicketCustomFieldObjectValue { + if x != nil { + if x.xxx_hidden_Values != nil { + return *x.xxx_hidden_Values + } + } + return nil +} + +func (x *TicketCustomFieldPickMultipleObjectValues) GetAllowedValues() []*TicketCustomFieldObjectValue { + if x != nil { + if x.xxx_hidden_AllowedValues != nil { + return *x.xxx_hidden_AllowedValues + } + } + return nil +} + +func (x *TicketCustomFieldPickMultipleObjectValues) GetDefaultValues() []*TicketCustomFieldObjectValue { + if x != nil { + if x.xxx_hidden_DefaultValues != nil { + return *x.xxx_hidden_DefaultValues + } + } + return nil +} + +func (x *TicketCustomFieldPickMultipleObjectValues) SetValues(v []*TicketCustomFieldObjectValue) { + x.xxx_hidden_Values = &v +} + +func (x *TicketCustomFieldPickMultipleObjectValues) SetAllowedValues(v []*TicketCustomFieldObjectValue) { + x.xxx_hidden_AllowedValues = &v +} + +func (x *TicketCustomFieldPickMultipleObjectValues) SetDefaultValues(v []*TicketCustomFieldObjectValue) { + x.xxx_hidden_DefaultValues = &v +} + +type TicketCustomFieldPickMultipleObjectValues_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Values []*TicketCustomFieldObjectValue + AllowedValues []*TicketCustomFieldObjectValue + DefaultValues []*TicketCustomFieldObjectValue +} + +func (b0 TicketCustomFieldPickMultipleObjectValues_builder) Build() *TicketCustomFieldPickMultipleObjectValues { + m0 := &TicketCustomFieldPickMultipleObjectValues{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Values = &b.Values + x.xxx_hidden_AllowedValues = &b.AllowedValues + x.xxx_hidden_DefaultValues = &b.DefaultValues + return m0 +} + +type TicketCustomFieldObjectValue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketCustomFieldObjectValue) Reset() { + *x = TicketCustomFieldObjectValue{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketCustomFieldObjectValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketCustomFieldObjectValue) ProtoMessage() {} + +func (x *TicketCustomFieldObjectValue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketCustomFieldObjectValue) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *TicketCustomFieldObjectValue) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *TicketCustomFieldObjectValue) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *TicketCustomFieldObjectValue) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +type TicketCustomFieldObjectValue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + DisplayName string +} + +func (b0 TicketCustomFieldObjectValue_builder) Build() *TicketCustomFieldObjectValue { + m0 := &TicketCustomFieldObjectValue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + return m0 +} + +type TicketStatus struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketStatus) Reset() { + *x = TicketStatus{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketStatus) ProtoMessage() {} + +func (x *TicketStatus) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketStatus) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *TicketStatus) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *TicketStatus) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *TicketStatus) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +type TicketStatus_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + DisplayName string +} + +func (b0 TicketStatus_builder) Build() *TicketStatus { + m0 := &TicketStatus{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + return m0 +} + +type TicketsServiceGetTicketSchemaRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceGetTicketSchemaRequest) Reset() { + *x = TicketsServiceGetTicketSchemaRequest{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceGetTicketSchemaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceGetTicketSchemaRequest) ProtoMessage() {} + +func (x *TicketsServiceGetTicketSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceGetTicketSchemaRequest) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *TicketsServiceGetTicketSchemaRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketsServiceGetTicketSchemaRequest) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *TicketsServiceGetTicketSchemaRequest) SetAnnotations(v []*anypb.Any) 
{ + x.xxx_hidden_Annotations = &v +} + +type TicketsServiceGetTicketSchemaRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + Annotations []*anypb.Any +} + +func (b0 TicketsServiceGetTicketSchemaRequest_builder) Build() *TicketsServiceGetTicketSchemaRequest { + m0 := &TicketsServiceGetTicketSchemaRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type TicketsServiceGetTicketSchemaResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Schema *TicketSchema `protobuf:"bytes,1,opt,name=schema,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceGetTicketSchemaResponse) Reset() { + *x = TicketsServiceGetTicketSchemaResponse{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceGetTicketSchemaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceGetTicketSchemaResponse) ProtoMessage() {} + +func (x *TicketsServiceGetTicketSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceGetTicketSchemaResponse) GetSchema() *TicketSchema { + if x != nil { + return x.xxx_hidden_Schema + } + return nil +} + +func (x *TicketsServiceGetTicketSchemaResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketsServiceGetTicketSchemaResponse) SetSchema(v *TicketSchema) { + x.xxx_hidden_Schema = v +} + +func (x *TicketsServiceGetTicketSchemaResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *TicketsServiceGetTicketSchemaResponse) HasSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_Schema != nil +} + +func (x *TicketsServiceGetTicketSchemaResponse) ClearSchema() { + x.xxx_hidden_Schema = nil +} + +type TicketsServiceGetTicketSchemaResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Schema *TicketSchema + Annotations []*anypb.Any +} + +func (b0 TicketsServiceGetTicketSchemaResponse_builder) Build() *TicketsServiceGetTicketSchemaResponse { + m0 := &TicketsServiceGetTicketSchemaResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Schema = b.Schema + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type TicketsServiceListTicketSchemasRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_PageSize uint32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceListTicketSchemasRequest) Reset() { + *x = TicketsServiceListTicketSchemasRequest{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceListTicketSchemasRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceListTicketSchemasRequest) ProtoMessage() {} + +func (x *TicketsServiceListTicketSchemasRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceListTicketSchemasRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *TicketsServiceListTicketSchemasRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *TicketsServiceListTicketSchemasRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketsServiceListTicketSchemasRequest) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *TicketsServiceListTicketSchemasRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *TicketsServiceListTicketSchemasRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type TicketsServiceListTicketSchemasRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + PageSize uint32 + PageToken string + Annotations []*anypb.Any +} + +func (b0 TicketsServiceListTicketSchemasRequest_builder) Build() *TicketsServiceListTicketSchemasRequest { + m0 := &TicketsServiceListTicketSchemasRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type TicketsServiceListTicketSchemasResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*TicketSchema `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceListTicketSchemasResponse) Reset() { + *x = TicketsServiceListTicketSchemasResponse{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceListTicketSchemasResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceListTicketSchemasResponse) ProtoMessage() {} + +func (x *TicketsServiceListTicketSchemasResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceListTicketSchemasResponse) GetList() []*TicketSchema { + if x != nil { + if x.xxx_hidden_List != nil { + return *x.xxx_hidden_List + } + } + return nil +} + +func (x *TicketsServiceListTicketSchemasResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *TicketsServiceListTicketSchemasResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketsServiceListTicketSchemasResponse) SetList(v []*TicketSchema) { + x.xxx_hidden_List = &v +} + +func (x *TicketsServiceListTicketSchemasResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +func (x *TicketsServiceListTicketSchemasResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type TicketsServiceListTicketSchemasResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + List []*TicketSchema + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 TicketsServiceListTicketSchemasResponse_builder) Build() *TicketsServiceListTicketSchemasResponse { + m0 := &TicketsServiceListTicketSchemasResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_NextPageToken = b.NextPageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Ticket struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3"` + xxx_hidden_Assignees *[]*Resource `protobuf:"bytes,4,rep,name=assignees,proto3"` + xxx_hidden_Reporter *Resource `protobuf:"bytes,5,opt,name=reporter,proto3"` + xxx_hidden_Status *TicketStatus `protobuf:"bytes,7,opt,name=status,proto3"` + xxx_hidden_Type *TicketType `protobuf:"bytes,8,opt,name=type,proto3"` + xxx_hidden_Labels []string `protobuf:"bytes,9,rep,name=labels,proto3"` + xxx_hidden_Url string `protobuf:"bytes,10,opt,name=url,proto3"` + xxx_hidden_CustomFields map[string]*TicketCustomField `protobuf:"bytes,11,rep,name=custom_fields,json=customFields,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_CreatedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=created_at,json=createdAt,proto3"` + xxx_hidden_UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=updated_at,json=updatedAt,proto3"` + xxx_hidden_CompletedAt *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=completed_at,json=completedAt,proto3"` + xxx_hidden_RequestedFor *Resource `protobuf:"bytes,15,opt,name=requested_for,json=requestedFor,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Ticket) Reset() { + *x = Ticket{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Ticket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Ticket) ProtoMessage() {} + +func (x *Ticket) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Ticket) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *Ticket) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *Ticket) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Ticket) GetAssignees() []*Resource { + if x != nil { + if x.xxx_hidden_Assignees != nil { + return *x.xxx_hidden_Assignees + } + } + return nil +} + +func (x *Ticket) GetReporter() *Resource { + if x != nil { + return x.xxx_hidden_Reporter + } + return nil +} + +func (x *Ticket) GetStatus() *TicketStatus { + if x != nil { + return x.xxx_hidden_Status + } + return nil +} + +func (x *Ticket) GetType() *TicketType { + if x != nil { + return x.xxx_hidden_Type + } + return nil +} + +func (x *Ticket) GetLabels() []string { + if x != nil { + return x.xxx_hidden_Labels + } + return nil +} + +func (x *Ticket) GetUrl() string { + if x != nil { + return x.xxx_hidden_Url + } + return "" +} + +func (x *Ticket) 
GetCustomFields() map[string]*TicketCustomField { + if x != nil { + return x.xxx_hidden_CustomFields + } + return nil +} + +func (x *Ticket) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_CreatedAt + } + return nil +} + +func (x *Ticket) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_UpdatedAt + } + return nil +} + +func (x *Ticket) GetCompletedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_CompletedAt + } + return nil +} + +func (x *Ticket) GetRequestedFor() *Resource { + if x != nil { + return x.xxx_hidden_RequestedFor + } + return nil +} + +func (x *Ticket) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *Ticket) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *Ticket) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Ticket) SetAssignees(v []*Resource) { + x.xxx_hidden_Assignees = &v +} + +func (x *Ticket) SetReporter(v *Resource) { + x.xxx_hidden_Reporter = v +} + +func (x *Ticket) SetStatus(v *TicketStatus) { + x.xxx_hidden_Status = v +} + +func (x *Ticket) SetType(v *TicketType) { + x.xxx_hidden_Type = v +} + +func (x *Ticket) SetLabels(v []string) { + x.xxx_hidden_Labels = v +} + +func (x *Ticket) SetUrl(v string) { + x.xxx_hidden_Url = v +} + +func (x *Ticket) SetCustomFields(v map[string]*TicketCustomField) { + x.xxx_hidden_CustomFields = v +} + +func (x *Ticket) SetCreatedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_CreatedAt = v +} + +func (x *Ticket) SetUpdatedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_UpdatedAt = v +} + +func (x *Ticket) SetCompletedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_CompletedAt = v +} + +func (x *Ticket) SetRequestedFor(v *Resource) { + x.xxx_hidden_RequestedFor = v +} + +func (x *Ticket) HasReporter() bool { + if x == nil { + return false + } + return x.xxx_hidden_Reporter != nil +} + +func (x *Ticket) HasStatus() bool { + if x == nil { + return false + } + return x.xxx_hidden_Status != nil +} + +func (x *Ticket) HasType() bool { + if x == nil { + return false + } + return x.xxx_hidden_Type != nil +} + +func (x *Ticket) HasCreatedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_CreatedAt != nil +} + +func (x *Ticket) HasUpdatedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_UpdatedAt != nil +} + +func (x *Ticket) HasCompletedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_CompletedAt != nil +} + +func (x *Ticket) HasRequestedFor() bool { + if x == nil { + return false + } + return x.xxx_hidden_RequestedFor != nil +} + +func (x *Ticket) ClearReporter() { + x.xxx_hidden_Reporter = nil +} + +func (x *Ticket) ClearStatus() { + x.xxx_hidden_Status = nil +} + +func (x *Ticket) ClearType() { + x.xxx_hidden_Type = nil +} + +func (x *Ticket) ClearCreatedAt() { + x.xxx_hidden_CreatedAt = nil +} + +func (x *Ticket) ClearUpdatedAt() { + x.xxx_hidden_UpdatedAt = nil +} + +func (x *Ticket) ClearCompletedAt() { + x.xxx_hidden_CompletedAt = nil +} + +func (x *Ticket) ClearRequestedFor() { + x.xxx_hidden_RequestedFor = nil +} + +type Ticket_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + DisplayName string + Description string + Assignees []*Resource + Reporter *Resource + Status *TicketStatus + Type *TicketType + Labels []string + Url string + CustomFields map[string]*TicketCustomField + CreatedAt *timestamppb.Timestamp + UpdatedAt *timestamppb.Timestamp + CompletedAt *timestamppb.Timestamp + RequestedFor *Resource +} + +func (b0 Ticket_builder) Build() *Ticket { + m0 := &Ticket{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Assignees = &b.Assignees + x.xxx_hidden_Reporter = b.Reporter + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Labels = b.Labels + x.xxx_hidden_Url = b.Url + x.xxx_hidden_CustomFields = b.CustomFields + x.xxx_hidden_CreatedAt = b.CreatedAt + x.xxx_hidden_UpdatedAt = b.UpdatedAt + x.xxx_hidden_CompletedAt = b.CompletedAt + x.xxx_hidden_RequestedFor = b.RequestedFor + return m0 +} + +type TicketType struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketType) Reset() { + *x = TicketType{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketType) ProtoMessage() {} + +func (x *TicketType) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketType) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *TicketType) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *TicketType) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *TicketType) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +type TicketType_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + DisplayName string +} + +func (b0 TicketType_builder) Build() *TicketType { + m0 := &TicketType{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + return m0 +} + +type TicketRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3"` + xxx_hidden_Status *TicketStatus `protobuf:"bytes,3,opt,name=status,proto3"` + xxx_hidden_Type *TicketType `protobuf:"bytes,4,opt,name=type,proto3"` + xxx_hidden_Labels []string `protobuf:"bytes,5,rep,name=labels,proto3"` + xxx_hidden_CustomFields map[string]*TicketCustomField `protobuf:"bytes,6,rep,name=custom_fields,json=customFields,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_RequestedFor *Resource `protobuf:"bytes,7,opt,name=requested_for,json=requestedFor,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketRequest) Reset() { + *x = TicketRequest{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketRequest) ProtoMessage() {} + +func (x *TicketRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketRequest) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *TicketRequest) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *TicketRequest) GetStatus() *TicketStatus { + if x != nil { + return x.xxx_hidden_Status + } + return nil +} + +func (x *TicketRequest) GetType() *TicketType { + if x != nil { + return x.xxx_hidden_Type + } + return nil +} + +func (x *TicketRequest) GetLabels() []string { + if x != nil { + return x.xxx_hidden_Labels + } + return nil +} + +func (x *TicketRequest) GetCustomFields() map[string]*TicketCustomField { + if x != nil { + return x.xxx_hidden_CustomFields + } + return nil +} + +func (x *TicketRequest) GetRequestedFor() *Resource { + if x != nil { + return x.xxx_hidden_RequestedFor + } + return nil +} + +func (x *TicketRequest) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *TicketRequest) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *TicketRequest) SetStatus(v *TicketStatus) { + x.xxx_hidden_Status = v +} + +func (x *TicketRequest) SetType(v *TicketType) { + x.xxx_hidden_Type = v +} + +func (x *TicketRequest) SetLabels(v []string) { + x.xxx_hidden_Labels = v +} + +func (x *TicketRequest) SetCustomFields(v map[string]*TicketCustomField) { + x.xxx_hidden_CustomFields = v +} + +func (x *TicketRequest) SetRequestedFor(v *Resource) { + x.xxx_hidden_RequestedFor = v +} + +func (x *TicketRequest) HasStatus() bool { + if x == nil { + return false + } + return x.xxx_hidden_Status != nil +} + +func (x *TicketRequest) HasType() bool { + if x == nil { + return false + } + return x.xxx_hidden_Type != nil +} + +func (x *TicketRequest) HasRequestedFor() bool { + if x == nil { + return false + } + 
return x.xxx_hidden_RequestedFor != nil +} + +func (x *TicketRequest) ClearStatus() { + x.xxx_hidden_Status = nil +} + +func (x *TicketRequest) ClearType() { + x.xxx_hidden_Type = nil +} + +func (x *TicketRequest) ClearRequestedFor() { + x.xxx_hidden_RequestedFor = nil +} + +type TicketRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DisplayName string + Description string + Status *TicketStatus + Type *TicketType + Labels []string + CustomFields map[string]*TicketCustomField + RequestedFor *Resource +} + +func (b0 TicketRequest_builder) Build() *TicketRequest { + m0 := &TicketRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Labels = b.Labels + x.xxx_hidden_CustomFields = b.CustomFields + x.xxx_hidden_RequestedFor = b.RequestedFor + return m0 +} + +type TicketsServiceCreateTicketRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Request *TicketRequest `protobuf:"bytes,1,opt,name=request,proto3"` + xxx_hidden_Schema *TicketSchema `protobuf:"bytes,2,opt,name=schema,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,8,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceCreateTicketRequest) Reset() { + *x = TicketsServiceCreateTicketRequest{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceCreateTicketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceCreateTicketRequest) ProtoMessage() {} + +func (x *TicketsServiceCreateTicketRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceCreateTicketRequest) GetRequest() *TicketRequest { + if x != nil { + return x.xxx_hidden_Request + } + return nil +} + +func (x *TicketsServiceCreateTicketRequest) GetSchema() *TicketSchema { + if x != nil { + return x.xxx_hidden_Schema + } + return nil +} + +func (x *TicketsServiceCreateTicketRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketsServiceCreateTicketRequest) SetRequest(v *TicketRequest) { + x.xxx_hidden_Request = v +} + +func (x *TicketsServiceCreateTicketRequest) SetSchema(v *TicketSchema) { + x.xxx_hidden_Schema = v +} + +func (x *TicketsServiceCreateTicketRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *TicketsServiceCreateTicketRequest) HasRequest() bool { + if x == nil { + return false + } + return x.xxx_hidden_Request != nil +} + +func (x *TicketsServiceCreateTicketRequest) HasSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_Schema != nil +} + +func (x *TicketsServiceCreateTicketRequest) ClearRequest() { + x.xxx_hidden_Request = nil +} + +func (x *TicketsServiceCreateTicketRequest) ClearSchema() { + x.xxx_hidden_Schema = nil +} + +type TicketsServiceCreateTicketRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the 
builder. + + Request *TicketRequest + Schema *TicketSchema + Annotations []*anypb.Any +} + +func (b0 TicketsServiceCreateTicketRequest_builder) Build() *TicketsServiceCreateTicketRequest { + m0 := &TicketsServiceCreateTicketRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Request = b.Request + x.xxx_hidden_Schema = b.Schema + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +// TODO(lauren) maybe the error should be a separate proto so we can store retryable error +type TicketsServiceCreateTicketResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Ticket *Ticket `protobuf:"bytes,1,opt,name=ticket,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + xxx_hidden_Error string `protobuf:"bytes,3,opt,name=error,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceCreateTicketResponse) Reset() { + *x = TicketsServiceCreateTicketResponse{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceCreateTicketResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceCreateTicketResponse) ProtoMessage() {} + +func (x *TicketsServiceCreateTicketResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceCreateTicketResponse) GetTicket() *Ticket { + if x != nil { + return x.xxx_hidden_Ticket + } + return nil +} + +func (x *TicketsServiceCreateTicketResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketsServiceCreateTicketResponse) GetError() string { + if x != nil { + return x.xxx_hidden_Error + } + return "" +} + +func (x *TicketsServiceCreateTicketResponse) SetTicket(v *Ticket) { + x.xxx_hidden_Ticket = v +} + +func (x *TicketsServiceCreateTicketResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *TicketsServiceCreateTicketResponse) SetError(v string) { + x.xxx_hidden_Error = v +} + +func (x *TicketsServiceCreateTicketResponse) HasTicket() bool { + if x == nil { + return false + } + return x.xxx_hidden_Ticket != nil +} + +func (x *TicketsServiceCreateTicketResponse) ClearTicket() { + x.xxx_hidden_Ticket = nil +} + +type TicketsServiceCreateTicketResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Ticket *Ticket + Annotations []*anypb.Any + Error string +} + +func (b0 TicketsServiceCreateTicketResponse_builder) Build() *TicketsServiceCreateTicketResponse { + m0 := &TicketsServiceCreateTicketResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Ticket = b.Ticket + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Error = b.Error + return m0 +} + +type TicketsServiceGetTicketRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceGetTicketRequest) Reset() { + *x = TicketsServiceGetTicketRequest{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceGetTicketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceGetTicketRequest) ProtoMessage() {} + +func (x *TicketsServiceGetTicketRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceGetTicketRequest) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *TicketsServiceGetTicketRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketsServiceGetTicketRequest) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *TicketsServiceGetTicketRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type TicketsServiceGetTicketRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + Annotations []*anypb.Any +} + +func (b0 TicketsServiceGetTicketRequest_builder) Build() *TicketsServiceGetTicketRequest { + m0 := &TicketsServiceGetTicketRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type TicketsServiceGetTicketResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Ticket *Ticket `protobuf:"bytes,1,opt,name=ticket,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + xxx_hidden_Error string `protobuf:"bytes,3,opt,name=error,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceGetTicketResponse) Reset() { + *x = TicketsServiceGetTicketResponse{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceGetTicketResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceGetTicketResponse) ProtoMessage() {} + +func (x *TicketsServiceGetTicketResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceGetTicketResponse) GetTicket() *Ticket { + if x != nil { + return x.xxx_hidden_Ticket + } + return nil +} + +func (x *TicketsServiceGetTicketResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *TicketsServiceGetTicketResponse) GetError() string { + if x != nil { + return x.xxx_hidden_Error + } + return "" +} + +func (x *TicketsServiceGetTicketResponse) SetTicket(v *Ticket) { + x.xxx_hidden_Ticket = v +} + +func (x *TicketsServiceGetTicketResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *TicketsServiceGetTicketResponse) SetError(v string) { + x.xxx_hidden_Error = v +} + +func (x *TicketsServiceGetTicketResponse) HasTicket() bool { + if x == nil { + return false + } + return x.xxx_hidden_Ticket != nil +} + +func (x *TicketsServiceGetTicketResponse) ClearTicket() { + x.xxx_hidden_Ticket = nil +} + +type TicketsServiceGetTicketResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Ticket *Ticket + Annotations []*anypb.Any + Error string +} + +func (b0 TicketsServiceGetTicketResponse_builder) Build() *TicketsServiceGetTicketResponse { + m0 := &TicketsServiceGetTicketResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Ticket = b.Ticket + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Error = b.Error + return m0 +} + +type TicketsServiceBulkCreateTicketsRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_TicketRequests *[]*TicketsServiceCreateTicketRequest `protobuf:"bytes,1,rep,name=ticket_requests,json=ticketRequests,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceBulkCreateTicketsRequest) Reset() { + *x = TicketsServiceBulkCreateTicketsRequest{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceBulkCreateTicketsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceBulkCreateTicketsRequest) ProtoMessage() {} + +func (x *TicketsServiceBulkCreateTicketsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceBulkCreateTicketsRequest) GetTicketRequests() []*TicketsServiceCreateTicketRequest { + if x != nil { + if x.xxx_hidden_TicketRequests != nil { + return *x.xxx_hidden_TicketRequests + } + } + return nil +} + +func (x *TicketsServiceBulkCreateTicketsRequest) SetTicketRequests(v []*TicketsServiceCreateTicketRequest) { + x.xxx_hidden_TicketRequests = &v +} + +type TicketsServiceBulkCreateTicketsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + TicketRequests []*TicketsServiceCreateTicketRequest +} + +func (b0 TicketsServiceBulkCreateTicketsRequest_builder) Build() *TicketsServiceBulkCreateTicketsRequest { + m0 := &TicketsServiceBulkCreateTicketsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_TicketRequests = &b.TicketRequests + return m0 +} + +type TicketsServiceBulkCreateTicketsResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Tickets *[]*TicketsServiceCreateTicketResponse `protobuf:"bytes,1,rep,name=tickets,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceBulkCreateTicketsResponse) Reset() { + *x = TicketsServiceBulkCreateTicketsResponse{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceBulkCreateTicketsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceBulkCreateTicketsResponse) ProtoMessage() {} + +func (x *TicketsServiceBulkCreateTicketsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceBulkCreateTicketsResponse) GetTickets() []*TicketsServiceCreateTicketResponse { + if x != nil { + if x.xxx_hidden_Tickets != nil { + return *x.xxx_hidden_Tickets + } + } + return nil +} + +func (x *TicketsServiceBulkCreateTicketsResponse) SetTickets(v []*TicketsServiceCreateTicketResponse) { + x.xxx_hidden_Tickets = &v +} + +type TicketsServiceBulkCreateTicketsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Tickets []*TicketsServiceCreateTicketResponse +} + +func (b0 TicketsServiceBulkCreateTicketsResponse_builder) Build() *TicketsServiceBulkCreateTicketsResponse { + m0 := &TicketsServiceBulkCreateTicketsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Tickets = &b.Tickets + return m0 +} + +type TicketsServiceBulkGetTicketsRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_TicketRequests *[]*TicketsServiceGetTicketRequest `protobuf:"bytes,1,rep,name=ticket_requests,json=ticketRequests,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceBulkGetTicketsRequest) Reset() { + *x = TicketsServiceBulkGetTicketsRequest{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceBulkGetTicketsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceBulkGetTicketsRequest) ProtoMessage() {} + +func (x *TicketsServiceBulkGetTicketsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceBulkGetTicketsRequest) GetTicketRequests() []*TicketsServiceGetTicketRequest { + if x != nil { + if x.xxx_hidden_TicketRequests != nil { + return *x.xxx_hidden_TicketRequests + } + } + return nil +} + +func (x *TicketsServiceBulkGetTicketsRequest) SetTicketRequests(v []*TicketsServiceGetTicketRequest) { + x.xxx_hidden_TicketRequests = &v +} + +type TicketsServiceBulkGetTicketsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + TicketRequests []*TicketsServiceGetTicketRequest +} + +func (b0 TicketsServiceBulkGetTicketsRequest_builder) Build() *TicketsServiceBulkGetTicketsRequest { + m0 := &TicketsServiceBulkGetTicketsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_TicketRequests = &b.TicketRequests + return m0 +} + +type TicketsServiceBulkGetTicketsResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Tickets *[]*TicketsServiceGetTicketResponse `protobuf:"bytes,1,rep,name=tickets,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TicketsServiceBulkGetTicketsResponse) Reset() { + *x = TicketsServiceBulkGetTicketsResponse{} + mi := &file_c1_connector_v2_ticket_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TicketsServiceBulkGetTicketsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TicketsServiceBulkGetTicketsResponse) ProtoMessage() {} + +func (x *TicketsServiceBulkGetTicketsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_ticket_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *TicketsServiceBulkGetTicketsResponse) GetTickets() []*TicketsServiceGetTicketResponse { + if x != nil { + if x.xxx_hidden_Tickets != nil { + return *x.xxx_hidden_Tickets + } + } + return nil +} + +func (x *TicketsServiceBulkGetTicketsResponse) SetTickets(v []*TicketsServiceGetTicketResponse) { + x.xxx_hidden_Tickets = &v +} + +type TicketsServiceBulkGetTicketsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Tickets []*TicketsServiceGetTicketResponse +} + +func (b0 TicketsServiceBulkGetTicketsResponse_builder) Build() *TicketsServiceBulkGetTicketsResponse { + m0 := &TicketsServiceBulkGetTicketsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Tickets = &b.Tickets + return m0 +} + +var File_c1_connector_v2_ticket_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_ticket_proto_rawDesc = "" + + "\n" + + "\x1cc1/connector/v2/ticket.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17validate/validate.proto\"\xa2\x03\n" + + "\fTicketSchema\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x121\n" + + "\x05types\x18\x03 \x03(\v2\x1b.c1.connector.v2.TicketTypeR\x05types\x129\n" + + "\bstatuses\x18\x04 \x03(\v2\x1d.c1.connector.v2.TicketStatusR\bstatuses\x12T\n" + + "\rcustom_fields\x18\x05 \x03(\v2/.c1.connector.v2.TicketSchema.CustomFieldsEntryR\fcustomFields\x126\n" + + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1ac\n" + + "\x11CustomFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x128\n" + + "\x05value\x18\x02 \x01(\v2\".c1.connector.v2.TicketCustomFieldR\x05value:\x028\x01\"\x89\b\n" + + "\x11TicketCustomField\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x1a\n" + + "\brequired\x18\x03 \x01(\bR\brequired\x12R\n" + + "\fstring_value\x18d \x01(\v2-.c1.connector.v2.TicketCustomFieldStringValueH\x00R\vstringValue\x12U\n" + + "\rstring_values\x18e \x01(\v2..c1.connector.v2.TicketCustomFieldStringValuesH\x00R\fstringValues\x12L\n" + + "\n" + + "bool_value\x18f \x01(\v2+.c1.connector.v2.TicketCustomFieldBoolValueH\x00R\tboolValue\x12[\n" + + "\x0ftimestamp_value\x18g \x01(\v20.c1.connector.v2.TicketCustomFieldTimestampValueH\x00R\x0etimestampValue\x12_\n" + + "\x11pick_string_value\x18h \x01(\v21.c1.connector.v2.TicketCustomFieldPickStringValueH\x00R\x0fpickStringValue\x12{\n" + + "\x1bpick_multiple_string_values\x18i \x01(\v2:.c1.connector.v2.TicketCustomFieldPickMultipleStringValuesH\x00R\x18pickMultipleStringValues\x12_\n" + + "\x11pick_object_value\x18j \x01(\v21.c1.connector.v2.TicketCustomFieldPickObjectValueH\x00R\x0fpickObjectValue\x12{\n" + + "\x1bpick_multiple_object_values\x18k \x01(\v2:.c1.connector.v2.TicketCustomFieldPickMultipleObjectValuesH\x00R\x18pickMultipleObjectValues\x12R\n" + + "\fnumber_value\x18l \x01(\v2-.c1.connector.v2.TicketCustomFieldNumberValueH\x00R\vnumberValue\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotationsB\a\n" + + "\x05value\"Y\n" + + "\x1cTicketCustomFieldStringValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\x12#\n" + + "\rdefault_value\x18\x02 \x01(\tR\fdefaultValue\"^\n" + + "\x1dTicketCustomFieldStringValues\x12\x16\n" + + "\x06values\x18\x01 \x03(\tR\x06values\x12%\n" + + "\x0edefault_values\x18\x02 \x03(\tR\rdefaultValues\"2\n" + + "\x1aTicketCustomFieldBoolValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\bR\x05value\"\x93\x01\n" + + "\x1cTicketCustomFieldNumberValue\x121\n" + + "\x05value\x18\x01 \x01(\v2\x1b.google.protobuf.FloatValueR\x05value\x12@\n" + + "\rdefault_value\x18\x02 \x01(\v2\x1b.google.protobuf.FloatValueR\fdefaultValue\"\x94\x01\n" + + "\x1fTicketCustomFieldTimestampValue\x120\n" + + "\x05value\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\x05value\x12?\n" + + "\rdefault_value\x18\x02 
\x01(\v2\x1a.google.protobuf.TimestampR\fdefaultValue\"\x84\x01\n" + + " TicketCustomFieldPickStringValue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\x12%\n" + + "\x0eallowed_values\x18\x02 \x03(\tR\rallowedValues\x12#\n" + + "\rdefault_value\x18\x03 \x01(\tR\fdefaultValue\"\x91\x01\n" + + ")TicketCustomFieldPickMultipleStringValues\x12\x16\n" + + "\x06values\x18\x01 \x03(\tR\x06values\x12%\n" + + "\x0eallowed_values\x18\x02 \x03(\tR\rallowedValues\x12%\n" + + "\x0edefault_values\x18\x03 \x03(\tR\rdefaultValues\"\x91\x02\n" + + " TicketCustomFieldPickObjectValue\x12C\n" + + "\x05value\x18\x01 \x01(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\x05value\x12T\n" + + "\x0eallowed_values\x18\x02 \x03(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\rallowedValues\x12R\n" + + "\rdefault_value\x18\x03 \x01(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\fdefaultValue\"\x9e\x02\n" + + ")TicketCustomFieldPickMultipleObjectValues\x12E\n" + + "\x06values\x18\x01 \x03(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\x06values\x12T\n" + + "\x0eallowed_values\x18\x02 \x03(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\rallowedValues\x12T\n" + + "\x0edefault_values\x18\x03 \x03(\v2-.c1.connector.v2.TicketCustomFieldObjectValueR\rdefaultValues\"Q\n" + + "\x1cTicketCustomFieldObjectValue\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\"A\n" + + "\fTicketStatus\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\"n\n" + + "$TicketsServiceGetTicketSchemaRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x96\x01\n" + + "%TicketsServiceGetTicketSchemaResponse\x125\n" + + "\x06schema\x18\x01 \x01(\v2\x1d.c1.connector.v2.TicketSchemaR\x06schema\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xb7\x01\n" + + "&TicketsServiceListTicketSchemasRequest\x12'\n" + + "\tpage_size\x18\x01 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12,\n" + + "\n" + + "page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xcb\x01\n" + + "'TicketsServiceListTicketSchemasResponse\x121\n" + + "\x04list\x18\x01 \x03(\v2\x1d.c1.connector.v2.TicketSchemaR\x04list\x125\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x89\x06\n" + + "\x06Ticket\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x127\n" + + "\tassignees\x18\x04 \x03(\v2\x19.c1.connector.v2.ResourceR\tassignees\x125\n" + + "\breporter\x18\x05 \x01(\v2\x19.c1.connector.v2.ResourceR\breporter\x125\n" + + "\x06status\x18\a \x01(\v2\x1d.c1.connector.v2.TicketStatusR\x06status\x12/\n" + + "\x04type\x18\b \x01(\v2\x1b.c1.connector.v2.TicketTypeR\x04type\x12\x16\n" + + "\x06labels\x18\t \x03(\tR\x06labels\x12\x10\n" + + "\x03url\x18\n" + + " \x01(\tR\x03url\x12N\n" + + "\rcustom_fields\x18\v \x03(\v2).c1.connector.v2.Ticket.CustomFieldsEntryR\fcustomFields\x129\n" + + "\n" + + "created_at\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x129\n" + + "\n" + + "updated_at\x18\r \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAt\x12=\n" + + "\fcompleted_at\x18\x0e 
\x01(\v2\x1a.google.protobuf.TimestampR\vcompletedAt\x12>\n" + + "\rrequested_for\x18\x0f \x01(\v2\x19.c1.connector.v2.ResourceR\frequestedFor\x1ac\n" + + "\x11CustomFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x128\n" + + "\x05value\x18\x02 \x01(\v2\".c1.connector.v2.TicketCustomFieldR\x05value:\x028\x01\"?\n" + + "\n" + + "TicketType\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\"\xd0\x03\n" + + "\rTicketRequest\x12!\n" + + "\fdisplay_name\x18\x01 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x02 \x01(\tR\vdescription\x125\n" + + "\x06status\x18\x03 \x01(\v2\x1d.c1.connector.v2.TicketStatusR\x06status\x12/\n" + + "\x04type\x18\x04 \x01(\v2\x1b.c1.connector.v2.TicketTypeR\x04type\x12\x16\n" + + "\x06labels\x18\x05 \x03(\tR\x06labels\x12U\n" + + "\rcustom_fields\x18\x06 \x03(\v20.c1.connector.v2.TicketRequest.CustomFieldsEntryR\fcustomFields\x12>\n" + + "\rrequested_for\x18\a \x01(\v2\x19.c1.connector.v2.ResourceR\frequestedFor\x1ac\n" + + "\x11CustomFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x128\n" + + "\x05value\x18\x02 \x01(\v2\".c1.connector.v2.TicketCustomFieldR\x05value:\x028\x01\"\xcc\x01\n" + + "!TicketsServiceCreateTicketRequest\x128\n" + + "\arequest\x18\x01 \x01(\v2\x1e.c1.connector.v2.TicketRequestR\arequest\x125\n" + + "\x06schema\x18\x02 \x01(\v2\x1d.c1.connector.v2.TicketSchemaR\x06schema\x126\n" + + "\vannotations\x18\b \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa3\x01\n" + + "\"TicketsServiceCreateTicketResponse\x12/\n" + + "\x06ticket\x18\x01 \x01(\v2\x17.c1.connector.v2.TicketR\x06ticket\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12\x14\n" + + "\x05error\x18\x03 \x01(\tR\x05error\"h\n" + + "\x1eTicketsServiceGetTicketRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa0\x01\n" + + "\x1fTicketsServiceGetTicketResponse\x12/\n" + + "\x06ticket\x18\x01 \x01(\v2\x17.c1.connector.v2.TicketR\x06ticket\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12\x14\n" + + "\x05error\x18\x03 \x01(\tR\x05error\"\x85\x01\n" + + "&TicketsServiceBulkCreateTicketsRequest\x12[\n" + + "\x0fticket_requests\x18\x01 \x03(\v22.c1.connector.v2.TicketsServiceCreateTicketRequestR\x0eticketRequests\"x\n" + + "'TicketsServiceBulkCreateTicketsResponse\x12M\n" + + "\atickets\x18\x01 \x03(\v23.c1.connector.v2.TicketsServiceCreateTicketResponseR\atickets\"\x7f\n" + + "#TicketsServiceBulkGetTicketsRequest\x12X\n" + + "\x0fticket_requests\x18\x01 \x03(\v2/.c1.connector.v2.TicketsServiceGetTicketRequestR\x0eticketRequests\"r\n" + + "$TicketsServiceBulkGetTicketsResponse\x12J\n" + + "\atickets\x18\x01 \x03(\v20.c1.connector.v2.TicketsServiceGetTicketResponseR\atickets2\x8d\x06\n" + + "\x0eTicketsService\x12w\n" + + "\fCreateTicket\x122.c1.connector.v2.TicketsServiceCreateTicketRequest\x1a3.c1.connector.v2.TicketsServiceCreateTicketResponse\x12n\n" + + "\tGetTicket\x12/.c1.connector.v2.TicketsServiceGetTicketRequest\x1a0.c1.connector.v2.TicketsServiceGetTicketResponse\x12\x86\x01\n" + + "\x11ListTicketSchemas\x127.c1.connector.v2.TicketsServiceListTicketSchemasRequest\x1a8.c1.connector.v2.TicketsServiceListTicketSchemasResponse\x12\x80\x01\n" + + "\x0fGetTicketSchema\x125.c1.connector.v2.TicketsServiceGetTicketSchemaRequest\x1a6.c1.connector.v2.TicketsServiceGetTicketSchemaResponse\x12\x86\x01\n" + + 
"\x11BulkCreateTickets\x127.c1.connector.v2.TicketsServiceBulkCreateTicketsRequest\x1a8.c1.connector.v2.TicketsServiceBulkCreateTicketsResponse\x12}\n" + + "\x0eBulkGetTickets\x124.c1.connector.v2.TicketsServiceBulkGetTicketsRequest\x1a5.c1.connector.v2.TicketsServiceBulkGetTicketsResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_ticket_proto_msgTypes = make([]protoimpl.MessageInfo, 31) +var file_c1_connector_v2_ticket_proto_goTypes = []any{ + (*TicketSchema)(nil), // 0: c1.connector.v2.TicketSchema + (*TicketCustomField)(nil), // 1: c1.connector.v2.TicketCustomField + (*TicketCustomFieldStringValue)(nil), // 2: c1.connector.v2.TicketCustomFieldStringValue + (*TicketCustomFieldStringValues)(nil), // 3: c1.connector.v2.TicketCustomFieldStringValues + (*TicketCustomFieldBoolValue)(nil), // 4: c1.connector.v2.TicketCustomFieldBoolValue + (*TicketCustomFieldNumberValue)(nil), // 5: c1.connector.v2.TicketCustomFieldNumberValue + (*TicketCustomFieldTimestampValue)(nil), // 6: c1.connector.v2.TicketCustomFieldTimestampValue + (*TicketCustomFieldPickStringValue)(nil), // 7: c1.connector.v2.TicketCustomFieldPickStringValue + (*TicketCustomFieldPickMultipleStringValues)(nil), // 8: c1.connector.v2.TicketCustomFieldPickMultipleStringValues + (*TicketCustomFieldPickObjectValue)(nil), // 9: c1.connector.v2.TicketCustomFieldPickObjectValue + (*TicketCustomFieldPickMultipleObjectValues)(nil), // 10: c1.connector.v2.TicketCustomFieldPickMultipleObjectValues + (*TicketCustomFieldObjectValue)(nil), // 11: c1.connector.v2.TicketCustomFieldObjectValue + (*TicketStatus)(nil), // 12: c1.connector.v2.TicketStatus + (*TicketsServiceGetTicketSchemaRequest)(nil), // 13: c1.connector.v2.TicketsServiceGetTicketSchemaRequest + (*TicketsServiceGetTicketSchemaResponse)(nil), // 14: c1.connector.v2.TicketsServiceGetTicketSchemaResponse + (*TicketsServiceListTicketSchemasRequest)(nil), // 15: c1.connector.v2.TicketsServiceListTicketSchemasRequest + (*TicketsServiceListTicketSchemasResponse)(nil), // 16: c1.connector.v2.TicketsServiceListTicketSchemasResponse + (*Ticket)(nil), // 17: c1.connector.v2.Ticket + (*TicketType)(nil), // 18: c1.connector.v2.TicketType + (*TicketRequest)(nil), // 19: c1.connector.v2.TicketRequest + (*TicketsServiceCreateTicketRequest)(nil), // 20: c1.connector.v2.TicketsServiceCreateTicketRequest + (*TicketsServiceCreateTicketResponse)(nil), // 21: c1.connector.v2.TicketsServiceCreateTicketResponse + (*TicketsServiceGetTicketRequest)(nil), // 22: c1.connector.v2.TicketsServiceGetTicketRequest + (*TicketsServiceGetTicketResponse)(nil), // 23: c1.connector.v2.TicketsServiceGetTicketResponse + (*TicketsServiceBulkCreateTicketsRequest)(nil), // 24: c1.connector.v2.TicketsServiceBulkCreateTicketsRequest + (*TicketsServiceBulkCreateTicketsResponse)(nil), // 25: c1.connector.v2.TicketsServiceBulkCreateTicketsResponse + (*TicketsServiceBulkGetTicketsRequest)(nil), // 26: c1.connector.v2.TicketsServiceBulkGetTicketsRequest + (*TicketsServiceBulkGetTicketsResponse)(nil), // 27: c1.connector.v2.TicketsServiceBulkGetTicketsResponse + nil, // 28: c1.connector.v2.TicketSchema.CustomFieldsEntry + nil, // 29: c1.connector.v2.Ticket.CustomFieldsEntry + nil, // 30: c1.connector.v2.TicketRequest.CustomFieldsEntry + (*anypb.Any)(nil), // 31: google.protobuf.Any + (*wrapperspb.FloatValue)(nil), // 32: google.protobuf.FloatValue + (*timestamppb.Timestamp)(nil), // 33: google.protobuf.Timestamp + (*Resource)(nil), // 34: c1.connector.v2.Resource +} +var 
file_c1_connector_v2_ticket_proto_depIdxs = []int32{ + 18, // 0: c1.connector.v2.TicketSchema.types:type_name -> c1.connector.v2.TicketType + 12, // 1: c1.connector.v2.TicketSchema.statuses:type_name -> c1.connector.v2.TicketStatus + 28, // 2: c1.connector.v2.TicketSchema.custom_fields:type_name -> c1.connector.v2.TicketSchema.CustomFieldsEntry + 31, // 3: c1.connector.v2.TicketSchema.annotations:type_name -> google.protobuf.Any + 2, // 4: c1.connector.v2.TicketCustomField.string_value:type_name -> c1.connector.v2.TicketCustomFieldStringValue + 3, // 5: c1.connector.v2.TicketCustomField.string_values:type_name -> c1.connector.v2.TicketCustomFieldStringValues + 4, // 6: c1.connector.v2.TicketCustomField.bool_value:type_name -> c1.connector.v2.TicketCustomFieldBoolValue + 6, // 7: c1.connector.v2.TicketCustomField.timestamp_value:type_name -> c1.connector.v2.TicketCustomFieldTimestampValue + 7, // 8: c1.connector.v2.TicketCustomField.pick_string_value:type_name -> c1.connector.v2.TicketCustomFieldPickStringValue + 8, // 9: c1.connector.v2.TicketCustomField.pick_multiple_string_values:type_name -> c1.connector.v2.TicketCustomFieldPickMultipleStringValues + 9, // 10: c1.connector.v2.TicketCustomField.pick_object_value:type_name -> c1.connector.v2.TicketCustomFieldPickObjectValue + 10, // 11: c1.connector.v2.TicketCustomField.pick_multiple_object_values:type_name -> c1.connector.v2.TicketCustomFieldPickMultipleObjectValues + 5, // 12: c1.connector.v2.TicketCustomField.number_value:type_name -> c1.connector.v2.TicketCustomFieldNumberValue + 31, // 13: c1.connector.v2.TicketCustomField.annotations:type_name -> google.protobuf.Any + 32, // 14: c1.connector.v2.TicketCustomFieldNumberValue.value:type_name -> google.protobuf.FloatValue + 32, // 15: c1.connector.v2.TicketCustomFieldNumberValue.default_value:type_name -> google.protobuf.FloatValue + 33, // 16: c1.connector.v2.TicketCustomFieldTimestampValue.value:type_name -> google.protobuf.Timestamp + 33, // 17: c1.connector.v2.TicketCustomFieldTimestampValue.default_value:type_name -> google.protobuf.Timestamp + 11, // 18: c1.connector.v2.TicketCustomFieldPickObjectValue.value:type_name -> c1.connector.v2.TicketCustomFieldObjectValue + 11, // 19: c1.connector.v2.TicketCustomFieldPickObjectValue.allowed_values:type_name -> c1.connector.v2.TicketCustomFieldObjectValue + 11, // 20: c1.connector.v2.TicketCustomFieldPickObjectValue.default_value:type_name -> c1.connector.v2.TicketCustomFieldObjectValue + 11, // 21: c1.connector.v2.TicketCustomFieldPickMultipleObjectValues.values:type_name -> c1.connector.v2.TicketCustomFieldObjectValue + 11, // 22: c1.connector.v2.TicketCustomFieldPickMultipleObjectValues.allowed_values:type_name -> c1.connector.v2.TicketCustomFieldObjectValue + 11, // 23: c1.connector.v2.TicketCustomFieldPickMultipleObjectValues.default_values:type_name -> c1.connector.v2.TicketCustomFieldObjectValue + 31, // 24: c1.connector.v2.TicketsServiceGetTicketSchemaRequest.annotations:type_name -> google.protobuf.Any + 0, // 25: c1.connector.v2.TicketsServiceGetTicketSchemaResponse.schema:type_name -> c1.connector.v2.TicketSchema + 31, // 26: c1.connector.v2.TicketsServiceGetTicketSchemaResponse.annotations:type_name -> google.protobuf.Any + 31, // 27: c1.connector.v2.TicketsServiceListTicketSchemasRequest.annotations:type_name -> google.protobuf.Any + 0, // 28: c1.connector.v2.TicketsServiceListTicketSchemasResponse.list:type_name -> c1.connector.v2.TicketSchema + 31, // 29: 
c1.connector.v2.TicketsServiceListTicketSchemasResponse.annotations:type_name -> google.protobuf.Any + 34, // 30: c1.connector.v2.Ticket.assignees:type_name -> c1.connector.v2.Resource + 34, // 31: c1.connector.v2.Ticket.reporter:type_name -> c1.connector.v2.Resource + 12, // 32: c1.connector.v2.Ticket.status:type_name -> c1.connector.v2.TicketStatus + 18, // 33: c1.connector.v2.Ticket.type:type_name -> c1.connector.v2.TicketType + 29, // 34: c1.connector.v2.Ticket.custom_fields:type_name -> c1.connector.v2.Ticket.CustomFieldsEntry + 33, // 35: c1.connector.v2.Ticket.created_at:type_name -> google.protobuf.Timestamp + 33, // 36: c1.connector.v2.Ticket.updated_at:type_name -> google.protobuf.Timestamp + 33, // 37: c1.connector.v2.Ticket.completed_at:type_name -> google.protobuf.Timestamp + 34, // 38: c1.connector.v2.Ticket.requested_for:type_name -> c1.connector.v2.Resource + 12, // 39: c1.connector.v2.TicketRequest.status:type_name -> c1.connector.v2.TicketStatus + 18, // 40: c1.connector.v2.TicketRequest.type:type_name -> c1.connector.v2.TicketType + 30, // 41: c1.connector.v2.TicketRequest.custom_fields:type_name -> c1.connector.v2.TicketRequest.CustomFieldsEntry + 34, // 42: c1.connector.v2.TicketRequest.requested_for:type_name -> c1.connector.v2.Resource + 19, // 43: c1.connector.v2.TicketsServiceCreateTicketRequest.request:type_name -> c1.connector.v2.TicketRequest + 0, // 44: c1.connector.v2.TicketsServiceCreateTicketRequest.schema:type_name -> c1.connector.v2.TicketSchema + 31, // 45: c1.connector.v2.TicketsServiceCreateTicketRequest.annotations:type_name -> google.protobuf.Any + 17, // 46: c1.connector.v2.TicketsServiceCreateTicketResponse.ticket:type_name -> c1.connector.v2.Ticket + 31, // 47: c1.connector.v2.TicketsServiceCreateTicketResponse.annotations:type_name -> google.protobuf.Any + 31, // 48: c1.connector.v2.TicketsServiceGetTicketRequest.annotations:type_name -> google.protobuf.Any + 17, // 49: c1.connector.v2.TicketsServiceGetTicketResponse.ticket:type_name -> c1.connector.v2.Ticket + 31, // 50: c1.connector.v2.TicketsServiceGetTicketResponse.annotations:type_name -> google.protobuf.Any + 20, // 51: c1.connector.v2.TicketsServiceBulkCreateTicketsRequest.ticket_requests:type_name -> c1.connector.v2.TicketsServiceCreateTicketRequest + 21, // 52: c1.connector.v2.TicketsServiceBulkCreateTicketsResponse.tickets:type_name -> c1.connector.v2.TicketsServiceCreateTicketResponse + 22, // 53: c1.connector.v2.TicketsServiceBulkGetTicketsRequest.ticket_requests:type_name -> c1.connector.v2.TicketsServiceGetTicketRequest + 23, // 54: c1.connector.v2.TicketsServiceBulkGetTicketsResponse.tickets:type_name -> c1.connector.v2.TicketsServiceGetTicketResponse + 1, // 55: c1.connector.v2.TicketSchema.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField + 1, // 56: c1.connector.v2.Ticket.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField + 1, // 57: c1.connector.v2.TicketRequest.CustomFieldsEntry.value:type_name -> c1.connector.v2.TicketCustomField + 20, // 58: c1.connector.v2.TicketsService.CreateTicket:input_type -> c1.connector.v2.TicketsServiceCreateTicketRequest + 22, // 59: c1.connector.v2.TicketsService.GetTicket:input_type -> c1.connector.v2.TicketsServiceGetTicketRequest + 15, // 60: c1.connector.v2.TicketsService.ListTicketSchemas:input_type -> c1.connector.v2.TicketsServiceListTicketSchemasRequest + 13, // 61: c1.connector.v2.TicketsService.GetTicketSchema:input_type -> c1.connector.v2.TicketsServiceGetTicketSchemaRequest + 24, // 62: 
c1.connector.v2.TicketsService.BulkCreateTickets:input_type -> c1.connector.v2.TicketsServiceBulkCreateTicketsRequest + 26, // 63: c1.connector.v2.TicketsService.BulkGetTickets:input_type -> c1.connector.v2.TicketsServiceBulkGetTicketsRequest + 21, // 64: c1.connector.v2.TicketsService.CreateTicket:output_type -> c1.connector.v2.TicketsServiceCreateTicketResponse + 23, // 65: c1.connector.v2.TicketsService.GetTicket:output_type -> c1.connector.v2.TicketsServiceGetTicketResponse + 16, // 66: c1.connector.v2.TicketsService.ListTicketSchemas:output_type -> c1.connector.v2.TicketsServiceListTicketSchemasResponse + 14, // 67: c1.connector.v2.TicketsService.GetTicketSchema:output_type -> c1.connector.v2.TicketsServiceGetTicketSchemaResponse + 25, // 68: c1.connector.v2.TicketsService.BulkCreateTickets:output_type -> c1.connector.v2.TicketsServiceBulkCreateTicketsResponse + 27, // 69: c1.connector.v2.TicketsService.BulkGetTickets:output_type -> c1.connector.v2.TicketsServiceBulkGetTicketsResponse + 64, // [64:70] is the sub-list for method output_type + 58, // [58:64] is the sub-list for method input_type + 58, // [58:58] is the sub-list for extension type_name + 58, // [58:58] is the sub-list for extension extendee + 0, // [0:58] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_ticket_proto_init() } +func file_c1_connector_v2_ticket_proto_init() { + if File_c1_connector_v2_ticket_proto != nil { + return + } + file_c1_connector_v2_resource_proto_init() + file_c1_connector_v2_ticket_proto_msgTypes[1].OneofWrappers = []any{ + (*ticketCustomField_StringValue)(nil), + (*ticketCustomField_StringValues)(nil), + (*ticketCustomField_BoolValue)(nil), + (*ticketCustomField_TimestampValue)(nil), + (*ticketCustomField_PickStringValue)(nil), + (*ticketCustomField_PickMultipleStringValues)(nil), + (*ticketCustomField_PickObjectValue)(nil), + (*ticketCustomField_PickMultipleObjectValues)(nil), + (*ticketCustomField_NumberValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_ticket_proto_rawDesc), len(file_c1_connector_v2_ticket_proto_rawDesc)), + NumEnums: 0, + NumMessages: 31, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connector_v2_ticket_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_ticket_proto_depIdxs, + MessageInfos: file_c1_connector_v2_ticket_proto_msgTypes, + }.Build() + File_c1_connector_v2_ticket_proto = out.File + file_c1_connector_v2_ticket_proto_goTypes = nil + file_c1_connector_v2_ticket_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1/connector_wrapper.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1/connector_wrapper.pb.go index ce416ac6..2ea7f87a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1/connector_wrapper.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1/connector_wrapper.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connector_wrapper/v1/connector_wrapper.proto +//go:build !protoopaque + package v1 import ( @@ -12,7 +14,6 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -24,12 +25,13 @@ const ( ) type ServerConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - Credential *v1.Credential `protobuf:"bytes,1,opt,name=credential,proto3" json:"credential,omitempty"` - RateLimiterConfig *v11.RateLimiterConfig `protobuf:"bytes,2,opt,name=rate_limiter_config,json=rateLimiterConfig,proto3" json:"rate_limiter_config,omitempty"` - ListenPort uint32 `protobuf:"varint,3,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Credential *v1.Credential `protobuf:"bytes,1,opt,name=credential,proto3" json:"credential,omitempty"` + RateLimiterConfig *v11.RateLimiterConfig `protobuf:"bytes,2,opt,name=rate_limiter_config,json=rateLimiterConfig,proto3" json:"rate_limiter_config,omitempty"` + ListenPort uint32 `protobuf:"varint,3,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` // The port the connector (child process) will listen on for incoming connector requests. + SessionStoreListenPort uint32 `protobuf:"varint,4,opt,name=session_store_listen_port,json=sessionStoreListenPort,proto3" json:"session_store_listen_port,omitempty"` // The port the sdk (parent process) will listen on for incoming cache requests. + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerConfig) Reset() { @@ -57,11 +59,6 @@ func (x *ServerConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ServerConfig.ProtoReflect.Descriptor instead. 
-func (*ServerConfig) Descriptor() ([]byte, []int) { - return file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDescGZIP(), []int{0} -} - func (x *ServerConfig) GetCredential() *v1.Credential { if x != nil { return x.Credential @@ -83,48 +80,85 @@ func (x *ServerConfig) GetListenPort() uint32 { return 0 } -var File_c1_connector_wrapper_v1_connector_wrapper_proto protoreflect.FileDescriptor +func (x *ServerConfig) GetSessionStoreListenPort() uint32 { + if x != nil { + return x.SessionStoreListenPort + } + return 0 +} -var file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDesc = string([]byte{ - 0x0a, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x77, - 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x17, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, - 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x63, 0x31, 0x2f, 0x72, - 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x61, 0x74, 0x65, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x31, 0x2f, - 0x75, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x75, 0x74, 0x6c, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x0a, - 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x13, 0x72, 0x61, - 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, - 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, - 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, - 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDescOnce sync.Once - file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDescData []byte -) +func (x *ServerConfig) SetCredential(v *v1.Credential) { + x.Credential = v +} + +func (x *ServerConfig) SetRateLimiterConfig(v *v11.RateLimiterConfig) { + x.RateLimiterConfig = v +} + +func (x *ServerConfig) SetListenPort(v uint32) { + x.ListenPort = v +} + +func (x *ServerConfig) SetSessionStoreListenPort(v uint32) { + x.SessionStoreListenPort = v +} -func 
file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDescGZIP() []byte { - file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDescOnce.Do(func() { - file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDesc), len(file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDesc))) - }) - return file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDescData +func (x *ServerConfig) HasCredential() bool { + if x == nil { + return false + } + return x.Credential != nil +} + +func (x *ServerConfig) HasRateLimiterConfig() bool { + if x == nil { + return false + } + return x.RateLimiterConfig != nil +} + +func (x *ServerConfig) ClearCredential() { + x.Credential = nil +} + +func (x *ServerConfig) ClearRateLimiterConfig() { + x.RateLimiterConfig = nil } +type ServerConfig_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Credential *v1.Credential + RateLimiterConfig *v11.RateLimiterConfig + ListenPort uint32 + SessionStoreListenPort uint32 +} + +func (b0 ServerConfig_builder) Build() *ServerConfig { + m0 := &ServerConfig{} + b, x := &b0, m0 + _, _ = b, x + x.Credential = b.Credential + x.RateLimiterConfig = b.RateLimiterConfig + x.ListenPort = b.ListenPort + x.SessionStoreListenPort = b.SessionStoreListenPort + return m0 +} + +var File_c1_connector_wrapper_v1_connector_wrapper_proto protoreflect.FileDescriptor + +const file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDesc = "" + + "\n" + + "/c1/connector_wrapper/v1/connector_wrapper.proto\x12\x17c1.connector_wrapper.v1\x1a\x1fc1/ratelimit/v1/ratelimit.proto\x1a\x14c1/utls/v1/tls.proto\"\xf6\x01\n" + + "\fServerConfig\x126\n" + + "\n" + + "credential\x18\x01 \x01(\v2\x16.c1.utls.v1.CredentialR\n" + + "credential\x12R\n" + + "\x13rate_limiter_config\x18\x02 \x01(\v2\".c1.ratelimit.v1.RateLimiterConfigR\x11rateLimiterConfig\x12\x1f\n" + + "\vlisten_port\x18\x03 \x01(\rR\n" + + "listenPort\x129\n" + + "\x19session_store_listen_port\x18\x04 \x01(\rR\x16sessionStoreListenPortB>Z 0 { return ServerConfigMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1/connector_wrapper_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1/connector_wrapper_protoopaque.pb.go new file mode 100644 index 00000000..f3cb7afe --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1/connector_wrapper_protoopaque.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector_wrapper/v1/connector_wrapper.proto + +//go:build protoopaque + +package v1 + +import ( + v11 "github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1" + v1 "github.com/conductorone/baton-sdk/pb/c1/utls/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ServerConfig struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Credential *v1.Credential `protobuf:"bytes,1,opt,name=credential,proto3"` + xxx_hidden_RateLimiterConfig *v11.RateLimiterConfig `protobuf:"bytes,2,opt,name=rate_limiter_config,json=rateLimiterConfig,proto3"` + xxx_hidden_ListenPort uint32 `protobuf:"varint,3,opt,name=listen_port,json=listenPort,proto3"` + xxx_hidden_SessionStoreListenPort uint32 `protobuf:"varint,4,opt,name=session_store_listen_port,json=sessionStoreListenPort,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ServerConfig) Reset() { + *x = ServerConfig{} + mi := &file_c1_connector_wrapper_v1_connector_wrapper_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ServerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerConfig) ProtoMessage() {} + +func (x *ServerConfig) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_wrapper_v1_connector_wrapper_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ServerConfig) GetCredential() *v1.Credential { + if x != nil { + return x.xxx_hidden_Credential + } + return nil +} + +func (x *ServerConfig) GetRateLimiterConfig() *v11.RateLimiterConfig { + if x != nil { + return x.xxx_hidden_RateLimiterConfig + } + return nil +} + +func (x *ServerConfig) GetListenPort() uint32 { + if x != nil { + return x.xxx_hidden_ListenPort + } + return 0 +} + +func (x *ServerConfig) GetSessionStoreListenPort() uint32 { + if x != nil { + return x.xxx_hidden_SessionStoreListenPort + } + return 0 +} + +func (x *ServerConfig) SetCredential(v *v1.Credential) { + x.xxx_hidden_Credential = v +} + +func (x *ServerConfig) SetRateLimiterConfig(v *v11.RateLimiterConfig) { + x.xxx_hidden_RateLimiterConfig = v +} + +func (x *ServerConfig) SetListenPort(v uint32) { + x.xxx_hidden_ListenPort = v +} + +func (x *ServerConfig) SetSessionStoreListenPort(v uint32) { + x.xxx_hidden_SessionStoreListenPort = v +} + +func (x *ServerConfig) HasCredential() bool { + if x == nil { + return false + } + return x.xxx_hidden_Credential != nil +} + +func (x *ServerConfig) HasRateLimiterConfig() bool { + if x == nil { + return false + } + return x.xxx_hidden_RateLimiterConfig != nil +} + +func (x *ServerConfig) ClearCredential() { + x.xxx_hidden_Credential = nil +} + +func (x *ServerConfig) ClearRateLimiterConfig() { + x.xxx_hidden_RateLimiterConfig = nil +} + +type ServerConfig_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Credential *v1.Credential + RateLimiterConfig *v11.RateLimiterConfig + ListenPort uint32 + SessionStoreListenPort uint32 +} + +func (b0 ServerConfig_builder) Build() *ServerConfig { + m0 := &ServerConfig{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Credential = b.Credential + x.xxx_hidden_RateLimiterConfig = b.RateLimiterConfig + x.xxx_hidden_ListenPort = b.ListenPort + x.xxx_hidden_SessionStoreListenPort = b.SessionStoreListenPort + return m0 +} + +var File_c1_connector_wrapper_v1_connector_wrapper_proto protoreflect.FileDescriptor + +const file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDesc = "" + + "\n" + + "/c1/connector_wrapper/v1/connector_wrapper.proto\x12\x17c1.connector_wrapper.v1\x1a\x1fc1/ratelimit/v1/ratelimit.proto\x1a\x14c1/utls/v1/tls.proto\"\xf6\x01\n" + + "\fServerConfig\x126\n" + + "\n" + + "credential\x18\x01 \x01(\v2\x16.c1.utls.v1.CredentialR\n" + + "credential\x12R\n" + + "\x13rate_limiter_config\x18\x02 \x01(\v2\".c1.ratelimit.v1.RateLimiterConfigR\x11rateLimiterConfig\x12\x1f\n" + + "\vlisten_port\x18\x03 \x01(\rR\n" + + "listenPort\x129\n" + + "\x19session_store_listen_port\x18\x04 \x01(\rR\x16sessionStoreListenPortB>Z c1.utls.v1.Credential + 2, // 1: c1.connector_wrapper.v1.ServerConfig.rate_limiter_config:type_name -> c1.ratelimit.v1.RateLimiterConfig + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_c1_connector_wrapper_v1_connector_wrapper_proto_init() } +func file_c1_connector_wrapper_v1_connector_wrapper_proto_init() { + if File_c1_connector_wrapper_v1_connector_wrapper_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDesc), len(file_c1_connector_wrapper_v1_connector_wrapper_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_wrapper_v1_connector_wrapper_proto_goTypes, + DependencyIndexes: file_c1_connector_wrapper_v1_connector_wrapper_proto_depIdxs, + MessageInfos: file_c1_connector_wrapper_v1_connector_wrapper_proto_msgTypes, + }.Build() + File_c1_connector_wrapper_v1_connector_wrapper_proto = out.File + file_c1_connector_wrapper_v1_connector_wrapper_proto_goTypes = nil + file_c1_connector_wrapper_v1_connector_wrapper_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go index 4190d800..9a188337 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connectorapi/baton/v1/baton.proto +//go:build !protoopaque + package v1 import ( @@ -17,7 +19,6 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -78,13 +79,8 @@ func (x Task_Status) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use Task_Status.Descriptor instead. -func (Task_Status) EnumDescriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 0} -} - type Task struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Status Task_Status `protobuf:"varint,2,opt,name=status,proto3,enum=c1.connectorapi.baton.v1.Task_Status" json:"status,omitempty"` // Types that are valid to be assigned to TaskType: @@ -141,11 +137,6 @@ func (x *Task) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task.ProtoReflect.Descriptor instead. -func (*Task) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0} -} - func (x *Task) GetId() string { if x != nil { return x.Id @@ -363,6 +354,680 @@ func (x *Task) GetDebug() bool { return false } +func (x *Task) SetId(v string) { + x.Id = v +} + +func (x *Task) SetStatus(v Task_Status) { + x.Status = v +} + +func (x *Task) SetNone(v *Task_NoneTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_None{v} +} + +func (x *Task) SetHello(v *Task_HelloTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_Hello{v} +} + +func (x *Task) SetSyncFull(v *Task_SyncFullTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_SyncFull{v} +} + +func (x *Task) SetGrant(v *Task_GrantTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_Grant{v} +} + +func (x *Task) SetRevoke(v *Task_RevokeTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_Revoke{v} +} + +func (x *Task) SetCreateAccount(v *Task_CreateAccountTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_CreateAccount{v} +} + +func (x *Task) SetCreateResource(v *Task_CreateResourceTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_CreateResource{v} +} + +func (x *Task) SetDeleteResource(v *Task_DeleteResourceTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_DeleteResource{v} +} + +func (x *Task) SetRotateCredentials(v *Task_RotateCredentialsTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_RotateCredentials{v} +} + +func (x *Task) SetEventFeed(v *Task_EventFeedTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_EventFeed{v} +} + +func (x *Task) SetCreateTicketTask(v *Task_CreateTicketTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_CreateTicketTask_{v} +} + +func (x *Task) SetListTicketSchemas(v *Task_ListTicketSchemasTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_ListTicketSchemas{v} +} + +func (x *Task) SetGetTicket(v *Task_GetTicketTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_GetTicket{v} +} + +func (x *Task) SetBulkCreateTickets(v *Task_BulkCreateTicketsTask) { + if v == 
nil { + x.TaskType = nil + return + } + x.TaskType = &Task_BulkCreateTickets{v} +} + +func (x *Task) SetBulkGetTickets(v *Task_BulkGetTicketsTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_BulkGetTickets{v} +} + +func (x *Task) SetActionListSchemas(v *Task_ActionListSchemasTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_ActionListSchemas{v} +} + +func (x *Task) SetActionGetSchema(v *Task_ActionGetSchemaTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_ActionGetSchema{v} +} + +func (x *Task) SetActionInvoke(v *Task_ActionInvokeTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_ActionInvoke{v} +} + +func (x *Task) SetActionStatus(v *Task_ActionStatusTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_ActionStatus{v} +} + +func (x *Task) SetCreateSyncDiff(v *Task_CreateSyncDiffTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_CreateSyncDiff{v} +} + +func (x *Task) SetCompactSyncs(v *Task_CompactSyncs) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_CompactSyncs_{v} +} + +func (x *Task) SetDebug(v bool) { + x.Debug = v +} + +func (x *Task) HasTaskType() bool { + if x == nil { + return false + } + return x.TaskType != nil +} + +func (x *Task) HasNone() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_None) + return ok +} + +func (x *Task) HasHello() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_Hello) + return ok +} + +func (x *Task) HasSyncFull() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_SyncFull) + return ok +} + +func (x *Task) HasGrant() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_Grant) + return ok +} + +func (x *Task) HasRevoke() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_Revoke) + return ok +} + +func (x *Task) HasCreateAccount() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_CreateAccount) + return ok +} + +func (x *Task) HasCreateResource() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_CreateResource) + return ok +} + +func (x *Task) HasDeleteResource() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_DeleteResource) + return ok +} + +func (x *Task) HasRotateCredentials() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_RotateCredentials) + return ok +} + +func (x *Task) HasEventFeed() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_EventFeed) + return ok +} + +func (x *Task) HasCreateTicketTask() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_CreateTicketTask_) + return ok +} + +func (x *Task) HasListTicketSchemas() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_ListTicketSchemas) + return ok +} + +func (x *Task) HasGetTicket() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_GetTicket) + return ok +} + +func (x *Task) HasBulkCreateTickets() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_BulkCreateTickets) + return ok +} + +func (x *Task) HasBulkGetTickets() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_BulkGetTickets) + return ok +} + +func (x *Task) HasActionListSchemas() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_ActionListSchemas) + return ok +} + +func (x *Task) HasActionGetSchema() bool { + if x == nil { + 
return false + } + _, ok := x.TaskType.(*Task_ActionGetSchema) + return ok +} + +func (x *Task) HasActionInvoke() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_ActionInvoke) + return ok +} + +func (x *Task) HasActionStatus() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_ActionStatus) + return ok +} + +func (x *Task) HasCreateSyncDiff() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_CreateSyncDiff) + return ok +} + +func (x *Task) HasCompactSyncs() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_CompactSyncs_) + return ok +} + +func (x *Task) ClearTaskType() { + x.TaskType = nil +} + +func (x *Task) ClearNone() { + if _, ok := x.TaskType.(*Task_None); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearHello() { + if _, ok := x.TaskType.(*Task_Hello); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearSyncFull() { + if _, ok := x.TaskType.(*Task_SyncFull); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearGrant() { + if _, ok := x.TaskType.(*Task_Grant); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearRevoke() { + if _, ok := x.TaskType.(*Task_Revoke); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearCreateAccount() { + if _, ok := x.TaskType.(*Task_CreateAccount); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearCreateResource() { + if _, ok := x.TaskType.(*Task_CreateResource); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearDeleteResource() { + if _, ok := x.TaskType.(*Task_DeleteResource); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearRotateCredentials() { + if _, ok := x.TaskType.(*Task_RotateCredentials); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearEventFeed() { + if _, ok := x.TaskType.(*Task_EventFeed); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearCreateTicketTask() { + if _, ok := x.TaskType.(*Task_CreateTicketTask_); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearListTicketSchemas() { + if _, ok := x.TaskType.(*Task_ListTicketSchemas); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearGetTicket() { + if _, ok := x.TaskType.(*Task_GetTicket); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearBulkCreateTickets() { + if _, ok := x.TaskType.(*Task_BulkCreateTickets); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearBulkGetTickets() { + if _, ok := x.TaskType.(*Task_BulkGetTickets); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearActionListSchemas() { + if _, ok := x.TaskType.(*Task_ActionListSchemas); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearActionGetSchema() { + if _, ok := x.TaskType.(*Task_ActionGetSchema); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearActionInvoke() { + if _, ok := x.TaskType.(*Task_ActionInvoke); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearActionStatus() { + if _, ok := x.TaskType.(*Task_ActionStatus); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearCreateSyncDiff() { + if _, ok := x.TaskType.(*Task_CreateSyncDiff); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearCompactSyncs() { + if _, ok := x.TaskType.(*Task_CompactSyncs_); ok { + x.TaskType = nil + } +} + +const Task_TaskType_not_set_case case_Task_TaskType = 0 +const Task_None_case case_Task_TaskType = 100 +const Task_Hello_case case_Task_TaskType = 101 +const Task_SyncFull_case case_Task_TaskType = 102 +const Task_Grant_case case_Task_TaskType = 103 +const Task_Revoke_case case_Task_TaskType = 104 +const Task_CreateAccount_case case_Task_TaskType = 105 +const Task_CreateResource_case 
case_Task_TaskType = 106 +const Task_DeleteResource_case case_Task_TaskType = 107 +const Task_RotateCredentials_case case_Task_TaskType = 108 +const Task_EventFeed_case case_Task_TaskType = 109 +const Task_CreateTicketTask_case case_Task_TaskType = 110 +const Task_ListTicketSchemas_case case_Task_TaskType = 111 +const Task_GetTicket_case case_Task_TaskType = 112 +const Task_BulkCreateTickets_case case_Task_TaskType = 113 +const Task_BulkGetTickets_case case_Task_TaskType = 114 +const Task_ActionListSchemas_case case_Task_TaskType = 115 +const Task_ActionGetSchema_case case_Task_TaskType = 116 +const Task_ActionInvoke_case case_Task_TaskType = 117 +const Task_ActionStatus_case case_Task_TaskType = 118 +const Task_CreateSyncDiff_case case_Task_TaskType = 119 +const Task_CompactSyncs_case case_Task_TaskType = 120 + +func (x *Task) WhichTaskType() case_Task_TaskType { + if x == nil { + return Task_TaskType_not_set_case + } + switch x.TaskType.(type) { + case *Task_None: + return Task_None_case + case *Task_Hello: + return Task_Hello_case + case *Task_SyncFull: + return Task_SyncFull_case + case *Task_Grant: + return Task_Grant_case + case *Task_Revoke: + return Task_Revoke_case + case *Task_CreateAccount: + return Task_CreateAccount_case + case *Task_CreateResource: + return Task_CreateResource_case + case *Task_DeleteResource: + return Task_DeleteResource_case + case *Task_RotateCredentials: + return Task_RotateCredentials_case + case *Task_EventFeed: + return Task_EventFeed_case + case *Task_CreateTicketTask_: + return Task_CreateTicketTask_case + case *Task_ListTicketSchemas: + return Task_ListTicketSchemas_case + case *Task_GetTicket: + return Task_GetTicket_case + case *Task_BulkCreateTickets: + return Task_BulkCreateTickets_case + case *Task_BulkGetTickets: + return Task_BulkGetTickets_case + case *Task_ActionListSchemas: + return Task_ActionListSchemas_case + case *Task_ActionGetSchema: + return Task_ActionGetSchema_case + case *Task_ActionInvoke: + return Task_ActionInvoke_case + case *Task_ActionStatus: + return Task_ActionStatus_case + case *Task_CreateSyncDiff: + return Task_CreateSyncDiff_case + case *Task_CompactSyncs_: + return Task_CompactSyncs_case + default: + return Task_TaskType_not_set_case + } +} + +type Task_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
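+	// Usage note (illustrative, with hypothetical values): Task_builder mirrors the
+	// Task message. Set Id, Status, Debug and at most one TaskType oneof field;
+	// Build assigns the oneof fields in declaration order, so if several are
+	// non-nil the last one listed below wins. Sketch:
+	//
+	//	t := Task_builder{Id: "task-1", SyncFull: Task_SyncFullTask_builder{}.Build()}.Build()
+	//	_ = t.WhichTaskType() // Task_SyncFull_case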
+ + Id string + Status Task_Status + // Fields of oneof TaskType: + None *Task_NoneTask + Hello *Task_HelloTask + SyncFull *Task_SyncFullTask + Grant *Task_GrantTask + Revoke *Task_RevokeTask + CreateAccount *Task_CreateAccountTask + CreateResource *Task_CreateResourceTask + DeleteResource *Task_DeleteResourceTask + RotateCredentials *Task_RotateCredentialsTask + EventFeed *Task_EventFeedTask + CreateTicketTask *Task_CreateTicketTask + ListTicketSchemas *Task_ListTicketSchemasTask + GetTicket *Task_GetTicketTask + BulkCreateTickets *Task_BulkCreateTicketsTask + BulkGetTickets *Task_BulkGetTicketsTask + ActionListSchemas *Task_ActionListSchemasTask + ActionGetSchema *Task_ActionGetSchemaTask + ActionInvoke *Task_ActionInvokeTask + ActionStatus *Task_ActionStatusTask + CreateSyncDiff *Task_CreateSyncDiffTask + CompactSyncs *Task_CompactSyncs + // -- end of TaskType + Debug bool +} + +func (b0 Task_builder) Build() *Task { + m0 := &Task{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.Status = b.Status + if b.None != nil { + x.TaskType = &Task_None{b.None} + } + if b.Hello != nil { + x.TaskType = &Task_Hello{b.Hello} + } + if b.SyncFull != nil { + x.TaskType = &Task_SyncFull{b.SyncFull} + } + if b.Grant != nil { + x.TaskType = &Task_Grant{b.Grant} + } + if b.Revoke != nil { + x.TaskType = &Task_Revoke{b.Revoke} + } + if b.CreateAccount != nil { + x.TaskType = &Task_CreateAccount{b.CreateAccount} + } + if b.CreateResource != nil { + x.TaskType = &Task_CreateResource{b.CreateResource} + } + if b.DeleteResource != nil { + x.TaskType = &Task_DeleteResource{b.DeleteResource} + } + if b.RotateCredentials != nil { + x.TaskType = &Task_RotateCredentials{b.RotateCredentials} + } + if b.EventFeed != nil { + x.TaskType = &Task_EventFeed{b.EventFeed} + } + if b.CreateTicketTask != nil { + x.TaskType = &Task_CreateTicketTask_{b.CreateTicketTask} + } + if b.ListTicketSchemas != nil { + x.TaskType = &Task_ListTicketSchemas{b.ListTicketSchemas} + } + if b.GetTicket != nil { + x.TaskType = &Task_GetTicket{b.GetTicket} + } + if b.BulkCreateTickets != nil { + x.TaskType = &Task_BulkCreateTickets{b.BulkCreateTickets} + } + if b.BulkGetTickets != nil { + x.TaskType = &Task_BulkGetTickets{b.BulkGetTickets} + } + if b.ActionListSchemas != nil { + x.TaskType = &Task_ActionListSchemas{b.ActionListSchemas} + } + if b.ActionGetSchema != nil { + x.TaskType = &Task_ActionGetSchema{b.ActionGetSchema} + } + if b.ActionInvoke != nil { + x.TaskType = &Task_ActionInvoke{b.ActionInvoke} + } + if b.ActionStatus != nil { + x.TaskType = &Task_ActionStatus{b.ActionStatus} + } + if b.CreateSyncDiff != nil { + x.TaskType = &Task_CreateSyncDiff{b.CreateSyncDiff} + } + if b.CompactSyncs != nil { + x.TaskType = &Task_CompactSyncs_{b.CompactSyncs} + } + x.Debug = b.Debug + return m0 +} + +type case_Task_TaskType protoreflect.FieldNumber + +func (x case_Task_TaskType) String() string { + md := file_c1_connectorapi_baton_v1_baton_proto_msgTypes[0].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isTask_TaskType interface { isTask_TaskType() } @@ -494,7 +1159,7 @@ func (*Task_CreateSyncDiff) isTask_TaskType() {} func (*Task_CompactSyncs_) isTask_TaskType() {} type BatonServiceHelloRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` TaskId string 
`protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` // You may say Hello anytime, but if you are responding to a task, you must provide the task_id BuildInfo *BatonServiceHelloRequest_BuildInfo `protobuf:"bytes,3,opt,name=build_info,json=buildInfo,proto3" json:"build_info,omitempty"` @@ -530,11 +1195,6 @@ func (x *BatonServiceHelloRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceHelloRequest.ProtoReflect.Descriptor instead. -func (*BatonServiceHelloRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{1} -} - func (x *BatonServiceHelloRequest) GetHostId() string { if x != nil { return x.HostId @@ -577,8 +1237,89 @@ func (x *BatonServiceHelloRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *BatonServiceHelloRequest) SetHostId(v string) { + x.HostId = v +} + +func (x *BatonServiceHelloRequest) SetTaskId(v string) { + x.TaskId = v +} + +func (x *BatonServiceHelloRequest) SetBuildInfo(v *BatonServiceHelloRequest_BuildInfo) { + x.BuildInfo = v +} + +func (x *BatonServiceHelloRequest) SetOsInfo(v *BatonServiceHelloRequest_OSInfo) { + x.OsInfo = v +} + +func (x *BatonServiceHelloRequest) SetConnectorMetadata(v *v2.ConnectorMetadata) { + x.ConnectorMetadata = v +} + +func (x *BatonServiceHelloRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *BatonServiceHelloRequest) HasBuildInfo() bool { + if x == nil { + return false + } + return x.BuildInfo != nil +} + +func (x *BatonServiceHelloRequest) HasOsInfo() bool { + if x == nil { + return false + } + return x.OsInfo != nil +} + +func (x *BatonServiceHelloRequest) HasConnectorMetadata() bool { + if x == nil { + return false + } + return x.ConnectorMetadata != nil +} + +func (x *BatonServiceHelloRequest) ClearBuildInfo() { + x.BuildInfo = nil +} + +func (x *BatonServiceHelloRequest) ClearOsInfo() { + x.OsInfo = nil +} + +func (x *BatonServiceHelloRequest) ClearConnectorMetadata() { + x.ConnectorMetadata = nil +} + +type BatonServiceHelloRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + HostId string + TaskId string + BuildInfo *BatonServiceHelloRequest_BuildInfo + OsInfo *BatonServiceHelloRequest_OSInfo + ConnectorMetadata *v2.ConnectorMetadata + Annotations []*anypb.Any +} + +func (b0 BatonServiceHelloRequest_builder) Build() *BatonServiceHelloRequest { + m0 := &BatonServiceHelloRequest{} + b, x := &b0, m0 + _, _ = b, x + x.HostId = b.HostId + x.TaskId = b.TaskId + x.BuildInfo = b.BuildInfo + x.OsInfo = b.OsInfo + x.ConnectorMetadata = b.ConnectorMetadata + x.Annotations = b.Annotations + return m0 +} + type BatonServiceHelloResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -609,11 +1350,6 @@ func (x *BatonServiceHelloResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceHelloResponse.ProtoReflect.Descriptor instead. 
-func (*BatonServiceHelloResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{2} -} - func (x *BatonServiceHelloResponse) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -621,8 +1357,26 @@ func (x *BatonServiceHelloResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *BatonServiceHelloResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type BatonServiceHelloResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 BatonServiceHelloResponse_builder) Build() *BatonServiceHelloResponse { + m0 := &BatonServiceHelloResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type BatonServiceGetTaskRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -653,11 +1407,6 @@ func (x *BatonServiceGetTaskRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceGetTaskRequest.ProtoReflect.Descriptor instead. -func (*BatonServiceGetTaskRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{3} -} - func (x *BatonServiceGetTaskRequest) GetHostId() string { if x != nil { return x.HostId @@ -665,8 +1414,26 @@ func (x *BatonServiceGetTaskRequest) GetHostId() string { return "" } +func (x *BatonServiceGetTaskRequest) SetHostId(v string) { + x.HostId = v +} + +type BatonServiceGetTaskRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + HostId string +} + +func (b0 BatonServiceGetTaskRequest_builder) Build() *BatonServiceGetTaskRequest { + m0 := &BatonServiceGetTaskRequest{} + b, x := &b0, m0 + _, _ = b, x + x.HostId = b.HostId + return m0 +} + type BatonServiceGetTaskResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` NextPoll *durationpb.Duration `protobuf:"bytes,2,opt,name=next_poll,json=nextPoll,proto3" json:"next_poll,omitempty"` NextHeartbeat *durationpb.Duration `protobuf:"bytes,3,opt,name=next_heartbeat,json=nextHeartbeat,proto3" json:"next_heartbeat,omitempty"` @@ -700,11 +1467,6 @@ func (x *BatonServiceGetTaskResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceGetTaskResponse.ProtoReflect.Descriptor instead. 
-func (*BatonServiceGetTaskResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{4} -} - func (x *BatonServiceGetTaskResponse) GetTask() *Task { if x != nil { return x.Task @@ -733,8 +1495,77 @@ func (x *BatonServiceGetTaskResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *BatonServiceGetTaskResponse) SetTask(v *Task) { + x.Task = v +} + +func (x *BatonServiceGetTaskResponse) SetNextPoll(v *durationpb.Duration) { + x.NextPoll = v +} + +func (x *BatonServiceGetTaskResponse) SetNextHeartbeat(v *durationpb.Duration) { + x.NextHeartbeat = v +} + +func (x *BatonServiceGetTaskResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *BatonServiceGetTaskResponse) HasTask() bool { + if x == nil { + return false + } + return x.Task != nil +} + +func (x *BatonServiceGetTaskResponse) HasNextPoll() bool { + if x == nil { + return false + } + return x.NextPoll != nil +} + +func (x *BatonServiceGetTaskResponse) HasNextHeartbeat() bool { + if x == nil { + return false + } + return x.NextHeartbeat != nil +} + +func (x *BatonServiceGetTaskResponse) ClearTask() { + x.Task = nil +} + +func (x *BatonServiceGetTaskResponse) ClearNextPoll() { + x.NextPoll = nil +} + +func (x *BatonServiceGetTaskResponse) ClearNextHeartbeat() { + x.NextHeartbeat = nil +} + +type BatonServiceGetTaskResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Task *Task + NextPoll *durationpb.Duration + NextHeartbeat *durationpb.Duration + Annotations []*anypb.Any +} + +func (b0 BatonServiceGetTaskResponse_builder) Build() *BatonServiceGetTaskResponse { + m0 := &BatonServiceGetTaskResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Task = b.Task + x.NextPoll = b.NextPoll + x.NextHeartbeat = b.NextHeartbeat + x.Annotations = b.Annotations + return m0 +} + type BatonServiceHeartbeatRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` // Useful for returning status updates on the task @@ -767,11 +1598,6 @@ func (x *BatonServiceHeartbeatRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceHeartbeatRequest.ProtoReflect.Descriptor instead. -func (*BatonServiceHeartbeatRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{5} -} - func (x *BatonServiceHeartbeatRequest) GetHostId() string { if x != nil { return x.HostId @@ -793,8 +1619,38 @@ func (x *BatonServiceHeartbeatRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *BatonServiceHeartbeatRequest) SetHostId(v string) { + x.HostId = v +} + +func (x *BatonServiceHeartbeatRequest) SetTaskId(v string) { + x.TaskId = v +} + +func (x *BatonServiceHeartbeatRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type BatonServiceHeartbeatRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + HostId string + TaskId string + Annotations []*anypb.Any +} + +func (b0 BatonServiceHeartbeatRequest_builder) Build() *BatonServiceHeartbeatRequest { + m0 := &BatonServiceHeartbeatRequest{} + b, x := &b0, m0 + _, _ = b, x + x.HostId = b.HostId + x.TaskId = b.TaskId + x.Annotations = b.Annotations + return m0 +} + type BatonServiceHeartbeatResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` NextHeartbeat *durationpb.Duration `protobuf:"bytes,1,opt,name=next_heartbeat,json=nextHeartbeat,proto3" json:"next_heartbeat,omitempty"` Cancelled bool `protobuf:"varint,2,opt,name=cancelled,proto3" json:"cancelled,omitempty"` // Backend has decided that we should stop working on this Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -827,11 +1683,6 @@ func (x *BatonServiceHeartbeatResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceHeartbeatResponse.ProtoReflect.Descriptor instead. -func (*BatonServiceHeartbeatResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{6} -} - func (x *BatonServiceHeartbeatResponse) GetNextHeartbeat() *durationpb.Duration { if x != nil { return x.NextHeartbeat @@ -853,8 +1704,49 @@ func (x *BatonServiceHeartbeatResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *BatonServiceHeartbeatResponse) SetNextHeartbeat(v *durationpb.Duration) { + x.NextHeartbeat = v +} + +func (x *BatonServiceHeartbeatResponse) SetCancelled(v bool) { + x.Cancelled = v +} + +func (x *BatonServiceHeartbeatResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *BatonServiceHeartbeatResponse) HasNextHeartbeat() bool { + if x == nil { + return false + } + return x.NextHeartbeat != nil +} + +func (x *BatonServiceHeartbeatResponse) ClearNextHeartbeat() { + x.NextHeartbeat = nil +} + +type BatonServiceHeartbeatResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + NextHeartbeat *durationpb.Duration + Cancelled bool + Annotations []*anypb.Any +} + +func (b0 BatonServiceHeartbeatResponse_builder) Build() *BatonServiceHeartbeatResponse { + m0 := &BatonServiceHeartbeatResponse{} + b, x := &b0, m0 + _, _ = b, x + x.NextHeartbeat = b.NextHeartbeat + x.Cancelled = b.Cancelled + x.Annotations = b.Annotations + return m0 +} + type BatonServiceUploadAssetRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Types that are valid to be assigned to Msg: // // *BatonServiceUploadAssetRequest_Metadata @@ -890,11 +1782,6 @@ func (x *BatonServiceUploadAssetRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceUploadAssetRequest.ProtoReflect.Descriptor instead. 
-func (*BatonServiceUploadAssetRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{7} -} - func (x *BatonServiceUploadAssetRequest) GetMsg() isBatonServiceUploadAssetRequest_Msg { if x != nil { return x.Msg @@ -929,6 +1816,140 @@ func (x *BatonServiceUploadAssetRequest) GetEof() *BatonServiceUploadAssetReques return nil } +func (x *BatonServiceUploadAssetRequest) SetMetadata(v *BatonServiceUploadAssetRequest_UploadMetadata) { + if v == nil { + x.Msg = nil + return + } + x.Msg = &BatonServiceUploadAssetRequest_Metadata{v} +} + +func (x *BatonServiceUploadAssetRequest) SetData(v *BatonServiceUploadAssetRequest_UploadData) { + if v == nil { + x.Msg = nil + return + } + x.Msg = &BatonServiceUploadAssetRequest_Data{v} +} + +func (x *BatonServiceUploadAssetRequest) SetEof(v *BatonServiceUploadAssetRequest_UploadEOF) { + if v == nil { + x.Msg = nil + return + } + x.Msg = &BatonServiceUploadAssetRequest_Eof{v} +} + +func (x *BatonServiceUploadAssetRequest) HasMsg() bool { + if x == nil { + return false + } + return x.Msg != nil +} + +func (x *BatonServiceUploadAssetRequest) HasMetadata() bool { + if x == nil { + return false + } + _, ok := x.Msg.(*BatonServiceUploadAssetRequest_Metadata) + return ok +} + +func (x *BatonServiceUploadAssetRequest) HasData() bool { + if x == nil { + return false + } + _, ok := x.Msg.(*BatonServiceUploadAssetRequest_Data) + return ok +} + +func (x *BatonServiceUploadAssetRequest) HasEof() bool { + if x == nil { + return false + } + _, ok := x.Msg.(*BatonServiceUploadAssetRequest_Eof) + return ok +} + +func (x *BatonServiceUploadAssetRequest) ClearMsg() { + x.Msg = nil +} + +func (x *BatonServiceUploadAssetRequest) ClearMetadata() { + if _, ok := x.Msg.(*BatonServiceUploadAssetRequest_Metadata); ok { + x.Msg = nil + } +} + +func (x *BatonServiceUploadAssetRequest) ClearData() { + if _, ok := x.Msg.(*BatonServiceUploadAssetRequest_Data); ok { + x.Msg = nil + } +} + +func (x *BatonServiceUploadAssetRequest) ClearEof() { + if _, ok := x.Msg.(*BatonServiceUploadAssetRequest_Eof); ok { + x.Msg = nil + } +} + +const BatonServiceUploadAssetRequest_Msg_not_set_case case_BatonServiceUploadAssetRequest_Msg = 0 +const BatonServiceUploadAssetRequest_Metadata_case case_BatonServiceUploadAssetRequest_Msg = 100 +const BatonServiceUploadAssetRequest_Data_case case_BatonServiceUploadAssetRequest_Msg = 101 +const BatonServiceUploadAssetRequest_Eof_case case_BatonServiceUploadAssetRequest_Msg = 102 + +func (x *BatonServiceUploadAssetRequest) WhichMsg() case_BatonServiceUploadAssetRequest_Msg { + if x == nil { + return BatonServiceUploadAssetRequest_Msg_not_set_case + } + switch x.Msg.(type) { + case *BatonServiceUploadAssetRequest_Metadata: + return BatonServiceUploadAssetRequest_Metadata_case + case *BatonServiceUploadAssetRequest_Data: + return BatonServiceUploadAssetRequest_Data_case + case *BatonServiceUploadAssetRequest_Eof: + return BatonServiceUploadAssetRequest_Eof_case + default: + return BatonServiceUploadAssetRequest_Msg_not_set_case + } +} + +type BatonServiceUploadAssetRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
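+	// Usage note (illustrative): populate exactly one of Metadata, Data, or Eof per
+	// streamed request; Build leaves Msg nil when none is set, and WhichMsg reports
+	// which branch was populated. Sketch:
+	//
+	//	req := BatonServiceUploadAssetRequest_builder{Data: &BatonServiceUploadAssetRequest_UploadData{}}.Build()
+	//	_ = req.WhichMsg() // BatonServiceUploadAssetRequest_Data_case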
+ + // Fields of oneof Msg: + Metadata *BatonServiceUploadAssetRequest_UploadMetadata + Data *BatonServiceUploadAssetRequest_UploadData + Eof *BatonServiceUploadAssetRequest_UploadEOF + // -- end of Msg +} + +func (b0 BatonServiceUploadAssetRequest_builder) Build() *BatonServiceUploadAssetRequest { + m0 := &BatonServiceUploadAssetRequest{} + b, x := &b0, m0 + _, _ = b, x + if b.Metadata != nil { + x.Msg = &BatonServiceUploadAssetRequest_Metadata{b.Metadata} + } + if b.Data != nil { + x.Msg = &BatonServiceUploadAssetRequest_Data{b.Data} + } + if b.Eof != nil { + x.Msg = &BatonServiceUploadAssetRequest_Eof{b.Eof} + } + return m0 +} + +type case_BatonServiceUploadAssetRequest_Msg protoreflect.FieldNumber + +func (x case_BatonServiceUploadAssetRequest_Msg) String() string { + md := file_c1_connectorapi_baton_v1_baton_proto_msgTypes[7].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isBatonServiceUploadAssetRequest_Msg interface { isBatonServiceUploadAssetRequest_Msg() } @@ -952,7 +1973,7 @@ func (*BatonServiceUploadAssetRequest_Data) isBatonServiceUploadAssetRequest_Msg func (*BatonServiceUploadAssetRequest_Eof) isBatonServiceUploadAssetRequest_Msg() {} type BatonServiceUploadAssetResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -983,11 +2004,6 @@ func (x *BatonServiceUploadAssetResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceUploadAssetResponse.ProtoReflect.Descriptor instead. -func (*BatonServiceUploadAssetResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{8} -} - func (x *BatonServiceUploadAssetResponse) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -995,9 +2011,27 @@ func (x *BatonServiceUploadAssetResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *BatonServiceUploadAssetResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type BatonServiceUploadAssetResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 BatonServiceUploadAssetResponse_builder) Build() *BatonServiceUploadAssetResponse { + m0 := &BatonServiceUploadAssetResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + // Connector submits a task has been finished. It should always be removed from the queue, and if it isn't a fatal error, we should re-add it to the queue to try again. type BatonServiceFinishTaskRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` Status *status.Status `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` @@ -1035,11 +2069,6 @@ func (x *BatonServiceFinishTaskRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceFinishTaskRequest.ProtoReflect.Descriptor instead. 
-func (*BatonServiceFinishTaskRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{9} -} - func (x *BatonServiceFinishTaskRequest) GetHostId() string { if x != nil { return x.HostId @@ -1058,32 +2087,166 @@ func (x *BatonServiceFinishTaskRequest) GetStatus() *status.Status { if x != nil { return x.Status } - return nil + return nil +} + +func (x *BatonServiceFinishTaskRequest) GetFinalState() isBatonServiceFinishTaskRequest_FinalState { + if x != nil { + return x.FinalState + } + return nil +} + +func (x *BatonServiceFinishTaskRequest) GetError() *BatonServiceFinishTaskRequest_Error { + if x != nil { + if x, ok := x.FinalState.(*BatonServiceFinishTaskRequest_Error_); ok { + return x.Error + } + } + return nil +} + +func (x *BatonServiceFinishTaskRequest) GetSuccess() *BatonServiceFinishTaskRequest_Success { + if x != nil { + if x, ok := x.FinalState.(*BatonServiceFinishTaskRequest_Success_); ok { + return x.Success + } + } + return nil +} + +func (x *BatonServiceFinishTaskRequest) SetHostId(v string) { + x.HostId = v +} + +func (x *BatonServiceFinishTaskRequest) SetTaskId(v string) { + x.TaskId = v +} + +func (x *BatonServiceFinishTaskRequest) SetStatus(v *status.Status) { + x.Status = v +} + +func (x *BatonServiceFinishTaskRequest) SetError(v *BatonServiceFinishTaskRequest_Error) { + if v == nil { + x.FinalState = nil + return + } + x.FinalState = &BatonServiceFinishTaskRequest_Error_{v} +} + +func (x *BatonServiceFinishTaskRequest) SetSuccess(v *BatonServiceFinishTaskRequest_Success) { + if v == nil { + x.FinalState = nil + return + } + x.FinalState = &BatonServiceFinishTaskRequest_Success_{v} +} + +func (x *BatonServiceFinishTaskRequest) HasStatus() bool { + if x == nil { + return false + } + return x.Status != nil +} + +func (x *BatonServiceFinishTaskRequest) HasFinalState() bool { + if x == nil { + return false + } + return x.FinalState != nil +} + +func (x *BatonServiceFinishTaskRequest) HasError() bool { + if x == nil { + return false + } + _, ok := x.FinalState.(*BatonServiceFinishTaskRequest_Error_) + return ok +} + +func (x *BatonServiceFinishTaskRequest) HasSuccess() bool { + if x == nil { + return false + } + _, ok := x.FinalState.(*BatonServiceFinishTaskRequest_Success_) + return ok +} + +func (x *BatonServiceFinishTaskRequest) ClearStatus() { + x.Status = nil +} + +func (x *BatonServiceFinishTaskRequest) ClearFinalState() { + x.FinalState = nil +} + +func (x *BatonServiceFinishTaskRequest) ClearError() { + if _, ok := x.FinalState.(*BatonServiceFinishTaskRequest_Error_); ok { + x.FinalState = nil + } +} + +func (x *BatonServiceFinishTaskRequest) ClearSuccess() { + if _, ok := x.FinalState.(*BatonServiceFinishTaskRequest_Success_); ok { + x.FinalState = nil + } +} + +const BatonServiceFinishTaskRequest_FinalState_not_set_case case_BatonServiceFinishTaskRequest_FinalState = 0 +const BatonServiceFinishTaskRequest_Error_case case_BatonServiceFinishTaskRequest_FinalState = 100 +const BatonServiceFinishTaskRequest_Success_case case_BatonServiceFinishTaskRequest_FinalState = 101 + +func (x *BatonServiceFinishTaskRequest) WhichFinalState() case_BatonServiceFinishTaskRequest_FinalState { + if x == nil { + return BatonServiceFinishTaskRequest_FinalState_not_set_case + } + switch x.FinalState.(type) { + case *BatonServiceFinishTaskRequest_Error_: + return BatonServiceFinishTaskRequest_Error_case + case *BatonServiceFinishTaskRequest_Success_: + return BatonServiceFinishTaskRequest_Success_case + default: + return 
BatonServiceFinishTaskRequest_FinalState_not_set_case + } } -func (x *BatonServiceFinishTaskRequest) GetFinalState() isBatonServiceFinishTaskRequest_FinalState { - if x != nil { - return x.FinalState - } - return nil +type BatonServiceFinishTaskRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + HostId string + TaskId string + Status *status.Status + // Fields of oneof FinalState: + Error *BatonServiceFinishTaskRequest_Error + Success *BatonServiceFinishTaskRequest_Success + // -- end of FinalState } -func (x *BatonServiceFinishTaskRequest) GetError() *BatonServiceFinishTaskRequest_Error { - if x != nil { - if x, ok := x.FinalState.(*BatonServiceFinishTaskRequest_Error_); ok { - return x.Error - } +func (b0 BatonServiceFinishTaskRequest_builder) Build() *BatonServiceFinishTaskRequest { + m0 := &BatonServiceFinishTaskRequest{} + b, x := &b0, m0 + _, _ = b, x + x.HostId = b.HostId + x.TaskId = b.TaskId + x.Status = b.Status + if b.Error != nil { + x.FinalState = &BatonServiceFinishTaskRequest_Error_{b.Error} } - return nil + if b.Success != nil { + x.FinalState = &BatonServiceFinishTaskRequest_Success_{b.Success} + } + return m0 } -func (x *BatonServiceFinishTaskRequest) GetSuccess() *BatonServiceFinishTaskRequest_Success { - if x != nil { - if x, ok := x.FinalState.(*BatonServiceFinishTaskRequest_Success_); ok { - return x.Success - } +type case_BatonServiceFinishTaskRequest_FinalState protoreflect.FieldNumber + +func (x case_BatonServiceFinishTaskRequest_FinalState) String() string { + md := file_c1_connectorapi_baton_v1_baton_proto_msgTypes[9].Descriptor() + if x == 0 { + return "not set" } - return nil + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) } type isBatonServiceFinishTaskRequest_FinalState interface { @@ -1103,7 +2266,7 @@ func (*BatonServiceFinishTaskRequest_Error_) isBatonServiceFinishTaskRequest_Fin func (*BatonServiceFinishTaskRequest_Success_) isBatonServiceFinishTaskRequest_FinalState() {} type BatonServiceFinishTaskResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1134,11 +2297,6 @@ func (x *BatonServiceFinishTaskResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceFinishTaskResponse.ProtoReflect.Descriptor instead. -func (*BatonServiceFinishTaskResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{10} -} - func (x *BatonServiceFinishTaskResponse) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -1146,8 +2304,26 @@ func (x *BatonServiceFinishTaskResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *BatonServiceFinishTaskResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type BatonServiceFinishTaskResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 BatonServiceFinishTaskResponse_builder) Build() *BatonServiceFinishTaskResponse { + m0 := &BatonServiceFinishTaskResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type StartDebuggingRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1177,13 +2353,20 @@ func (x *StartDebuggingRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StartDebuggingRequest.ProtoReflect.Descriptor instead. -func (*StartDebuggingRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{11} +type StartDebuggingRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 StartDebuggingRequest_builder) Build() *StartDebuggingRequest { + m0 := &StartDebuggingRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type StartDebuggingResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Status bool `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1214,11 +2397,6 @@ func (x *StartDebuggingResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StartDebuggingResponse.ProtoReflect.Descriptor instead. -func (*StartDebuggingResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{12} -} - func (x *StartDebuggingResponse) GetStatus() bool { if x != nil { return x.Status @@ -1226,8 +2404,26 @@ func (x *StartDebuggingResponse) GetStatus() bool { return false } +func (x *StartDebuggingResponse) SetStatus(v bool) { + x.Status = v +} + +type StartDebuggingResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Status bool +} + +func (b0 StartDebuggingResponse_builder) Build() *StartDebuggingResponse { + m0 := &StartDebuggingResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Status = b.Status + return m0 +} + type Task_NoneTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1258,11 +2454,6 @@ func (x *Task_NoneTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_NoneTask.ProtoReflect.Descriptor instead. -func (*Task_NoneTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 0} -} - func (x *Task_NoneTask) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -1270,8 +2461,26 @@ func (x *Task_NoneTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_NoneTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_NoneTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 Task_NoneTask_builder) Build() *Task_NoneTask { + m0 := &Task_NoneTask{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type Task_HelloTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1302,11 +2511,6 @@ func (x *Task_HelloTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_HelloTask.ProtoReflect.Descriptor instead. -func (*Task_HelloTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 1} -} - func (x *Task_HelloTask) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -1314,11 +2518,30 @@ func (x *Task_HelloTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_HelloTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_HelloTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 Task_HelloTask_builder) Build() *Task_HelloTask { + m0 := &Task_HelloTask{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type Task_SyncFullTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` SkipExpandGrants bool `protobuf:"varint,2,opt,name=skip_expand_grants,json=skipExpandGrants,proto3" json:"skip_expand_grants,omitempty"` SkipEntitlementsAndGrants bool `protobuf:"varint,3,opt,name=skip_entitlements_and_grants,json=skipEntitlementsAndGrants,proto3" json:"skip_entitlements_and_grants,omitempty"` + TargetedSyncResources []*v2.Resource `protobuf:"bytes,4,rep,name=targeted_sync_resources,json=targetedSyncResources,proto3" json:"targeted_sync_resources,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1348,11 +2571,6 @@ func (x *Task_SyncFullTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_SyncFullTask.ProtoReflect.Descriptor instead. -func (*Task_SyncFullTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 2} -} - func (x *Task_SyncFullTask) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -1374,8 +2592,51 @@ func (x *Task_SyncFullTask) GetSkipEntitlementsAndGrants() bool { return false } +func (x *Task_SyncFullTask) GetTargetedSyncResources() []*v2.Resource { + if x != nil { + return x.TargetedSyncResources + } + return nil +} + +func (x *Task_SyncFullTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Task_SyncFullTask) SetSkipExpandGrants(v bool) { + x.SkipExpandGrants = v +} + +func (x *Task_SyncFullTask) SetSkipEntitlementsAndGrants(v bool) { + x.SkipEntitlementsAndGrants = v +} + +func (x *Task_SyncFullTask) SetTargetedSyncResources(v []*v2.Resource) { + x.TargetedSyncResources = v +} + +type Task_SyncFullTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
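+	// Usage note (illustrative): TargetedSyncResources is new in this regenerated
+	// SDK and appears intended to scope a full sync to the listed resources when
+	// non-empty. Sketch, where targets is a hypothetical []*v2.Resource:
+	//
+	//	task := Task_SyncFullTask_builder{TargetedSyncResources: targets}.Build()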
+ + Annotations []*anypb.Any + SkipExpandGrants bool + SkipEntitlementsAndGrants bool + TargetedSyncResources []*v2.Resource +} + +func (b0 Task_SyncFullTask_builder) Build() *Task_SyncFullTask { + m0 := &Task_SyncFullTask{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.SkipExpandGrants = b.SkipExpandGrants + x.SkipEntitlementsAndGrants = b.SkipEntitlementsAndGrants + x.TargetedSyncResources = b.TargetedSyncResources + return m0 +} + type Task_EventFeedTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` StartAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_at,json=startAt,proto3" json:"start_at,omitempty"` unknownFields protoimpl.UnknownFields @@ -1407,11 +2668,6 @@ func (x *Task_EventFeedTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_EventFeedTask.ProtoReflect.Descriptor instead. -func (*Task_EventFeedTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 3} -} - func (x *Task_EventFeedTask) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -1426,8 +2682,43 @@ func (x *Task_EventFeedTask) GetStartAt() *timestamppb.Timestamp { return nil } +func (x *Task_EventFeedTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Task_EventFeedTask) SetStartAt(v *timestamppb.Timestamp) { + x.StartAt = v +} + +func (x *Task_EventFeedTask) HasStartAt() bool { + if x == nil { + return false + } + return x.StartAt != nil +} + +func (x *Task_EventFeedTask) ClearStartAt() { + x.StartAt = nil +} + +type Task_EventFeedTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + StartAt *timestamppb.Timestamp +} + +func (b0 Task_EventFeedTask_builder) Build() *Task_EventFeedTask { + m0 := &Task_EventFeedTask{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.StartAt = b.StartAt + return m0 +} + type Task_GrantTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` Principal *v2.Resource `protobuf:"bytes,2,opt,name=principal,proto3" json:"principal,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -1461,11 +2752,6 @@ func (x *Task_GrantTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_GrantTask.ProtoReflect.Descriptor instead. 
-func (*Task_GrantTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 4} -} - func (x *Task_GrantTask) GetEntitlement() *v2.Entitlement { if x != nil { return x.Entitlement @@ -1494,8 +2780,77 @@ func (x *Task_GrantTask) GetDuration() *durationpb.Duration { return nil } +func (x *Task_GrantTask) SetEntitlement(v *v2.Entitlement) { + x.Entitlement = v +} + +func (x *Task_GrantTask) SetPrincipal(v *v2.Resource) { + x.Principal = v +} + +func (x *Task_GrantTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Task_GrantTask) SetDuration(v *durationpb.Duration) { + x.Duration = v +} + +func (x *Task_GrantTask) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} + +func (x *Task_GrantTask) HasPrincipal() bool { + if x == nil { + return false + } + return x.Principal != nil +} + +func (x *Task_GrantTask) HasDuration() bool { + if x == nil { + return false + } + return x.Duration != nil +} + +func (x *Task_GrantTask) ClearEntitlement() { + x.Entitlement = nil +} + +func (x *Task_GrantTask) ClearPrincipal() { + x.Principal = nil +} + +func (x *Task_GrantTask) ClearDuration() { + x.Duration = nil +} + +type Task_GrantTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entitlement *v2.Entitlement + Principal *v2.Resource + Annotations []*anypb.Any + Duration *durationpb.Duration +} + +func (b0 Task_GrantTask_builder) Build() *Task_GrantTask { + m0 := &Task_GrantTask{} + b, x := &b0, m0 + _, _ = b, x + x.Entitlement = b.Entitlement + x.Principal = b.Principal + x.Annotations = b.Annotations + x.Duration = b.Duration + return m0 +} + type Task_RevokeTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Grant *v2.Grant `protobuf:"bytes,1,opt,name=grant,proto3" json:"grant,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -1527,11 +2882,6 @@ func (x *Task_RevokeTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_RevokeTask.ProtoReflect.Descriptor instead. -func (*Task_RevokeTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 5} -} - func (x *Task_RevokeTask) GetGrant() *v2.Grant { if x != nil { return x.Grant @@ -1546,8 +2896,43 @@ func (x *Task_RevokeTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_RevokeTask) SetGrant(v *v2.Grant) { + x.Grant = v +} + +func (x *Task_RevokeTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Task_RevokeTask) HasGrant() bool { + if x == nil { + return false + } + return x.Grant != nil +} + +func (x *Task_RevokeTask) ClearGrant() { + x.Grant = nil +} + +type Task_RevokeTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Grant *v2.Grant + Annotations []*anypb.Any +} + +func (b0 Task_RevokeTask_builder) Build() *Task_RevokeTask { + m0 := &Task_RevokeTask{} + b, x := &b0, m0 + _, _ = b, x + x.Grant = b.Grant + x.Annotations = b.Annotations + return m0 +} + type Task_CreateAccountTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` AccountInfo *v2.AccountInfo `protobuf:"bytes,1,opt,name=account_info,json=accountInfo,proto3" json:"account_info,omitempty"` CredentialOptions *v2.CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3" json:"credential_options,omitempty"` EncryptionConfigs []*v2.EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3" json:"encryption_configs,omitempty"` @@ -1580,11 +2965,6 @@ func (x *Task_CreateAccountTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_CreateAccountTask.ProtoReflect.Descriptor instead. -func (*Task_CreateAccountTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 6} -} - func (x *Task_CreateAccountTask) GetAccountInfo() *v2.AccountInfo { if x != nil { return x.AccountInfo @@ -1606,8 +2986,60 @@ func (x *Task_CreateAccountTask) GetEncryptionConfigs() []*v2.EncryptionConfig { return nil } +func (x *Task_CreateAccountTask) SetAccountInfo(v *v2.AccountInfo) { + x.AccountInfo = v +} + +func (x *Task_CreateAccountTask) SetCredentialOptions(v *v2.CredentialOptions) { + x.CredentialOptions = v +} + +func (x *Task_CreateAccountTask) SetEncryptionConfigs(v []*v2.EncryptionConfig) { + x.EncryptionConfigs = v +} + +func (x *Task_CreateAccountTask) HasAccountInfo() bool { + if x == nil { + return false + } + return x.AccountInfo != nil +} + +func (x *Task_CreateAccountTask) HasCredentialOptions() bool { + if x == nil { + return false + } + return x.CredentialOptions != nil +} + +func (x *Task_CreateAccountTask) ClearAccountInfo() { + x.AccountInfo = nil +} + +func (x *Task_CreateAccountTask) ClearCredentialOptions() { + x.CredentialOptions = nil +} + +type Task_CreateAccountTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + AccountInfo *v2.AccountInfo + CredentialOptions *v2.CredentialOptions + EncryptionConfigs []*v2.EncryptionConfig +} + +func (b0 Task_CreateAccountTask_builder) Build() *Task_CreateAccountTask { + m0 := &Task_CreateAccountTask{} + b, x := &b0, m0 + _, _ = b, x + x.AccountInfo = b.AccountInfo + x.CredentialOptions = b.CredentialOptions + x.EncryptionConfigs = b.EncryptionConfigs + return m0 +} + type Task_CreateResourceTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *v2.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1638,11 +3070,6 @@ func (x *Task_CreateResourceTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_CreateResourceTask.ProtoReflect.Descriptor instead. 
-func (*Task_CreateResourceTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 7} -} - func (x *Task_CreateResourceTask) GetResource() *v2.Resource { if x != nil { return x.Resource @@ -1650,8 +3077,37 @@ func (x *Task_CreateResourceTask) GetResource() *v2.Resource { return nil } +func (x *Task_CreateResourceTask) SetResource(v *v2.Resource) { + x.Resource = v +} + +func (x *Task_CreateResourceTask) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *Task_CreateResourceTask) ClearResource() { + x.Resource = nil +} + +type Task_CreateResourceTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *v2.Resource +} + +func (b0 Task_CreateResourceTask_builder) Build() *Task_CreateResourceTask { + m0 := &Task_CreateResourceTask{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + return m0 +} + type Task_DeleteResourceTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceId *v2.ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` ParentResourceId *v2.ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` unknownFields protoimpl.UnknownFields @@ -1683,11 +3139,6 @@ func (x *Task_DeleteResourceTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_DeleteResourceTask.ProtoReflect.Descriptor instead. -func (*Task_DeleteResourceTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 8} -} - func (x *Task_DeleteResourceTask) GetResourceId() *v2.ResourceId { if x != nil { return x.ResourceId @@ -1702,8 +3153,54 @@ func (x *Task_DeleteResourceTask) GetParentResourceId() *v2.ResourceId { return nil } +func (x *Task_DeleteResourceTask) SetResourceId(v *v2.ResourceId) { + x.ResourceId = v +} + +func (x *Task_DeleteResourceTask) SetParentResourceId(v *v2.ResourceId) { + x.ParentResourceId = v +} + +func (x *Task_DeleteResourceTask) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *Task_DeleteResourceTask) HasParentResourceId() bool { + if x == nil { + return false + } + return x.ParentResourceId != nil +} + +func (x *Task_DeleteResourceTask) ClearResourceId() { + x.ResourceId = nil +} + +func (x *Task_DeleteResourceTask) ClearParentResourceId() { + x.ParentResourceId = nil +} + +type Task_DeleteResourceTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
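+	// Usage note (illustrative, with hypothetical values): a delete-resource task
+	// identifies the resource to delete and, optionally, its parent. Sketch:
+	//
+	//	del := Task_DeleteResourceTask_builder{
+	//		ResourceId: &v2.ResourceId{ResourceType: "team", Resource: "123"},
+	//	}.Build()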
+ + ResourceId *v2.ResourceId + ParentResourceId *v2.ResourceId +} + +func (b0 Task_DeleteResourceTask_builder) Build() *Task_DeleteResourceTask { + m0 := &Task_DeleteResourceTask{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.ParentResourceId = b.ParentResourceId + return m0 +} + type Task_RotateCredentialsTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceId *v2.ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` CredentialOptions *v2.CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3" json:"credential_options,omitempty"` EncryptionConfigs []*v2.EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3" json:"encryption_configs,omitempty"` @@ -1736,11 +3233,6 @@ func (x *Task_RotateCredentialsTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_RotateCredentialsTask.ProtoReflect.Descriptor instead. -func (*Task_RotateCredentialsTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 9} -} - func (x *Task_RotateCredentialsTask) GetResourceId() *v2.ResourceId { if x != nil { return x.ResourceId @@ -1762,8 +3254,60 @@ func (x *Task_RotateCredentialsTask) GetEncryptionConfigs() []*v2.EncryptionConf return nil } +func (x *Task_RotateCredentialsTask) SetResourceId(v *v2.ResourceId) { + x.ResourceId = v +} + +func (x *Task_RotateCredentialsTask) SetCredentialOptions(v *v2.CredentialOptions) { + x.CredentialOptions = v +} + +func (x *Task_RotateCredentialsTask) SetEncryptionConfigs(v []*v2.EncryptionConfig) { + x.EncryptionConfigs = v +} + +func (x *Task_RotateCredentialsTask) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *Task_RotateCredentialsTask) HasCredentialOptions() bool { + if x == nil { + return false + } + return x.CredentialOptions != nil +} + +func (x *Task_RotateCredentialsTask) ClearResourceId() { + x.ResourceId = nil +} + +func (x *Task_RotateCredentialsTask) ClearCredentialOptions() { + x.CredentialOptions = nil +} + +type Task_RotateCredentialsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceId *v2.ResourceId + CredentialOptions *v2.CredentialOptions + EncryptionConfigs []*v2.EncryptionConfig +} + +func (b0 Task_RotateCredentialsTask_builder) Build() *Task_RotateCredentialsTask { + m0 := &Task_RotateCredentialsTask{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.CredentialOptions = b.CredentialOptions + x.EncryptionConfigs = b.EncryptionConfigs + return m0 +} + type Task_CreateTicketTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` TicketRequest *v2.TicketRequest `protobuf:"bytes,1,opt,name=ticket_request,json=ticketRequest,proto3" json:"ticket_request,omitempty"` TicketSchema *v2.TicketSchema `protobuf:"bytes,2,opt,name=ticket_schema,json=ticketSchema,proto3" json:"ticket_schema,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -1796,11 +3340,6 @@ func (x *Task_CreateTicketTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_CreateTicketTask.ProtoReflect.Descriptor instead. 
-func (*Task_CreateTicketTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 10} -} - func (x *Task_CreateTicketTask) GetTicketRequest() *v2.TicketRequest { if x != nil { return x.TicketRequest @@ -1822,8 +3361,60 @@ func (x *Task_CreateTicketTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_CreateTicketTask) SetTicketRequest(v *v2.TicketRequest) { + x.TicketRequest = v +} + +func (x *Task_CreateTicketTask) SetTicketSchema(v *v2.TicketSchema) { + x.TicketSchema = v +} + +func (x *Task_CreateTicketTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Task_CreateTicketTask) HasTicketRequest() bool { + if x == nil { + return false + } + return x.TicketRequest != nil +} + +func (x *Task_CreateTicketTask) HasTicketSchema() bool { + if x == nil { + return false + } + return x.TicketSchema != nil +} + +func (x *Task_CreateTicketTask) ClearTicketRequest() { + x.TicketRequest = nil +} + +func (x *Task_CreateTicketTask) ClearTicketSchema() { + x.TicketSchema = nil +} + +type Task_CreateTicketTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + TicketRequest *v2.TicketRequest + TicketSchema *v2.TicketSchema + Annotations []*anypb.Any +} + +func (b0 Task_CreateTicketTask_builder) Build() *Task_CreateTicketTask { + m0 := &Task_CreateTicketTask{} + b, x := &b0, m0 + _, _ = b, x + x.TicketRequest = b.TicketRequest + x.TicketSchema = b.TicketSchema + x.Annotations = b.Annotations + return m0 +} + type Task_BulkCreateTicketsTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` TicketRequests []*Task_CreateTicketTask `protobuf:"bytes,1,rep,name=ticket_requests,json=ticketRequests,proto3" json:"ticket_requests,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1854,20 +3445,33 @@ func (x *Task_BulkCreateTicketsTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_BulkCreateTicketsTask.ProtoReflect.Descriptor instead. -func (*Task_BulkCreateTicketsTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 11} +func (x *Task_BulkCreateTicketsTask) GetTicketRequests() []*Task_CreateTicketTask { + if x != nil { + return x.TicketRequests + } + return nil +} + +func (x *Task_BulkCreateTicketsTask) SetTicketRequests(v []*Task_CreateTicketTask) { + x.TicketRequests = v +} + +type Task_BulkCreateTicketsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + TicketRequests []*Task_CreateTicketTask } -func (x *Task_BulkCreateTicketsTask) GetTicketRequests() []*Task_CreateTicketTask { - if x != nil { - return x.TicketRequests - } - return nil +func (b0 Task_BulkCreateTicketsTask_builder) Build() *Task_BulkCreateTicketsTask { + m0 := &Task_BulkCreateTicketsTask{} + b, x := &b0, m0 + _, _ = b, x + x.TicketRequests = b.TicketRequests + return m0 } type Task_BulkGetTicketsTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` TicketRequests []*Task_GetTicketTask `protobuf:"bytes,1,rep,name=ticket_requests,json=ticketRequests,proto3" json:"ticket_requests,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1898,11 +3502,6 @@ func (x *Task_BulkGetTicketsTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_BulkGetTicketsTask.ProtoReflect.Descriptor instead. -func (*Task_BulkGetTicketsTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 12} -} - func (x *Task_BulkGetTicketsTask) GetTicketRequests() []*Task_GetTicketTask { if x != nil { return x.TicketRequests @@ -1910,8 +3509,26 @@ func (x *Task_BulkGetTicketsTask) GetTicketRequests() []*Task_GetTicketTask { return nil } +func (x *Task_BulkGetTicketsTask) SetTicketRequests(v []*Task_GetTicketTask) { + x.TicketRequests = v +} + +type Task_BulkGetTicketsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + TicketRequests []*Task_GetTicketTask +} + +func (b0 Task_BulkGetTicketsTask_builder) Build() *Task_BulkGetTicketsTask { + m0 := &Task_BulkGetTicketsTask{} + b, x := &b0, m0 + _, _ = b, x + x.TicketRequests = b.TicketRequests + return m0 +} + type Task_ListTicketSchemasTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -1942,11 +3559,6 @@ func (x *Task_ListTicketSchemasTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_ListTicketSchemasTask.ProtoReflect.Descriptor instead. -func (*Task_ListTicketSchemasTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 13} -} - func (x *Task_ListTicketSchemasTask) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -1954,8 +3566,26 @@ func (x *Task_ListTicketSchemasTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_ListTicketSchemasTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_ListTicketSchemasTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 Task_ListTicketSchemasTask_builder) Build() *Task_ListTicketSchemasTask { + m0 := &Task_ListTicketSchemasTask{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type Task_GetTicketTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` TicketId string `protobuf:"bytes,1,opt,name=ticket_id,json=ticketId,proto3" json:"ticket_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -1987,11 +3617,6 @@ func (x *Task_GetTicketTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_GetTicketTask.ProtoReflect.Descriptor instead. -func (*Task_GetTicketTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 14} -} - func (x *Task_GetTicketTask) GetTicketId() string { if x != nil { return x.TicketId @@ -2006,11 +3631,37 @@ func (x *Task_GetTicketTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_GetTicketTask) SetTicketId(v string) { + x.TicketId = v +} + +func (x *Task_GetTicketTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_GetTicketTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + TicketId string + Annotations []*anypb.Any +} + +func (b0 Task_GetTicketTask_builder) Build() *Task_GetTicketTask { + m0 := &Task_GetTicketTask{} + b, x := &b0, m0 + _, _ = b, x + x.TicketId = b.TicketId + x.Annotations = b.Annotations + return m0 +} + type Task_ActionListSchemasTask struct { - state protoimpl.MessageState `protogen:"open.v1"` - Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` + // Optional: filter to only return actions for a specific resource type + ResourceTypeId string `protobuf:"bytes,2,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Task_ActionListSchemasTask) Reset() { @@ -2038,11 +3689,6 @@ func (x *Task_ActionListSchemasTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_ActionListSchemasTask.ProtoReflect.Descriptor instead. -func (*Task_ActionListSchemasTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 15} -} - func (x *Task_ActionListSchemasTask) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -2050,8 +3696,40 @@ func (x *Task_ActionListSchemasTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_ActionListSchemasTask) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + +func (x *Task_ActionListSchemasTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Task_ActionListSchemasTask) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +type Task_ActionListSchemasTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + // Optional: filter to only return actions for a specific resource type + ResourceTypeId string +} + +func (b0 Task_ActionListSchemasTask_builder) Build() *Task_ActionListSchemasTask { + m0 := &Task_ActionListSchemasTask{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.ResourceTypeId = b.ResourceTypeId + return m0 +} + type Task_ActionGetSchemaTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -2083,11 +3761,6 @@ func (x *Task_ActionGetSchemaTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_ActionGetSchemaTask.ProtoReflect.Descriptor instead. -func (*Task_ActionGetSchemaTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 16} -} - func (x *Task_ActionGetSchemaTask) GetName() string { if x != nil { return x.Name @@ -2102,13 +3775,39 @@ func (x *Task_ActionGetSchemaTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_ActionGetSchemaTask) SetName(v string) { + x.Name = v +} + +func (x *Task_ActionGetSchemaTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_ActionGetSchemaTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + Annotations []*anypb.Any +} + +func (b0 Task_ActionGetSchemaTask_builder) Build() *Task_ActionGetSchemaTask { + m0 := &Task_ActionGetSchemaTask{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Annotations = b.Annotations + return m0 +} + type Task_ActionInvokeTask struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Args *structpb.Struct `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` - Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Args *structpb.Struct `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` + Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` + // Optional: if set, invokes a resource-scoped action + ResourceTypeId string `protobuf:"bytes,4,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Task_ActionInvokeTask) Reset() { @@ -2136,11 +3835,6 @@ func (x *Task_ActionInvokeTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_ActionInvokeTask.ProtoReflect.Descriptor instead. 
-func (*Task_ActionInvokeTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 17} -} - func (x *Task_ActionInvokeTask) GetName() string { if x != nil { return x.Name @@ -2162,8 +3856,63 @@ func (x *Task_ActionInvokeTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_ActionInvokeTask) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + +func (x *Task_ActionInvokeTask) SetName(v string) { + x.Name = v +} + +func (x *Task_ActionInvokeTask) SetArgs(v *structpb.Struct) { + x.Args = v +} + +func (x *Task_ActionInvokeTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Task_ActionInvokeTask) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +func (x *Task_ActionInvokeTask) HasArgs() bool { + if x == nil { + return false + } + return x.Args != nil +} + +func (x *Task_ActionInvokeTask) ClearArgs() { + x.Args = nil +} + +type Task_ActionInvokeTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Name string + Args *structpb.Struct + Annotations []*anypb.Any + // Optional: if set, invokes a resource-scoped action + ResourceTypeId string +} + +func (b0 Task_ActionInvokeTask_builder) Build() *Task_ActionInvokeTask { + m0 := &Task_ActionInvokeTask{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Args = b.Args + x.Annotations = b.Annotations + x.ResourceTypeId = b.ResourceTypeId + return m0 +} + type Task_ActionStatusTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -2196,11 +3945,6 @@ func (x *Task_ActionStatusTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_ActionStatusTask.ProtoReflect.Descriptor instead. -func (*Task_ActionStatusTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 18} -} - func (x *Task_ActionStatusTask) GetName() string { if x != nil { return x.Name @@ -2222,8 +3966,38 @@ func (x *Task_ActionStatusTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_ActionStatusTask) SetName(v string) { + x.Name = v +} + +func (x *Task_ActionStatusTask) SetId(v string) { + x.Id = v +} + +func (x *Task_ActionStatusTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_ActionStatusTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Id string + Annotations []*anypb.Any +} + +func (b0 Task_ActionStatusTask_builder) Build() *Task_ActionStatusTask { + m0 := &Task_ActionStatusTask{} + b, x := &b0, m0 + _, _ = b, x + x.Name = b.Name + x.Id = b.Id + x.Annotations = b.Annotations + return m0 +} + type Task_CreateSyncDiffTask struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Open to suggestions here BaseSyncId string `protobuf:"bytes,1,opt,name=base_sync_id,json=baseSyncId,proto3" json:"base_sync_id,omitempty"` NewSyncId string `protobuf:"bytes,2,opt,name=new_sync_id,json=newSyncId,proto3" json:"new_sync_id,omitempty"` @@ -2257,11 +4031,6 @@ func (x *Task_CreateSyncDiffTask) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_CreateSyncDiffTask.ProtoReflect.Descriptor instead. -func (*Task_CreateSyncDiffTask) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 19} -} - func (x *Task_CreateSyncDiffTask) GetBaseSyncId() string { if x != nil { return x.BaseSyncId @@ -2283,8 +4052,39 @@ func (x *Task_CreateSyncDiffTask) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_CreateSyncDiffTask) SetBaseSyncId(v string) { + x.BaseSyncId = v +} + +func (x *Task_CreateSyncDiffTask) SetNewSyncId(v string) { + x.NewSyncId = v +} + +func (x *Task_CreateSyncDiffTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_CreateSyncDiffTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Open to suggestions here + BaseSyncId string + NewSyncId string + Annotations []*anypb.Any +} + +func (b0 Task_CreateSyncDiffTask_builder) Build() *Task_CreateSyncDiffTask { + m0 := &Task_CreateSyncDiffTask{} + b, x := &b0, m0 + _, _ = b, x + x.BaseSyncId = b.BaseSyncId + x.NewSyncId = b.NewSyncId + x.Annotations = b.Annotations + return m0 +} + type Task_CompactSyncs struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` CompactableSyncs []*Task_CompactSyncs_CompactableSync `protobuf:"bytes,1,rep,name=compactable_syncs,json=compactableSyncs,proto3" json:"compactable_syncs,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -2316,11 +4116,6 @@ func (x *Task_CompactSyncs) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Task_CompactSyncs.ProtoReflect.Descriptor instead. -func (*Task_CompactSyncs) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 20} -} - func (x *Task_CompactSyncs) GetCompactableSyncs() []*Task_CompactSyncs_CompactableSync { if x != nil { return x.CompactableSyncs @@ -2335,8 +4130,32 @@ func (x *Task_CompactSyncs) GetAnnotations() []*anypb.Any { return nil } +func (x *Task_CompactSyncs) SetCompactableSyncs(v []*Task_CompactSyncs_CompactableSync) { + x.CompactableSyncs = v +} + +func (x *Task_CompactSyncs) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_CompactSyncs_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + CompactableSyncs []*Task_CompactSyncs_CompactableSync + Annotations []*anypb.Any +} + +func (b0 Task_CompactSyncs_builder) Build() *Task_CompactSyncs { + m0 := &Task_CompactSyncs{} + b, x := &b0, m0 + _, _ = b, x + x.CompactableSyncs = b.CompactableSyncs + x.Annotations = b.Annotations + return m0 +} + type Task_CompactSyncs_CompactableSync struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` FilePath string `protobuf:"bytes,1,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"` SyncId string `protobuf:"bytes,2,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` unknownFields protoimpl.UnknownFields @@ -2368,11 +4187,6 @@ func (x *Task_CompactSyncs_CompactableSync) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use Task_CompactSyncs_CompactableSync.ProtoReflect.Descriptor instead. -func (*Task_CompactSyncs_CompactableSync) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 20, 0} -} - func (x *Task_CompactSyncs_CompactableSync) GetFilePath() string { if x != nil { return x.FilePath @@ -2387,8 +4201,32 @@ func (x *Task_CompactSyncs_CompactableSync) GetSyncId() string { return "" } +func (x *Task_CompactSyncs_CompactableSync) SetFilePath(v string) { + x.FilePath = v +} + +func (x *Task_CompactSyncs_CompactableSync) SetSyncId(v string) { + x.SyncId = v +} + +type Task_CompactSyncs_CompactableSync_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + FilePath string + SyncId string +} + +func (b0 Task_CompactSyncs_CompactableSync_builder) Build() *Task_CompactSyncs_CompactableSync { + m0 := &Task_CompactSyncs_CompactableSync{} + b, x := &b0, m0 + _, _ = b, x + x.FilePath = b.FilePath + x.SyncId = b.SyncId + return m0 +} + type BatonServiceHelloRequest_BuildInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` LangVersion string `protobuf:"bytes,1,opt,name=lang_version,json=langVersion,proto3" json:"lang_version,omitempty"` Package string `protobuf:"bytes,2,opt,name=package,proto3" json:"package,omitempty"` PackageVersion string `protobuf:"bytes,3,opt,name=package_version,json=packageVersion,proto3" json:"package_version,omitempty"` @@ -2421,11 +4259,6 @@ func (x *BatonServiceHelloRequest_BuildInfo) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use BatonServiceHelloRequest_BuildInfo.ProtoReflect.Descriptor instead. -func (*BatonServiceHelloRequest_BuildInfo) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{1, 0} -} - func (x *BatonServiceHelloRequest_BuildInfo) GetLangVersion() string { if x != nil { return x.LangVersion @@ -2447,8 +4280,38 @@ func (x *BatonServiceHelloRequest_BuildInfo) GetPackageVersion() string { return "" } +func (x *BatonServiceHelloRequest_BuildInfo) SetLangVersion(v string) { + x.LangVersion = v +} + +func (x *BatonServiceHelloRequest_BuildInfo) SetPackage(v string) { + x.Package = v +} + +func (x *BatonServiceHelloRequest_BuildInfo) SetPackageVersion(v string) { + x.PackageVersion = v +} + +type BatonServiceHelloRequest_BuildInfo_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + LangVersion string + Package string + PackageVersion string +} + +func (b0 BatonServiceHelloRequest_BuildInfo_builder) Build() *BatonServiceHelloRequest_BuildInfo { + m0 := &BatonServiceHelloRequest_BuildInfo{} + b, x := &b0, m0 + _, _ = b, x + x.LangVersion = b.LangVersion + x.Package = b.Package + x.PackageVersion = b.PackageVersion + return m0 +} + type BatonServiceHelloRequest_OSInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` Os string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"` Platform string `protobuf:"bytes,3,opt,name=platform,proto3" json:"platform,omitempty"` @@ -2486,11 +4349,6 @@ func (x *BatonServiceHelloRequest_OSInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BatonServiceHelloRequest_OSInfo.ProtoReflect.Descriptor instead. -func (*BatonServiceHelloRequest_OSInfo) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{1, 1} -} - func (x *BatonServiceHelloRequest_OSInfo) GetHostname() string { if x != nil { return x.Hostname @@ -2547,8 +4405,68 @@ func (x *BatonServiceHelloRequest_OSInfo) GetVirtualizationSystem() string { return "" } +func (x *BatonServiceHelloRequest_OSInfo) SetHostname(v string) { + x.Hostname = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetOs(v string) { + x.Os = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetPlatform(v string) { + x.Platform = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetPlatformVersion(v string) { + x.PlatformVersion = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetPlatformFamily(v string) { + x.PlatformFamily = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetKernelVersion(v string) { + x.KernelVersion = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetKernelArch(v string) { + x.KernelArch = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetVirtualizationSystem(v string) { + x.VirtualizationSystem = v +} + +type BatonServiceHelloRequest_OSInfo_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Hostname string + Os string + Platform string + PlatformVersion string + PlatformFamily string + KernelVersion string + KernelArch string + VirtualizationSystem string +} + +func (b0 BatonServiceHelloRequest_OSInfo_builder) Build() *BatonServiceHelloRequest_OSInfo { + m0 := &BatonServiceHelloRequest_OSInfo{} + b, x := &b0, m0 + _, _ = b, x + x.Hostname = b.Hostname + x.Os = b.Os + x.Platform = b.Platform + x.PlatformVersion = b.PlatformVersion + x.PlatformFamily = b.PlatformFamily + x.KernelVersion = b.KernelVersion + x.KernelArch = b.KernelArch + x.VirtualizationSystem = b.VirtualizationSystem + return m0 +} + type BatonServiceUploadAssetRequest_UploadMetadata struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -2581,11 +4499,6 @@ func (x *BatonServiceUploadAssetRequest_UploadMetadata) ProtoReflect() protorefl return mi.MessageOf(x) } -// Deprecated: Use BatonServiceUploadAssetRequest_UploadMetadata.ProtoReflect.Descriptor instead. 
-func (*BatonServiceUploadAssetRequest_UploadMetadata) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{7, 0} -} - func (x *BatonServiceUploadAssetRequest_UploadMetadata) GetHostId() string { if x != nil { return x.HostId @@ -2607,8 +4520,38 @@ func (x *BatonServiceUploadAssetRequest_UploadMetadata) GetAnnotations() []*anyp return nil } +func (x *BatonServiceUploadAssetRequest_UploadMetadata) SetHostId(v string) { + x.HostId = v +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) SetTaskId(v string) { + x.TaskId = v +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type BatonServiceUploadAssetRequest_UploadMetadata_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + HostId string + TaskId string + Annotations []*anypb.Any +} + +func (b0 BatonServiceUploadAssetRequest_UploadMetadata_builder) Build() *BatonServiceUploadAssetRequest_UploadMetadata { + m0 := &BatonServiceUploadAssetRequest_UploadMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.HostId = b.HostId + x.TaskId = b.TaskId + x.Annotations = b.Annotations + return m0 +} + type BatonServiceUploadAssetRequest_UploadData struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // 1MB limit Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields @@ -2640,11 +4583,6 @@ func (x *BatonServiceUploadAssetRequest_UploadData) ProtoReflect() protoreflect. return mi.MessageOf(x) } -// Deprecated: Use BatonServiceUploadAssetRequest_UploadData.ProtoReflect.Descriptor instead. -func (*BatonServiceUploadAssetRequest_UploadData) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{7, 1} -} - func (x *BatonServiceUploadAssetRequest_UploadData) GetData() []byte { if x != nil { return x.Data @@ -2652,8 +4590,30 @@ func (x *BatonServiceUploadAssetRequest_UploadData) GetData() []byte { return nil } +func (x *BatonServiceUploadAssetRequest_UploadData) SetData(v []byte) { + if v == nil { + v = []byte{} + } + x.Data = v +} + +type BatonServiceUploadAssetRequest_UploadData_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // 1MB limit + Data []byte +} + +func (b0 BatonServiceUploadAssetRequest_UploadData_builder) Build() *BatonServiceUploadAssetRequest_UploadData { + m0 := &BatonServiceUploadAssetRequest_UploadData{} + b, x := &b0, m0 + _, _ = b, x + x.Data = b.Data + return m0 +} + type BatonServiceUploadAssetRequest_UploadEOF struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Sha256Checksum []byte `protobuf:"bytes,1,opt,name=sha256_checksum,json=sha256Checksum,proto3" json:"sha256_checksum,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -2685,11 +4645,6 @@ func (x *BatonServiceUploadAssetRequest_UploadEOF) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use BatonServiceUploadAssetRequest_UploadEOF.ProtoReflect.Descriptor instead. 
-func (*BatonServiceUploadAssetRequest_UploadEOF) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{7, 2} -} - func (x *BatonServiceUploadAssetRequest_UploadEOF) GetSha256Checksum() []byte { if x != nil { return x.Sha256Checksum @@ -2704,8 +4659,35 @@ func (x *BatonServiceUploadAssetRequest_UploadEOF) GetAnnotations() []*anypb.Any return nil } +func (x *BatonServiceUploadAssetRequest_UploadEOF) SetSha256Checksum(v []byte) { + if v == nil { + v = []byte{} + } + x.Sha256Checksum = v +} + +func (x *BatonServiceUploadAssetRequest_UploadEOF) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type BatonServiceUploadAssetRequest_UploadEOF_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Sha256Checksum []byte + Annotations []*anypb.Any +} + +func (b0 BatonServiceUploadAssetRequest_UploadEOF_builder) Build() *BatonServiceUploadAssetRequest_UploadEOF { + m0 := &BatonServiceUploadAssetRequest_UploadEOF{} + b, x := &b0, m0 + _, _ = b, x + x.Sha256Checksum = b.Sha256Checksum + x.Annotations = b.Annotations + return m0 +} + type BatonServiceFinishTaskRequest_Error struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` NonRetryable bool `protobuf:"varint,1,opt,name=non_retryable,json=nonRetryable,proto3" json:"non_retryable,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` // The response from the connector, if any. @@ -2739,11 +4721,6 @@ func (x *BatonServiceFinishTaskRequest_Error) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use BatonServiceFinishTaskRequest_Error.ProtoReflect.Descriptor instead. -func (*BatonServiceFinishTaskRequest_Error) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{9, 0} -} - func (x *BatonServiceFinishTaskRequest_Error) GetNonRetryable() bool { if x != nil { return x.NonRetryable @@ -2765,8 +4742,50 @@ func (x *BatonServiceFinishTaskRequest_Error) GetResponse() *anypb.Any { return nil } +func (x *BatonServiceFinishTaskRequest_Error) SetNonRetryable(v bool) { + x.NonRetryable = v +} + +func (x *BatonServiceFinishTaskRequest_Error) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *BatonServiceFinishTaskRequest_Error) SetResponse(v *anypb.Any) { + x.Response = v +} + +func (x *BatonServiceFinishTaskRequest_Error) HasResponse() bool { + if x == nil { + return false + } + return x.Response != nil +} + +func (x *BatonServiceFinishTaskRequest_Error) ClearResponse() { + x.Response = nil +} + +type BatonServiceFinishTaskRequest_Error_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + NonRetryable bool + Annotations []*anypb.Any + // The response from the connector, if any. + Response *anypb.Any +} + +func (b0 BatonServiceFinishTaskRequest_Error_builder) Build() *BatonServiceFinishTaskRequest_Error { + m0 := &BatonServiceFinishTaskRequest_Error{} + b, x := &b0, m0 + _, _ = b, x + x.NonRetryable = b.NonRetryable + x.Annotations = b.Annotations + x.Response = b.Response + return m0 +} + type BatonServiceFinishTaskRequest_Success struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` // The response from the connector, if any. 
Response *anypb.Any `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` @@ -2799,11 +4818,6 @@ func (x *BatonServiceFinishTaskRequest_Success) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use BatonServiceFinishTaskRequest_Success.ProtoReflect.Descriptor instead. -func (*BatonServiceFinishTaskRequest_Success) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{9, 1} -} - func (x *BatonServiceFinishTaskRequest_Success) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -2818,630 +4832,257 @@ func (x *BatonServiceFinishTaskRequest_Success) GetResponse() *anypb.Any { return nil } -var File_c1_connectorapi_baton_v1_baton_proto protoreflect.FileDescriptor +func (x *BatonServiceFinishTaskRequest_Success) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} -var file_c1_connectorapi_baton_v1_baton_proto_rawDesc = string([]byte{ - 0x0a, 0x24, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x1a, 0x1f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, - 0x32, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x21, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, - 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, - 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1c, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, - 0x76, 0x32, 0x2f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x27, 0x0a, 0x04, - 0x54, 0x61, 0x73, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x3d, 0x0a, 0x06, 
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x6f, 0x6e, 0x65, 0x18, 0x64, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x2e, 0x4e, 0x6f, 0x6e, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x6f, - 0x6e, 0x65, 0x12, 0x40, 0x0a, 0x05, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x18, 0x65, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x05, 0x68, - 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x4a, 0x0a, 0x09, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x66, 0x75, 0x6c, - 0x6c, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x46, 0x75, 0x6c, 0x6c, - 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x46, 0x75, 0x6c, 0x6c, - 0x12, 0x40, 0x0a, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, - 0x47, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x05, 0x67, 0x72, 0x61, - 0x6e, 0x74, 0x12, 0x43, 0x0a, 0x06, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x18, 0x68, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, - 0x73, 0x6b, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, - 0x06, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x59, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x61, 0x73, - 0x6b, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x5c, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, - 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, - 0x52, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, - 0x12, 0x5c, 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x0e, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x65, - 0x0a, 0x12, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x18, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x52, 0x6f, 0x74, 0x61, 0x74, - 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x54, 0x61, 0x73, 0x6b, - 0x48, 0x00, 0x52, 0x11, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x4d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x66, - 0x65, 0x65, 0x64, 0x18, 0x6d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, - 0x65, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x46, 0x65, 0x65, 0x64, 0x12, 0x5f, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x6e, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, - 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x73, - 0x6b, 0x48, 0x00, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x66, 0x0a, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x69, - 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x6f, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, - 0x73, 0x6b, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x11, 0x6c, 0x69, 0x73, 0x74, - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x4d, 0x0a, - 0x0a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x70, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x48, - 0x00, 0x52, 0x09, 0x67, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x66, 0x0a, 0x13, - 0x62, 0x75, 0x6c, 0x6b, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x63, 0x6b, - 
0x65, 0x74, 0x73, 0x18, 0x71, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x42, 0x75, 0x6c, 0x6b, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x48, - 0x00, 0x52, 0x11, 0x62, 0x75, 0x6c, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x12, 0x5d, 0x0a, 0x10, 0x62, 0x75, 0x6c, 0x6b, 0x5f, 0x67, 0x65, 0x74, - 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x72, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, - 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x42, - 0x75, 0x6c, 0x6b, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, 0x61, 0x73, - 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x62, 0x75, 0x6c, 0x6b, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x12, 0x66, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x73, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, - 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x60, 0x0a, 0x11, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x18, 0x74, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x56, 0x0a, - 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x18, 0x75, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x76, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, - 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, - 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x5d, 0x0a, - 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x64, 0x69, 0x66, - 0x66, 0x18, 0x77, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, - 0x6e, 0x63, 0x44, 0x69, 0x66, 0x66, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x69, 0x66, 0x66, 0x12, 0x52, 0x0a, 0x0d, - 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x73, 0x18, 0x78, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, - 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, - 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x1a, 0x42, 0x0a, 0x08, 0x4e, 0x6f, 0x6e, 0x65, 0x54, 0x61, - 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x43, 0x0a, 0x09, 0x48, 0x65, - 0x6c, 0x6c, 0x6f, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, - 0xb5, 0x01, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x46, 0x75, 0x6c, 0x6c, 0x54, 0x61, 0x73, 0x6b, - 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x6b, 0x69, 0x70, - 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, - 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, - 0x67, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x6b, - 0x69, 0x70, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x41, 0x6e, - 0x64, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x1a, 0x7e, 0x0a, 0x0d, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x46, 0x65, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x35, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x72, 
0x74, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x74, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, - 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, - 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x36, - 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x72, 0x0a, - 0x0a, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x2c, 0x0a, 0x05, 0x67, - 0x72, 0x61, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, - 0x6e, 0x74, 0x52, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x1a, 0xf9, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3f, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 
0x50, 0x0a, 0x12, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0x4b, 0x0a, - 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x9d, 0x01, 0x0a, 0x12, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, - 0x49, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x1a, 0xfa, 0x01, 0x0a, 0x15, 0x52, - 0x6f, 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, - 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x49, 0x64, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xd5, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, - 
0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x45, 0x0a, 0x0e, - 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0d, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x74, 0x69, 0x63, 0x6b, 0x65, - 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, - 0x71, 0x0a, 0x15, 0x42, 0x75, 0x6c, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x58, 0x0a, 0x0f, 0x74, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, - 0x73, 0x6b, 0x52, 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x12, 0x42, 0x75, 0x6c, 0x6b, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x55, 0x0a, 0x0f, 0x74, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, - 0x4f, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x64, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x73, - 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x36, - 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 
0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x4f, 0x0a, 0x15, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x61, 0x0a, 0x13, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x10, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, - 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6e, 0x0a, 0x10, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x8e, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x69, 0x66, 0x66, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x20, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x49, - 0x64, 0x12, 0x1e, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x77, 0x53, 0x79, 0x6e, 0x63, 0x49, - 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xf9, 0x01, 0x0a, 0x0c, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x63, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x6f, - 0x6d, 0x70, 0x61, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x53, 0x79, 0x6e, - 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x79, - 0x6e, 0x63, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x79, 0x6e, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x47, 0x0a, 0x0f, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, - 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x17, 0x0a, 0x07, - 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x73, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x55, 0x4e, 0x4e, - 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x46, 0x49, 0x4e, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x04, 0x42, 0x0b, 0x0a, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xf3, 0x07, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x6f, - 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, - 0x02, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x07, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x19, 0xfa, 0x42, 0x16, 0x72, - 0x14, 0x32, 0x12, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, - 0x32, 0x37, 0x7d, 0x7c, 0x24, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x65, 0x0a, - 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 
0x2e, 0x42, 0x61, 0x74, - 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x5c, 0x0a, 0x07, 0x6f, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x6c, - 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x53, 0x49, 0x6e, 0x66, 0x6f, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x6f, 0x73, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x5b, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x11, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x40, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, - 0x01, 0x02, 0x10, 0x10, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x09, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x2d, 0x0a, 0x0c, 0x6c, 0x61, 0x6e, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, - 0x02, 0x52, 0x0b, 0x6c, 0x61, 0x6e, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, - 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x07, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x0f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, - 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x0e, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x81, 0x03, 0x0a, 0x06, 0x4f, 0x53, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x26, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, - 0x80, 0x02, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x02, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, - 0x01, 0x18, 0x80, 0x02, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x26, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, - 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, - 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 
0x12, 0x35, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, - 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x0f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x0e, 0x70, 0x6c, - 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x0e, - 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, - 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x2b, 0x0a, 0x0b, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, - 0x52, 0x0a, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x63, 0x68, 0x12, 0x3f, 0x0a, 0x15, - 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, - 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, - 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x14, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x53, 0x0a, - 0x19, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x6c, - 0x6c, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x41, 0x0a, 0x1a, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x23, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x06, 0x68, - 0x6f, 0x73, 0x74, 0x49, 0x64, 0x22, 0x83, 0x02, 0x0a, 0x1b, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x52, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x09, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x6f, 0x6c, - 0x6c, 0x12, 0x40, 0x0a, 0x0e, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, - 0x65, 0x61, 0x74, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, - 0x65, 0x61, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x1c, - 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x72, - 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x07, - 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, - 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x31, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, - 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, 0x32, 0x37, 0x7d, 0x24, 0x52, 0x06, 0x74, 0x61, - 0x73, 0x6b, 0x49, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x10, 0x10, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x1d, 0x42, 0x61, 0x74, 0x6f, 0x6e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x65, 0x78, - 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0xa3, 0x05, 0x0a, 0x1e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x65, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x65, 0x74, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, - 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x56, 0x0a, 0x03, 0x65, 0x6f, 0x66, 0x18, 0x66, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, - 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x55, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x45, 0x4f, 0x46, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6f, 0x66, 0x1a, 0xaa, 0x01, - 0x0a, 0x0e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x23, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x06, 0x68, - 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13, 0x32, 0x11, 0x5e, - 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, 0x32, 0x37, 0x7d, 0x24, - 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x10, 0x10, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x2d, 0x0a, 0x0a, 0x55, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x7a, 0x06, 0x10, 0x01, 0x18, - 0x80, 0x80, 0x40, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x7f, 0x0a, 0x09, 0x55, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x45, 0x4f, 0x46, 0x12, 0x30, 0x0a, 0x0f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, - 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x7a, 0x02, 0x68, 0x20, 0x52, 0x0e, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x40, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x10, 0x10, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0a, 0x0a, 0x03, 0x6d, 0x73, - 0x67, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x59, 0x0a, 0x1f, 0x42, 0x61, 
0x74, 0x6f, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x65, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x8d, 0x05, 0x0a, 0x1d, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, - 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13, - 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, 0x32, - 0x37, 0x7d, 0x24, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x55, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, - 0x6e, 0x69, 0x73, 0x68, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x5b, - 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x48, 0x00, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x1a, 0xa0, 0x01, 0x0a, 0x05, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, - 0x72, 0x79, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6e, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x10, 0x10, 0x52, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x08, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 
0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x7d, - 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x10, 0x10, 0x52, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, - 0x0b, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x03, 0xf8, 0x42, - 0x01, 0x22, 0x58, 0x0a, 0x1e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x16, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x62, - 0x75, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x32, 0x80, 0x06, 0x0a, 0x0c, 0x42, 0x61, 0x74, 0x6f, 0x6e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x72, 0x0a, 0x05, 0x48, 0x65, 0x6c, 0x6c, 0x6f, - 0x12, 0x32, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, - 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, - 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x6c, 0x6c, - 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x07, 0x47, - 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, - 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, - 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 
0x00, 0x12, 0x7e, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, - 0x61, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, - 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, - 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x81, 0x01, 0x0a, 0x0a, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, - 0x54, 0x61, 0x73, 0x6b, 0x12, 0x37, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6e, 0x69, - 0x73, 0x68, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, - 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x86, 0x01, 0x0a, 0x0b, 0x55, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x65, 0x74, 0x12, 0x38, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x42, - 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, - 0x64, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x28, 0x01, 0x12, 0x75, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, - 0x67, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, - 0x6c, 0x61, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x6e, 0x65, 0x2f, - 0x63, 0x31, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2f, - 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x33, -}) +func (x *BatonServiceFinishTaskRequest_Success) SetResponse(v *anypb.Any) { + x.Response = v +} -var ( - file_c1_connectorapi_baton_v1_baton_proto_rawDescOnce sync.Once - file_c1_connectorapi_baton_v1_baton_proto_rawDescData []byte -) +func (x *BatonServiceFinishTaskRequest_Success) HasResponse() bool { + if x == nil { + return false + } + return x.Response != nil +} + +func (x *BatonServiceFinishTaskRequest_Success) ClearResponse() { + x.Response = nil +} + +type BatonServiceFinishTaskRequest_Success_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + // The response from the connector, if any. + Response *anypb.Any +} -func file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP() []byte { - file_c1_connectorapi_baton_v1_baton_proto_rawDescOnce.Do(func() { - file_c1_connectorapi_baton_v1_baton_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_baton_proto_rawDesc), len(file_c1_connectorapi_baton_v1_baton_proto_rawDesc))) - }) - return file_c1_connectorapi_baton_v1_baton_proto_rawDescData +func (b0 BatonServiceFinishTaskRequest_Success_builder) Build() *BatonServiceFinishTaskRequest_Success { + m0 := &BatonServiceFinishTaskRequest_Success{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.Response = b.Response + return m0 } +var File_c1_connectorapi_baton_v1_baton_proto protoreflect.FileDescriptor + +const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + + "\n" + + "$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\x80)\n" + + "\x04Task\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12=\n" + + "\x06status\x18\x02 \x01(\x0e2%.c1.connectorapi.baton.v1.Task.StatusR\x06status\x12=\n" + + "\x04none\x18d \x01(\v2'.c1.connectorapi.baton.v1.Task.NoneTaskH\x00R\x04none\x12@\n" + + "\x05hello\x18e \x01(\v2(.c1.connectorapi.baton.v1.Task.HelloTaskH\x00R\x05hello\x12J\n" + + "\tsync_full\x18f \x01(\v2+.c1.connectorapi.baton.v1.Task.SyncFullTaskH\x00R\bsyncFull\x12@\n" + + "\x05grant\x18g \x01(\v2(.c1.connectorapi.baton.v1.Task.GrantTaskH\x00R\x05grant\x12C\n" + + "\x06revoke\x18h \x01(\v2).c1.connectorapi.baton.v1.Task.RevokeTaskH\x00R\x06revoke\x12Y\n" + + "\x0ecreate_account\x18i \x01(\v20.c1.connectorapi.baton.v1.Task.CreateAccountTaskH\x00R\rcreateAccount\x12\\\n" + + "\x0fcreate_resource\x18j \x01(\v21.c1.connectorapi.baton.v1.Task.CreateResourceTaskH\x00R\x0ecreateResource\x12\\\n" + + "\x0fdelete_resource\x18k \x01(\v21.c1.connectorapi.baton.v1.Task.DeleteResourceTaskH\x00R\x0edeleteResource\x12e\n" + + "\x12rotate_credentials\x18l \x01(\v24.c1.connectorapi.baton.v1.Task.RotateCredentialsTaskH\x00R\x11rotateCredentials\x12M\n" + + "\n" + + "event_feed\x18m \x01(\v2,.c1.connectorapi.baton.v1.Task.EventFeedTaskH\x00R\teventFeed\x12_\n" + + "\x12create_ticket_task\x18n \x01(\v2/.c1.connectorapi.baton.v1.Task.CreateTicketTaskH\x00R\x10createTicketTask\x12f\n" + + "\x13list_ticket_schemas\x18o \x01(\v24.c1.connectorapi.baton.v1.Task.ListTicketSchemasTaskH\x00R\x11listTicketSchemas\x12M\n" + + "\n" + + 
"get_ticket\x18p \x01(\v2,.c1.connectorapi.baton.v1.Task.GetTicketTaskH\x00R\tgetTicket\x12f\n" + + "\x13bulk_create_tickets\x18q \x01(\v24.c1.connectorapi.baton.v1.Task.BulkCreateTicketsTaskH\x00R\x11bulkCreateTickets\x12]\n" + + "\x10bulk_get_tickets\x18r \x01(\v21.c1.connectorapi.baton.v1.Task.BulkGetTicketsTaskH\x00R\x0ebulkGetTickets\x12f\n" + + "\x13action_list_schemas\x18s \x01(\v24.c1.connectorapi.baton.v1.Task.ActionListSchemasTaskH\x00R\x11actionListSchemas\x12`\n" + + "\x11action_get_schema\x18t \x01(\v22.c1.connectorapi.baton.v1.Task.ActionGetSchemaTaskH\x00R\x0factionGetSchema\x12V\n" + + "\raction_invoke\x18u \x01(\v2/.c1.connectorapi.baton.v1.Task.ActionInvokeTaskH\x00R\factionInvoke\x12V\n" + + "\raction_status\x18v \x01(\v2/.c1.connectorapi.baton.v1.Task.ActionStatusTaskH\x00R\factionStatus\x12]\n" + + "\x10create_sync_diff\x18w \x01(\v21.c1.connectorapi.baton.v1.Task.CreateSyncDiffTaskH\x00R\x0ecreateSyncDiff\x12R\n" + + "\rcompact_syncs\x18x \x01(\v2+.c1.connectorapi.baton.v1.Task.CompactSyncsH\x00R\fcompactSyncs\x12\x14\n" + + "\x05debug\x18\x03 \x01(\bR\x05debug\x1aB\n" + + "\bNoneTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1aC\n" + + "\tHelloTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\x88\x02\n" + + "\fSyncFullTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12,\n" + + "\x12skip_expand_grants\x18\x02 \x01(\bR\x10skipExpandGrants\x12?\n" + + "\x1cskip_entitlements_and_grants\x18\x03 \x01(\bR\x19skipEntitlementsAndGrants\x12Q\n" + + "\x17targeted_sync_resources\x18\x04 \x03(\v2\x19.c1.connector.v2.ResourceR\x15targetedSyncResources\x1a~\n" + + "\rEventFeedTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x125\n" + + "\bstart_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x1a\xf3\x01\n" + + "\tGrantTask\x12>\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementR\ventitlement\x127\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceR\tprincipal\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x125\n" + + "\bduration\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\bduration\x1ar\n" + + "\n" + + "RevokeTask\x12,\n" + + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantR\x05grant\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xf9\x01\n" + + "\x11CreateAccountTask\x12?\n" + + "\faccount_info\x18\x01 \x01(\v2\x1c.c1.connector.v2.AccountInfoR\vaccountInfo\x12Q\n" + + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x1aK\n" + + "\x12CreateResourceTask\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x1a\x9d\x01\n" + + "\x12DeleteResourceTask\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\x1a\xfa\x01\n" + + "\x15RotateCredentialsTask\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12Q\n" + + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x1a\xd5\x01\n" + + 
"\x10CreateTicketTask\x12E\n" + + "\x0eticket_request\x18\x01 \x01(\v2\x1e.c1.connector.v2.TicketRequestR\rticketRequest\x12B\n" + + "\rticket_schema\x18\x02 \x01(\v2\x1d.c1.connector.v2.TicketSchemaR\fticketSchema\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1aq\n" + + "\x15BulkCreateTicketsTask\x12X\n" + + "\x0fticket_requests\x18\x01 \x03(\v2/.c1.connectorapi.baton.v1.Task.CreateTicketTaskR\x0eticketRequests\x1ak\n" + + "\x12BulkGetTicketsTask\x12U\n" + + "\x0fticket_requests\x18\x01 \x03(\v2,.c1.connectorapi.baton.v1.Task.GetTicketTaskR\x0eticketRequests\x1aO\n" + + "\x15ListTicketSchemasTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1ad\n" + + "\rGetTicketTask\x12\x1b\n" + + "\tticket_id\x18\x01 \x01(\tR\bticketId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1ay\n" + + "\x15ActionListSchemasTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12(\n" + + "\x10resource_type_id\x18\x02 \x01(\tR\x0eresourceTypeId\x1aa\n" + + "\x13ActionGetSchemaTask\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xb5\x01\n" + + "\x10ActionInvokeTask\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12+\n" + + "\x04args\x18\x02 \x01(\v2\x17.google.protobuf.StructR\x04args\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12(\n" + + "\x10resource_type_id\x18\x04 \x01(\tR\x0eresourceTypeId\x1an\n" + + "\x10ActionStatusTask\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\x8e\x01\n" + + "\x12CreateSyncDiffTask\x12 \n" + + "\fbase_sync_id\x18\x01 \x01(\tR\n" + + "baseSyncId\x12\x1e\n" + + "\vnew_sync_id\x18\x02 \x01(\tR\tnewSyncId\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xf9\x01\n" + + "\fCompactSyncs\x12h\n" + + "\x11compactable_syncs\x18\x01 \x03(\v2;.c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSyncR\x10compactableSyncs\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1aG\n" + + "\x0fCompactableSync\x12\x1b\n" + + "\tfile_path\x18\x01 \x01(\tR\bfilePath\x12\x17\n" + + "\async_id\x18\x02 \x01(\tR\x06syncId\"s\n" + + "\x06Status\x12\x16\n" + + "\x12STATUS_UNSPECIFIED\x10\x00\x12\x12\n" + + "\x0eSTATUS_PENDING\x10\x01\x12\x14\n" + + "\x10STATUS_SCHEDULED\x10\x02\x12\x12\n" + + "\x0eSTATUS_RUNNING\x10\x03\x12\x13\n" + + "\x0fSTATUS_FINISHED\x10\x04B\v\n" + + "\ttask_type\"\xf3\a\n" + + "\x18BatonServiceHelloRequest\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\x122\n" + + "\atask_id\x18\x02 \x01(\tB\x19\xfaB\x16r\x142\x12^[a-zA-Z0-9]{27}|$R\x06taskId\x12e\n" + + "\n" + + "build_info\x18\x03 \x01(\v2<.c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfoB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tbuildInfo\x12\\\n" + + "\aos_info\x18\x04 \x01(\v29.c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfoB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x06osInfo\x12[\n" + + "\x12connector_metadata\x18\x05 \x01(\v2\".c1.connector.v2.ConnectorMetadataB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x11connectorMetadata\x12@\n" + + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\x1a\x95\x01\n" + + "\tBuildInfo\x12-\n" + + "\flang_version\x18\x01 \x01(\tB\n" + + 
"\xfaB\ar\x05\x10\x01\x18\x80\x02R\vlangVersion\x12$\n" + + "\apackage\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\apackage\x123\n" + + "\x0fpackage_version\x18\x03 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x0epackageVersion\x1a\x81\x03\n" + + "\x06OSInfo\x12&\n" + + "\bhostname\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\bhostname\x12\x1a\n" + + "\x02os\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x02os\x12&\n" + + "\bplatform\x18\x03 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\bplatform\x125\n" + + "\x10platform_version\x18\x04 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x0fplatformVersion\x123\n" + + "\x0fplatform_family\x18\x05 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x0eplatformFamily\x121\n" + + "\x0ekernel_version\x18\x06 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\rkernelVersion\x12+\n" + + "\vkernel_arch\x18\a \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\n" + + "kernelArch\x12?\n" + + "\x15virtualization_system\x18\b \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x14virtualizationSystem\"S\n" + + "\x19BatonServiceHelloResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"A\n" + + "\x1aBatonServiceGetTaskRequest\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\"\x83\x02\n" + + "\x1bBatonServiceGetTaskResponse\x122\n" + + "\x04task\x18\x01 \x01(\v2\x1e.c1.connectorapi.baton.v1.TaskR\x04task\x126\n" + + "\tnext_poll\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\bnextPoll\x12@\n" + + "\x0enext_heartbeat\x18\x03 \x01(\v2\x19.google.protobuf.DurationR\rnextHeartbeat\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xb8\x01\n" + + "\x1cBatonServiceHeartbeatRequest\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\x121\n" + + "\atask_id\x18\x02 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06taskId\x12@\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\"\xb7\x01\n" + + "\x1dBatonServiceHeartbeatResponse\x12@\n" + + "\x0enext_heartbeat\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\rnextHeartbeat\x12\x1c\n" + + "\tcancelled\x18\x02 \x01(\bR\tcancelled\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa3\x05\n" + + "\x1eBatonServiceUploadAssetRequest\x12e\n" + + "\bmetadata\x18d \x01(\v2G.c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadataH\x00R\bmetadata\x12Y\n" + + "\x04data\x18e \x01(\v2C.c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadDataH\x00R\x04data\x12V\n" + + "\x03eof\x18f \x01(\v2B.c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOFH\x00R\x03eof\x1a\xaa\x01\n" + + "\x0eUploadMetadata\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\x121\n" + + "\atask_id\x18\x02 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06taskId\x12@\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\x1a-\n" + + "\n" + + "UploadData\x12\x1f\n" + + "\x04data\x18\x01 \x01(\fB\v\xfaB\bz\x06\x10\x01\x18\x80\x80@R\x04data\x1a\x7f\n" + + "\tUploadEOF\x120\n" + + "\x0fsha256_checksum\x18\x01 \x01(\fB\a\xfaB\x04z\x02h R\x0esha256Checksum\x12@\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotationsB\n" + + "\n" + + "\x03msg\x12\x03\xf8B\x01\"Y\n" + + 
"\x1fBatonServiceUploadAssetResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x8d\x05\n" + + "\x1dBatonServiceFinishTaskRequest\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\x121\n" + + "\atask_id\x18\x02 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06taskId\x12*\n" + + "\x06status\x18\x03 \x01(\v2\x12.google.rpc.StatusR\x06status\x12U\n" + + "\x05error\x18d \x01(\v2=.c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.ErrorH\x00R\x05error\x12[\n" + + "\asuccess\x18e \x01(\v2?.c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.SuccessH\x00R\asuccess\x1a\xa0\x01\n" + + "\x05Error\x12#\n" + + "\rnon_retryable\x18\x01 \x01(\bR\fnonRetryable\x12@\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\x120\n" + + "\bresponse\x18\x03 \x01(\v2\x14.google.protobuf.AnyR\bresponse\x1a}\n" + + "\aSuccess\x12@\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\x120\n" + + "\bresponse\x18\x02 \x01(\v2\x14.google.protobuf.AnyR\bresponseB\x12\n" + + "\vfinal_state\x12\x03\xf8B\x01\"X\n" + + "\x1eBatonServiceFinishTaskResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x17\n" + + "\x15StartDebuggingRequest\"0\n" + + "\x16StartDebuggingResponse\x12\x16\n" + + "\x06status\x18\x01 \x01(\bR\x06status2\x80\x06\n" + + "\fBatonService\x12r\n" + + "\x05Hello\x122.c1.connectorapi.baton.v1.BatonServiceHelloRequest\x1a3.c1.connectorapi.baton.v1.BatonServiceHelloResponse\"\x00\x12x\n" + + "\aGetTask\x124.c1.connectorapi.baton.v1.BatonServiceGetTaskRequest\x1a5.c1.connectorapi.baton.v1.BatonServiceGetTaskResponse\"\x00\x12~\n" + + "\tHeartbeat\x126.c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest\x1a7.c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse\"\x00\x12\x81\x01\n" + + "\n" + + "FinishTask\x127.c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest\x1a8.c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse\"\x00\x12\x86\x01\n" + + "\vUploadAsset\x128.c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest\x1a9.c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse\"\x00(\x01\x12u\n" + + "\x0eStartDebugging\x12/.c1.connectorapi.baton.v1.StartDebuggingRequest\x1a0.c1.connectorapi.baton.v1.StartDebuggingResponse\"\x00B7Z5gitlab.com/ductone/c1/pkg/pb/c1/connectorapi/baton/v1b\x06proto3" + var file_c1_connectorapi_baton_v1_baton_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_c1_connectorapi_baton_v1_baton_proto_msgTypes = make([]protoimpl.MessageInfo, 42) var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{ @@ -3492,9 +5133,9 @@ var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{ (*anypb.Any)(nil), // 44: google.protobuf.Any (*durationpb.Duration)(nil), // 45: google.protobuf.Duration (*status.Status)(nil), // 46: google.rpc.Status - (*timestamppb.Timestamp)(nil), // 47: google.protobuf.Timestamp - (*v2.Entitlement)(nil), // 48: c1.connector.v2.Entitlement - (*v2.Resource)(nil), // 49: c1.connector.v2.Resource + (*v2.Resource)(nil), // 47: c1.connector.v2.Resource + (*timestamppb.Timestamp)(nil), // 48: google.protobuf.Timestamp + (*v2.Entitlement)(nil), // 49: c1.connector.v2.Entitlement (*v2.Grant)(nil), // 50: c1.connector.v2.Grant (*v2.AccountInfo)(nil), // 51: c1.connector.v2.AccountInfo (*v2.CredentialOptions)(nil), // 52: c1.connector.v2.CredentialOptions @@ -3550,61 +5191,62 @@ var 
file_c1_connectorapi_baton_v1_baton_proto_depIdxs = []int32{ 44, // 42: c1.connectorapi.baton.v1.Task.NoneTask.annotations:type_name -> google.protobuf.Any 44, // 43: c1.connectorapi.baton.v1.Task.HelloTask.annotations:type_name -> google.protobuf.Any 44, // 44: c1.connectorapi.baton.v1.Task.SyncFullTask.annotations:type_name -> google.protobuf.Any - 44, // 45: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any - 47, // 46: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp - 48, // 47: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> c1.connector.v2.Entitlement - 49, // 48: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource - 44, // 49: c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any - 45, // 50: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration - 50, // 51: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> c1.connector.v2.Grant - 44, // 52: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any - 51, // 53: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo - 52, // 54: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions - 53, // 55: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig - 49, // 56: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource - 54, // 57: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId - 54, // 58: c1.connectorapi.baton.v1.Task.DeleteResourceTask.parent_resource_id:type_name -> c1.connector.v2.ResourceId - 54, // 59: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId - 52, // 60: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions - 53, // 61: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig - 55, // 62: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest - 56, // 63: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema - 44, // 64: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any - 24, // 65: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask - 28, // 66: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask - 44, // 67: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any - 44, // 68: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any - 44, // 69: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any - 44, // 70: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any - 57, // 71: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct - 44, // 72: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any - 44, // 73: 
c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any - 44, // 74: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask.annotations:type_name -> google.protobuf.Any - 35, // 75: c1.connectorapi.baton.v1.Task.CompactSyncs.compactable_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync - 44, // 76: c1.connectorapi.baton.v1.Task.CompactSyncs.annotations:type_name -> google.protobuf.Any - 44, // 77: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any - 44, // 78: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> google.protobuf.Any - 44, // 79: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any - 44, // 80: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any - 44, // 81: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any - 44, // 82: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any - 2, // 83: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest - 4, // 84: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest - 6, // 85: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest - 10, // 86: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest - 8, // 87: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest - 12, // 88: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest - 3, // 89: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse - 5, // 90: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse - 7, // 91: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse - 11, // 92: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse - 9, // 93: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse - 13, // 94: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse - 89, // [89:95] is the sub-list for method output_type - 83, // [83:89] is the sub-list for method input_type - 83, // [83:83] is the sub-list for extension type_name - 83, // [83:83] is the sub-list for extension extendee - 0, // [0:83] is the sub-list for field type_name + 47, // 45: c1.connectorapi.baton.v1.Task.SyncFullTask.targeted_sync_resources:type_name -> c1.connector.v2.Resource + 44, // 46: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any + 48, // 47: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp + 49, // 48: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> c1.connector.v2.Entitlement + 47, // 49: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource + 44, // 50: 
c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any + 45, // 51: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration + 50, // 52: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> c1.connector.v2.Grant + 44, // 53: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any + 51, // 54: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo + 52, // 55: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions + 53, // 56: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 47, // 57: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource + 54, // 58: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId + 54, // 59: c1.connectorapi.baton.v1.Task.DeleteResourceTask.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 54, // 60: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId + 52, // 61: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions + 53, // 62: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 55, // 63: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest + 56, // 64: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema + 44, // 65: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any + 24, // 66: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask + 28, // 67: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask + 44, // 68: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any + 44, // 69: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any + 44, // 70: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any + 44, // 71: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any + 57, // 72: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct + 44, // 73: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any + 44, // 74: c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any + 44, // 75: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask.annotations:type_name -> google.protobuf.Any + 35, // 76: c1.connectorapi.baton.v1.Task.CompactSyncs.compactable_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync + 44, // 77: c1.connectorapi.baton.v1.Task.CompactSyncs.annotations:type_name -> google.protobuf.Any + 44, // 78: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any + 44, // 79: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> google.protobuf.Any + 44, // 80: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any + 
44, // 81: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any + 44, // 82: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any + 44, // 83: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any + 2, // 84: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest + 4, // 85: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest + 6, // 86: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest + 10, // 87: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest + 8, // 88: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest + 12, // 89: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest + 3, // 90: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse + 5, // 91: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse + 7, // 92: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse + 11, // 93: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse + 9, // 94: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse + 13, // 95: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse + 90, // [90:96] is the sub-list for method output_type + 84, // [84:90] is the sub-list for method input_type + 84, // [84:84] is the sub-list for extension type_name + 84, // [84:84] is the sub-list for extension extendee + 0, // [0:84] is the sub-list for field type_name } func init() { file_c1_connectorapi_baton_v1_baton_proto_init() } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go index bf4b6afe..937bae11 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go @@ -3418,6 +3418,40 @@ func (m *Task_SyncFullTask) validate(all bool) error { // no validation rules for SkipEntitlementsAndGrants + for idx, item := range m.GetTargetedSyncResources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Task_SyncFullTaskValidationError{ + field: fmt.Sprintf("TargetedSyncResources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Task_SyncFullTaskValidationError{ + field: fmt.Sprintf("TargetedSyncResources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err 
!= nil { + return Task_SyncFullTaskValidationError{ + field: fmt.Sprintf("TargetedSyncResources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + if len(errors) > 0 { return Task_SyncFullTaskMultiError(errors) } @@ -5522,6 +5556,8 @@ func (m *Task_ActionListSchemasTask) validate(all bool) error { } + // no validation rules for ResourceTypeId + if len(errors) > 0 { return Task_ActionListSchemasTaskMultiError(errors) } @@ -5827,6 +5863,8 @@ func (m *Task_ActionInvokeTask) validate(all bool) error { } + // no validation rules for ResourceTypeId + if len(errors) > 0 { return Task_ActionInvokeTaskMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go new file mode 100644 index 00000000..bdf204f7 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go @@ -0,0 +1,5312 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connectorapi/baton/v1/baton.proto + +//go:build protoopaque + +package v1 + +import ( + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Task_Status int32 + +const ( + Task_STATUS_UNSPECIFIED Task_Status = 0 + Task_STATUS_PENDING Task_Status = 1 + Task_STATUS_SCHEDULED Task_Status = 2 + Task_STATUS_RUNNING Task_Status = 3 + Task_STATUS_FINISHED Task_Status = 4 +) + +// Enum value maps for Task_Status. 
+var ( + Task_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "STATUS_PENDING", + 2: "STATUS_SCHEDULED", + 3: "STATUS_RUNNING", + 4: "STATUS_FINISHED", + } + Task_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "STATUS_PENDING": 1, + "STATUS_SCHEDULED": 2, + "STATUS_RUNNING": 3, + "STATUS_FINISHED": 4, + } +) + +func (x Task_Status) Enum() *Task_Status { + p := new(Task_Status) + *p = x + return p +} + +func (x Task_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Task_Status) Descriptor() protoreflect.EnumDescriptor { + return file_c1_connectorapi_baton_v1_baton_proto_enumTypes[0].Descriptor() +} + +func (Task_Status) Type() protoreflect.EnumType { + return &file_c1_connectorapi_baton_v1_baton_proto_enumTypes[0] +} + +func (x Task_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +type Task struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_Status Task_Status `protobuf:"varint,2,opt,name=status,proto3,enum=c1.connectorapi.baton.v1.Task_Status"` + xxx_hidden_TaskType isTask_TaskType `protobuf_oneof:"task_type"` + xxx_hidden_Debug bool `protobuf:"varint,3,opt,name=debug,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task) Reset() { + *x = Task{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task) ProtoMessage() {} + +func (x *Task) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *Task) GetStatus() Task_Status { + if x != nil { + return x.xxx_hidden_Status + } + return Task_STATUS_UNSPECIFIED +} + +func (x *Task) GetNone() *Task_NoneTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_None); ok { + return x.None + } + } + return nil +} + +func (x *Task) GetHello() *Task_HelloTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_Hello); ok { + return x.Hello + } + } + return nil +} + +func (x *Task) GetSyncFull() *Task_SyncFullTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_SyncFull); ok { + return x.SyncFull + } + } + return nil +} + +func (x *Task) GetGrant() *Task_GrantTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_Grant); ok { + return x.Grant + } + } + return nil +} + +func (x *Task) GetRevoke() *Task_RevokeTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_Revoke); ok { + return x.Revoke + } + } + return nil +} + +func (x *Task) GetCreateAccount() *Task_CreateAccountTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_CreateAccount); ok { + return x.CreateAccount + } + } + return nil +} + +func (x *Task) GetCreateResource() *Task_CreateResourceTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_CreateResource); ok { + return x.CreateResource + } + } + return nil +} + +func (x *Task) GetDeleteResource() *Task_DeleteResourceTask { + if x != nil { + if x, ok := 
x.xxx_hidden_TaskType.(*task_DeleteResource); ok { + return x.DeleteResource + } + } + return nil +} + +func (x *Task) GetRotateCredentials() *Task_RotateCredentialsTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_RotateCredentials); ok { + return x.RotateCredentials + } + } + return nil +} + +func (x *Task) GetEventFeed() *Task_EventFeedTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_EventFeed); ok { + return x.EventFeed + } + } + return nil +} + +func (x *Task) GetCreateTicketTask() *Task_CreateTicketTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_CreateTicketTask_); ok { + return x.CreateTicketTask + } + } + return nil +} + +func (x *Task) GetListTicketSchemas() *Task_ListTicketSchemasTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_ListTicketSchemas); ok { + return x.ListTicketSchemas + } + } + return nil +} + +func (x *Task) GetGetTicket() *Task_GetTicketTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_GetTicket); ok { + return x.GetTicket + } + } + return nil +} + +func (x *Task) GetBulkCreateTickets() *Task_BulkCreateTicketsTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_BulkCreateTickets); ok { + return x.BulkCreateTickets + } + } + return nil +} + +func (x *Task) GetBulkGetTickets() *Task_BulkGetTicketsTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_BulkGetTickets); ok { + return x.BulkGetTickets + } + } + return nil +} + +func (x *Task) GetActionListSchemas() *Task_ActionListSchemasTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_ActionListSchemas); ok { + return x.ActionListSchemas + } + } + return nil +} + +func (x *Task) GetActionGetSchema() *Task_ActionGetSchemaTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_ActionGetSchema); ok { + return x.ActionGetSchema + } + } + return nil +} + +func (x *Task) GetActionInvoke() *Task_ActionInvokeTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_ActionInvoke); ok { + return x.ActionInvoke + } + } + return nil +} + +func (x *Task) GetActionStatus() *Task_ActionStatusTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_ActionStatus); ok { + return x.ActionStatus + } + } + return nil +} + +func (x *Task) GetCreateSyncDiff() *Task_CreateSyncDiffTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_CreateSyncDiff); ok { + return x.CreateSyncDiff + } + } + return nil +} + +func (x *Task) GetCompactSyncs() *Task_CompactSyncs { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_CompactSyncs_); ok { + return x.CompactSyncs + } + } + return nil +} + +func (x *Task) GetDebug() bool { + if x != nil { + return x.xxx_hidden_Debug + } + return false +} + +func (x *Task) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *Task) SetStatus(v Task_Status) { + x.xxx_hidden_Status = v +} + +func (x *Task) SetNone(v *Task_NoneTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_None{v} +} + +func (x *Task) SetHello(v *Task_HelloTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_Hello{v} +} + +func (x *Task) SetSyncFull(v *Task_SyncFullTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_SyncFull{v} +} + +func (x *Task) SetGrant(v *Task_GrantTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_Grant{v} +} + +func (x *Task) SetRevoke(v *Task_RevokeTask) { + if v == nil { + 
x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_Revoke{v} +} + +func (x *Task) SetCreateAccount(v *Task_CreateAccountTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_CreateAccount{v} +} + +func (x *Task) SetCreateResource(v *Task_CreateResourceTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_CreateResource{v} +} + +func (x *Task) SetDeleteResource(v *Task_DeleteResourceTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_DeleteResource{v} +} + +func (x *Task) SetRotateCredentials(v *Task_RotateCredentialsTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_RotateCredentials{v} +} + +func (x *Task) SetEventFeed(v *Task_EventFeedTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_EventFeed{v} +} + +func (x *Task) SetCreateTicketTask(v *Task_CreateTicketTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_CreateTicketTask_{v} +} + +func (x *Task) SetListTicketSchemas(v *Task_ListTicketSchemasTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_ListTicketSchemas{v} +} + +func (x *Task) SetGetTicket(v *Task_GetTicketTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_GetTicket{v} +} + +func (x *Task) SetBulkCreateTickets(v *Task_BulkCreateTicketsTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_BulkCreateTickets{v} +} + +func (x *Task) SetBulkGetTickets(v *Task_BulkGetTicketsTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_BulkGetTickets{v} +} + +func (x *Task) SetActionListSchemas(v *Task_ActionListSchemasTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_ActionListSchemas{v} +} + +func (x *Task) SetActionGetSchema(v *Task_ActionGetSchemaTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_ActionGetSchema{v} +} + +func (x *Task) SetActionInvoke(v *Task_ActionInvokeTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_ActionInvoke{v} +} + +func (x *Task) SetActionStatus(v *Task_ActionStatusTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_ActionStatus{v} +} + +func (x *Task) SetCreateSyncDiff(v *Task_CreateSyncDiffTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_CreateSyncDiff{v} +} + +func (x *Task) SetCompactSyncs(v *Task_CompactSyncs) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_CompactSyncs_{v} +} + +func (x *Task) SetDebug(v bool) { + x.xxx_hidden_Debug = v +} + +func (x *Task) HasTaskType() bool { + if x == nil { + return false + } + return x.xxx_hidden_TaskType != nil +} + +func (x *Task) HasNone() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_None) + return ok +} + +func (x *Task) HasHello() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_Hello) + return ok +} + +func (x *Task) HasSyncFull() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_SyncFull) + return ok +} + +func (x *Task) HasGrant() bool { + if x == nil 
{ + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_Grant) + return ok +} + +func (x *Task) HasRevoke() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_Revoke) + return ok +} + +func (x *Task) HasCreateAccount() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_CreateAccount) + return ok +} + +func (x *Task) HasCreateResource() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_CreateResource) + return ok +} + +func (x *Task) HasDeleteResource() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_DeleteResource) + return ok +} + +func (x *Task) HasRotateCredentials() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_RotateCredentials) + return ok +} + +func (x *Task) HasEventFeed() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_EventFeed) + return ok +} + +func (x *Task) HasCreateTicketTask() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_CreateTicketTask_) + return ok +} + +func (x *Task) HasListTicketSchemas() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_ListTicketSchemas) + return ok +} + +func (x *Task) HasGetTicket() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_GetTicket) + return ok +} + +func (x *Task) HasBulkCreateTickets() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_BulkCreateTickets) + return ok +} + +func (x *Task) HasBulkGetTickets() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_BulkGetTickets) + return ok +} + +func (x *Task) HasActionListSchemas() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_ActionListSchemas) + return ok +} + +func (x *Task) HasActionGetSchema() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_ActionGetSchema) + return ok +} + +func (x *Task) HasActionInvoke() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_ActionInvoke) + return ok +} + +func (x *Task) HasActionStatus() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_ActionStatus) + return ok +} + +func (x *Task) HasCreateSyncDiff() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_CreateSyncDiff) + return ok +} + +func (x *Task) HasCompactSyncs() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_CompactSyncs_) + return ok +} + +func (x *Task) ClearTaskType() { + x.xxx_hidden_TaskType = nil +} + +func (x *Task) ClearNone() { + if _, ok := x.xxx_hidden_TaskType.(*task_None); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearHello() { + if _, ok := x.xxx_hidden_TaskType.(*task_Hello); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearSyncFull() { + if _, ok := x.xxx_hidden_TaskType.(*task_SyncFull); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearGrant() { + if _, ok := x.xxx_hidden_TaskType.(*task_Grant); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearRevoke() { + if _, ok := x.xxx_hidden_TaskType.(*task_Revoke); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearCreateAccount() { + if _, ok := x.xxx_hidden_TaskType.(*task_CreateAccount); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearCreateResource() { + if _, ok := 
x.xxx_hidden_TaskType.(*task_CreateResource); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearDeleteResource() { + if _, ok := x.xxx_hidden_TaskType.(*task_DeleteResource); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearRotateCredentials() { + if _, ok := x.xxx_hidden_TaskType.(*task_RotateCredentials); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearEventFeed() { + if _, ok := x.xxx_hidden_TaskType.(*task_EventFeed); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearCreateTicketTask() { + if _, ok := x.xxx_hidden_TaskType.(*task_CreateTicketTask_); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearListTicketSchemas() { + if _, ok := x.xxx_hidden_TaskType.(*task_ListTicketSchemas); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearGetTicket() { + if _, ok := x.xxx_hidden_TaskType.(*task_GetTicket); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearBulkCreateTickets() { + if _, ok := x.xxx_hidden_TaskType.(*task_BulkCreateTickets); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearBulkGetTickets() { + if _, ok := x.xxx_hidden_TaskType.(*task_BulkGetTickets); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearActionListSchemas() { + if _, ok := x.xxx_hidden_TaskType.(*task_ActionListSchemas); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearActionGetSchema() { + if _, ok := x.xxx_hidden_TaskType.(*task_ActionGetSchema); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearActionInvoke() { + if _, ok := x.xxx_hidden_TaskType.(*task_ActionInvoke); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearActionStatus() { + if _, ok := x.xxx_hidden_TaskType.(*task_ActionStatus); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearCreateSyncDiff() { + if _, ok := x.xxx_hidden_TaskType.(*task_CreateSyncDiff); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearCompactSyncs() { + if _, ok := x.xxx_hidden_TaskType.(*task_CompactSyncs_); ok { + x.xxx_hidden_TaskType = nil + } +} + +const Task_TaskType_not_set_case case_Task_TaskType = 0 +const Task_None_case case_Task_TaskType = 100 +const Task_Hello_case case_Task_TaskType = 101 +const Task_SyncFull_case case_Task_TaskType = 102 +const Task_Grant_case case_Task_TaskType = 103 +const Task_Revoke_case case_Task_TaskType = 104 +const Task_CreateAccount_case case_Task_TaskType = 105 +const Task_CreateResource_case case_Task_TaskType = 106 +const Task_DeleteResource_case case_Task_TaskType = 107 +const Task_RotateCredentials_case case_Task_TaskType = 108 +const Task_EventFeed_case case_Task_TaskType = 109 +const Task_CreateTicketTask_case case_Task_TaskType = 110 +const Task_ListTicketSchemas_case case_Task_TaskType = 111 +const Task_GetTicket_case case_Task_TaskType = 112 +const Task_BulkCreateTickets_case case_Task_TaskType = 113 +const Task_BulkGetTickets_case case_Task_TaskType = 114 +const Task_ActionListSchemas_case case_Task_TaskType = 115 +const Task_ActionGetSchema_case case_Task_TaskType = 116 +const Task_ActionInvoke_case case_Task_TaskType = 117 +const Task_ActionStatus_case case_Task_TaskType = 118 +const Task_CreateSyncDiff_case case_Task_TaskType = 119 +const Task_CompactSyncs_case case_Task_TaskType = 120 + +func (x *Task) WhichTaskType() case_Task_TaskType { + if x == nil { + return Task_TaskType_not_set_case + } + switch x.xxx_hidden_TaskType.(type) { + case *task_None: + return Task_None_case + case *task_Hello: + return Task_Hello_case 
+ case *task_SyncFull: + return Task_SyncFull_case + case *task_Grant: + return Task_Grant_case + case *task_Revoke: + return Task_Revoke_case + case *task_CreateAccount: + return Task_CreateAccount_case + case *task_CreateResource: + return Task_CreateResource_case + case *task_DeleteResource: + return Task_DeleteResource_case + case *task_RotateCredentials: + return Task_RotateCredentials_case + case *task_EventFeed: + return Task_EventFeed_case + case *task_CreateTicketTask_: + return Task_CreateTicketTask_case + case *task_ListTicketSchemas: + return Task_ListTicketSchemas_case + case *task_GetTicket: + return Task_GetTicket_case + case *task_BulkCreateTickets: + return Task_BulkCreateTickets_case + case *task_BulkGetTickets: + return Task_BulkGetTickets_case + case *task_ActionListSchemas: + return Task_ActionListSchemas_case + case *task_ActionGetSchema: + return Task_ActionGetSchema_case + case *task_ActionInvoke: + return Task_ActionInvoke_case + case *task_ActionStatus: + return Task_ActionStatus_case + case *task_CreateSyncDiff: + return Task_CreateSyncDiff_case + case *task_CompactSyncs_: + return Task_CompactSyncs_case + default: + return Task_TaskType_not_set_case + } +} + +type Task_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + Status Task_Status + // Fields of oneof xxx_hidden_TaskType: + None *Task_NoneTask + Hello *Task_HelloTask + SyncFull *Task_SyncFullTask + Grant *Task_GrantTask + Revoke *Task_RevokeTask + CreateAccount *Task_CreateAccountTask + CreateResource *Task_CreateResourceTask + DeleteResource *Task_DeleteResourceTask + RotateCredentials *Task_RotateCredentialsTask + EventFeed *Task_EventFeedTask + CreateTicketTask *Task_CreateTicketTask + ListTicketSchemas *Task_ListTicketSchemasTask + GetTicket *Task_GetTicketTask + BulkCreateTickets *Task_BulkCreateTicketsTask + BulkGetTickets *Task_BulkGetTicketsTask + ActionListSchemas *Task_ActionListSchemasTask + ActionGetSchema *Task_ActionGetSchemaTask + ActionInvoke *Task_ActionInvokeTask + ActionStatus *Task_ActionStatusTask + CreateSyncDiff *Task_CreateSyncDiffTask + CompactSyncs *Task_CompactSyncs + // -- end of xxx_hidden_TaskType + Debug bool +} + +func (b0 Task_builder) Build() *Task { + m0 := &Task{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Status = b.Status + if b.None != nil { + x.xxx_hidden_TaskType = &task_None{b.None} + } + if b.Hello != nil { + x.xxx_hidden_TaskType = &task_Hello{b.Hello} + } + if b.SyncFull != nil { + x.xxx_hidden_TaskType = &task_SyncFull{b.SyncFull} + } + if b.Grant != nil { + x.xxx_hidden_TaskType = &task_Grant{b.Grant} + } + if b.Revoke != nil { + x.xxx_hidden_TaskType = &task_Revoke{b.Revoke} + } + if b.CreateAccount != nil { + x.xxx_hidden_TaskType = &task_CreateAccount{b.CreateAccount} + } + if b.CreateResource != nil { + x.xxx_hidden_TaskType = &task_CreateResource{b.CreateResource} + } + if b.DeleteResource != nil { + x.xxx_hidden_TaskType = &task_DeleteResource{b.DeleteResource} + } + if b.RotateCredentials != nil { + x.xxx_hidden_TaskType = &task_RotateCredentials{b.RotateCredentials} + } + if b.EventFeed != nil { + x.xxx_hidden_TaskType = &task_EventFeed{b.EventFeed} + } + if b.CreateTicketTask != nil { + x.xxx_hidden_TaskType = &task_CreateTicketTask_{b.CreateTicketTask} + } + if b.ListTicketSchemas != nil { + x.xxx_hidden_TaskType = &task_ListTicketSchemas{b.ListTicketSchemas} + } + if b.GetTicket != nil { + x.xxx_hidden_TaskType = &task_GetTicket{b.GetTicket} + } + if 
b.BulkCreateTickets != nil { + x.xxx_hidden_TaskType = &task_BulkCreateTickets{b.BulkCreateTickets} + } + if b.BulkGetTickets != nil { + x.xxx_hidden_TaskType = &task_BulkGetTickets{b.BulkGetTickets} + } + if b.ActionListSchemas != nil { + x.xxx_hidden_TaskType = &task_ActionListSchemas{b.ActionListSchemas} + } + if b.ActionGetSchema != nil { + x.xxx_hidden_TaskType = &task_ActionGetSchema{b.ActionGetSchema} + } + if b.ActionInvoke != nil { + x.xxx_hidden_TaskType = &task_ActionInvoke{b.ActionInvoke} + } + if b.ActionStatus != nil { + x.xxx_hidden_TaskType = &task_ActionStatus{b.ActionStatus} + } + if b.CreateSyncDiff != nil { + x.xxx_hidden_TaskType = &task_CreateSyncDiff{b.CreateSyncDiff} + } + if b.CompactSyncs != nil { + x.xxx_hidden_TaskType = &task_CompactSyncs_{b.CompactSyncs} + } + x.xxx_hidden_Debug = b.Debug + return m0 +} + +type case_Task_TaskType protoreflect.FieldNumber + +func (x case_Task_TaskType) String() string { + md := file_c1_connectorapi_baton_v1_baton_proto_msgTypes[0].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isTask_TaskType interface { + isTask_TaskType() +} + +type task_None struct { + None *Task_NoneTask `protobuf:"bytes,100,opt,name=none,proto3,oneof"` +} + +type task_Hello struct { + Hello *Task_HelloTask `protobuf:"bytes,101,opt,name=hello,proto3,oneof"` +} + +type task_SyncFull struct { + SyncFull *Task_SyncFullTask `protobuf:"bytes,102,opt,name=sync_full,json=syncFull,proto3,oneof"` +} + +type task_Grant struct { + Grant *Task_GrantTask `protobuf:"bytes,103,opt,name=grant,proto3,oneof"` +} + +type task_Revoke struct { + Revoke *Task_RevokeTask `protobuf:"bytes,104,opt,name=revoke,proto3,oneof"` +} + +type task_CreateAccount struct { + CreateAccount *Task_CreateAccountTask `protobuf:"bytes,105,opt,name=create_account,json=createAccount,proto3,oneof"` +} + +type task_CreateResource struct { + CreateResource *Task_CreateResourceTask `protobuf:"bytes,106,opt,name=create_resource,json=createResource,proto3,oneof"` +} + +type task_DeleteResource struct { + DeleteResource *Task_DeleteResourceTask `protobuf:"bytes,107,opt,name=delete_resource,json=deleteResource,proto3,oneof"` +} + +type task_RotateCredentials struct { + RotateCredentials *Task_RotateCredentialsTask `protobuf:"bytes,108,opt,name=rotate_credentials,json=rotateCredentials,proto3,oneof"` +} + +type task_EventFeed struct { + EventFeed *Task_EventFeedTask `protobuf:"bytes,109,opt,name=event_feed,json=eventFeed,proto3,oneof"` +} + +type task_CreateTicketTask_ struct { + CreateTicketTask *Task_CreateTicketTask `protobuf:"bytes,110,opt,name=create_ticket_task,json=createTicketTask,proto3,oneof"` +} + +type task_ListTicketSchemas struct { + ListTicketSchemas *Task_ListTicketSchemasTask `protobuf:"bytes,111,opt,name=list_ticket_schemas,json=listTicketSchemas,proto3,oneof"` +} + +type task_GetTicket struct { + GetTicket *Task_GetTicketTask `protobuf:"bytes,112,opt,name=get_ticket,json=getTicket,proto3,oneof"` +} + +type task_BulkCreateTickets struct { + BulkCreateTickets *Task_BulkCreateTicketsTask `protobuf:"bytes,113,opt,name=bulk_create_tickets,json=bulkCreateTickets,proto3,oneof"` +} + +type task_BulkGetTickets struct { + BulkGetTickets *Task_BulkGetTicketsTask `protobuf:"bytes,114,opt,name=bulk_get_tickets,json=bulkGetTickets,proto3,oneof"` +} + +type task_ActionListSchemas struct { + ActionListSchemas *Task_ActionListSchemasTask 
`protobuf:"bytes,115,opt,name=action_list_schemas,json=actionListSchemas,proto3,oneof"` +} + +type task_ActionGetSchema struct { + ActionGetSchema *Task_ActionGetSchemaTask `protobuf:"bytes,116,opt,name=action_get_schema,json=actionGetSchema,proto3,oneof"` +} + +type task_ActionInvoke struct { + ActionInvoke *Task_ActionInvokeTask `protobuf:"bytes,117,opt,name=action_invoke,json=actionInvoke,proto3,oneof"` +} + +type task_ActionStatus struct { + ActionStatus *Task_ActionStatusTask `protobuf:"bytes,118,opt,name=action_status,json=actionStatus,proto3,oneof"` +} + +type task_CreateSyncDiff struct { + CreateSyncDiff *Task_CreateSyncDiffTask `protobuf:"bytes,119,opt,name=create_sync_diff,json=createSyncDiff,proto3,oneof"` +} + +type task_CompactSyncs_ struct { + CompactSyncs *Task_CompactSyncs `protobuf:"bytes,120,opt,name=compact_syncs,json=compactSyncs,proto3,oneof"` +} + +func (*task_None) isTask_TaskType() {} + +func (*task_Hello) isTask_TaskType() {} + +func (*task_SyncFull) isTask_TaskType() {} + +func (*task_Grant) isTask_TaskType() {} + +func (*task_Revoke) isTask_TaskType() {} + +func (*task_CreateAccount) isTask_TaskType() {} + +func (*task_CreateResource) isTask_TaskType() {} + +func (*task_DeleteResource) isTask_TaskType() {} + +func (*task_RotateCredentials) isTask_TaskType() {} + +func (*task_EventFeed) isTask_TaskType() {} + +func (*task_CreateTicketTask_) isTask_TaskType() {} + +func (*task_ListTicketSchemas) isTask_TaskType() {} + +func (*task_GetTicket) isTask_TaskType() {} + +func (*task_BulkCreateTickets) isTask_TaskType() {} + +func (*task_BulkGetTickets) isTask_TaskType() {} + +func (*task_ActionListSchemas) isTask_TaskType() {} + +func (*task_ActionGetSchema) isTask_TaskType() {} + +func (*task_ActionInvoke) isTask_TaskType() {} + +func (*task_ActionStatus) isTask_TaskType() {} + +func (*task_CreateSyncDiff) isTask_TaskType() {} + +func (*task_CompactSyncs_) isTask_TaskType() {} + +type BatonServiceHelloRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3"` + xxx_hidden_TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3"` + xxx_hidden_BuildInfo *BatonServiceHelloRequest_BuildInfo `protobuf:"bytes,3,opt,name=build_info,json=buildInfo,proto3"` + xxx_hidden_OsInfo *BatonServiceHelloRequest_OSInfo `protobuf:"bytes,4,opt,name=os_info,json=osInfo,proto3"` + xxx_hidden_ConnectorMetadata *v2.ConnectorMetadata `protobuf:"bytes,5,opt,name=connector_metadata,json=connectorMetadata,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,6,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceHelloRequest) Reset() { + *x = BatonServiceHelloRequest{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceHelloRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceHelloRequest) ProtoMessage() {} + +func (x *BatonServiceHelloRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceHelloRequest) GetHostId() string { + if x != nil { + return x.xxx_hidden_HostId + } + return "" +} + 
+func (x *BatonServiceHelloRequest) GetTaskId() string { + if x != nil { + return x.xxx_hidden_TaskId + } + return "" +} + +func (x *BatonServiceHelloRequest) GetBuildInfo() *BatonServiceHelloRequest_BuildInfo { + if x != nil { + return x.xxx_hidden_BuildInfo + } + return nil +} + +func (x *BatonServiceHelloRequest) GetOsInfo() *BatonServiceHelloRequest_OSInfo { + if x != nil { + return x.xxx_hidden_OsInfo + } + return nil +} + +func (x *BatonServiceHelloRequest) GetConnectorMetadata() *v2.ConnectorMetadata { + if x != nil { + return x.xxx_hidden_ConnectorMetadata + } + return nil +} + +func (x *BatonServiceHelloRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceHelloRequest) SetHostId(v string) { + x.xxx_hidden_HostId = v +} + +func (x *BatonServiceHelloRequest) SetTaskId(v string) { + x.xxx_hidden_TaskId = v +} + +func (x *BatonServiceHelloRequest) SetBuildInfo(v *BatonServiceHelloRequest_BuildInfo) { + x.xxx_hidden_BuildInfo = v +} + +func (x *BatonServiceHelloRequest) SetOsInfo(v *BatonServiceHelloRequest_OSInfo) { + x.xxx_hidden_OsInfo = v +} + +func (x *BatonServiceHelloRequest) SetConnectorMetadata(v *v2.ConnectorMetadata) { + x.xxx_hidden_ConnectorMetadata = v +} + +func (x *BatonServiceHelloRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *BatonServiceHelloRequest) HasBuildInfo() bool { + if x == nil { + return false + } + return x.xxx_hidden_BuildInfo != nil +} + +func (x *BatonServiceHelloRequest) HasOsInfo() bool { + if x == nil { + return false + } + return x.xxx_hidden_OsInfo != nil +} + +func (x *BatonServiceHelloRequest) HasConnectorMetadata() bool { + if x == nil { + return false + } + return x.xxx_hidden_ConnectorMetadata != nil +} + +func (x *BatonServiceHelloRequest) ClearBuildInfo() { + x.xxx_hidden_BuildInfo = nil +} + +func (x *BatonServiceHelloRequest) ClearOsInfo() { + x.xxx_hidden_OsInfo = nil +} + +func (x *BatonServiceHelloRequest) ClearConnectorMetadata() { + x.xxx_hidden_ConnectorMetadata = nil +} + +type BatonServiceHelloRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + HostId string + TaskId string + BuildInfo *BatonServiceHelloRequest_BuildInfo + OsInfo *BatonServiceHelloRequest_OSInfo + ConnectorMetadata *v2.ConnectorMetadata + Annotations []*anypb.Any +} + +func (b0 BatonServiceHelloRequest_builder) Build() *BatonServiceHelloRequest { + m0 := &BatonServiceHelloRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_HostId = b.HostId + x.xxx_hidden_TaskId = b.TaskId + x.xxx_hidden_BuildInfo = b.BuildInfo + x.xxx_hidden_OsInfo = b.OsInfo + x.xxx_hidden_ConnectorMetadata = b.ConnectorMetadata + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type BatonServiceHelloResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceHelloResponse) Reset() { + *x = BatonServiceHelloResponse{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceHelloResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceHelloResponse) ProtoMessage() {} + +func (x *BatonServiceHelloResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceHelloResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceHelloResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type BatonServiceHelloResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 BatonServiceHelloResponse_builder) Build() *BatonServiceHelloResponse { + m0 := &BatonServiceHelloResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type BatonServiceGetTaskRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceGetTaskRequest) Reset() { + *x = BatonServiceGetTaskRequest{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceGetTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceGetTaskRequest) ProtoMessage() {} + +func (x *BatonServiceGetTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceGetTaskRequest) GetHostId() string { + if x != nil { + return x.xxx_hidden_HostId + } + return "" +} + +func (x *BatonServiceGetTaskRequest) SetHostId(v string) { + x.xxx_hidden_HostId = v +} + +type BatonServiceGetTaskRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + HostId string +} + +func (b0 BatonServiceGetTaskRequest_builder) Build() *BatonServiceGetTaskRequest { + m0 := &BatonServiceGetTaskRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_HostId = b.HostId + return m0 +} + +type BatonServiceGetTaskResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Task *Task `protobuf:"bytes,1,opt,name=task,proto3"` + xxx_hidden_NextPoll *durationpb.Duration `protobuf:"bytes,2,opt,name=next_poll,json=nextPoll,proto3"` + xxx_hidden_NextHeartbeat *durationpb.Duration `protobuf:"bytes,3,opt,name=next_heartbeat,json=nextHeartbeat,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceGetTaskResponse) Reset() { + *x = BatonServiceGetTaskResponse{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceGetTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceGetTaskResponse) ProtoMessage() {} + +func (x *BatonServiceGetTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceGetTaskResponse) GetTask() *Task { + if x != nil { + return x.xxx_hidden_Task + } + return nil +} + +func (x *BatonServiceGetTaskResponse) GetNextPoll() *durationpb.Duration { + if x != nil { + return x.xxx_hidden_NextPoll + } + return nil +} + +func (x *BatonServiceGetTaskResponse) GetNextHeartbeat() *durationpb.Duration { + if x != nil { + return x.xxx_hidden_NextHeartbeat + } + return nil +} + +func (x *BatonServiceGetTaskResponse) GetAnnotations() 
[]*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceGetTaskResponse) SetTask(v *Task) { + x.xxx_hidden_Task = v +} + +func (x *BatonServiceGetTaskResponse) SetNextPoll(v *durationpb.Duration) { + x.xxx_hidden_NextPoll = v +} + +func (x *BatonServiceGetTaskResponse) SetNextHeartbeat(v *durationpb.Duration) { + x.xxx_hidden_NextHeartbeat = v +} + +func (x *BatonServiceGetTaskResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *BatonServiceGetTaskResponse) HasTask() bool { + if x == nil { + return false + } + return x.xxx_hidden_Task != nil +} + +func (x *BatonServiceGetTaskResponse) HasNextPoll() bool { + if x == nil { + return false + } + return x.xxx_hidden_NextPoll != nil +} + +func (x *BatonServiceGetTaskResponse) HasNextHeartbeat() bool { + if x == nil { + return false + } + return x.xxx_hidden_NextHeartbeat != nil +} + +func (x *BatonServiceGetTaskResponse) ClearTask() { + x.xxx_hidden_Task = nil +} + +func (x *BatonServiceGetTaskResponse) ClearNextPoll() { + x.xxx_hidden_NextPoll = nil +} + +func (x *BatonServiceGetTaskResponse) ClearNextHeartbeat() { + x.xxx_hidden_NextHeartbeat = nil +} + +type BatonServiceGetTaskResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Task *Task + NextPoll *durationpb.Duration + NextHeartbeat *durationpb.Duration + Annotations []*anypb.Any +} + +func (b0 BatonServiceGetTaskResponse_builder) Build() *BatonServiceGetTaskResponse { + m0 := &BatonServiceGetTaskResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Task = b.Task + x.xxx_hidden_NextPoll = b.NextPoll + x.xxx_hidden_NextHeartbeat = b.NextHeartbeat + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type BatonServiceHeartbeatRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3"` + xxx_hidden_TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceHeartbeatRequest) Reset() { + *x = BatonServiceHeartbeatRequest{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceHeartbeatRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceHeartbeatRequest) ProtoMessage() {} + +func (x *BatonServiceHeartbeatRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceHeartbeatRequest) GetHostId() string { + if x != nil { + return x.xxx_hidden_HostId + } + return "" +} + +func (x *BatonServiceHeartbeatRequest) GetTaskId() string { + if x != nil { + return x.xxx_hidden_TaskId + } + return "" +} + +func (x *BatonServiceHeartbeatRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceHeartbeatRequest) SetHostId(v string) { + x.xxx_hidden_HostId = v +} + +func (x 
*BatonServiceHeartbeatRequest) SetTaskId(v string) { + x.xxx_hidden_TaskId = v +} + +func (x *BatonServiceHeartbeatRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type BatonServiceHeartbeatRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + HostId string + TaskId string + Annotations []*anypb.Any +} + +func (b0 BatonServiceHeartbeatRequest_builder) Build() *BatonServiceHeartbeatRequest { + m0 := &BatonServiceHeartbeatRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_HostId = b.HostId + x.xxx_hidden_TaskId = b.TaskId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type BatonServiceHeartbeatResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_NextHeartbeat *durationpb.Duration `protobuf:"bytes,1,opt,name=next_heartbeat,json=nextHeartbeat,proto3"` + xxx_hidden_Cancelled bool `protobuf:"varint,2,opt,name=cancelled,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceHeartbeatResponse) Reset() { + *x = BatonServiceHeartbeatResponse{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceHeartbeatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceHeartbeatResponse) ProtoMessage() {} + +func (x *BatonServiceHeartbeatResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceHeartbeatResponse) GetNextHeartbeat() *durationpb.Duration { + if x != nil { + return x.xxx_hidden_NextHeartbeat + } + return nil +} + +func (x *BatonServiceHeartbeatResponse) GetCancelled() bool { + if x != nil { + return x.xxx_hidden_Cancelled + } + return false +} + +func (x *BatonServiceHeartbeatResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceHeartbeatResponse) SetNextHeartbeat(v *durationpb.Duration) { + x.xxx_hidden_NextHeartbeat = v +} + +func (x *BatonServiceHeartbeatResponse) SetCancelled(v bool) { + x.xxx_hidden_Cancelled = v +} + +func (x *BatonServiceHeartbeatResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *BatonServiceHeartbeatResponse) HasNextHeartbeat() bool { + if x == nil { + return false + } + return x.xxx_hidden_NextHeartbeat != nil +} + +func (x *BatonServiceHeartbeatResponse) ClearNextHeartbeat() { + x.xxx_hidden_NextHeartbeat = nil +} + +type BatonServiceHeartbeatResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + NextHeartbeat *durationpb.Duration + Cancelled bool + Annotations []*anypb.Any +} + +func (b0 BatonServiceHeartbeatResponse_builder) Build() *BatonServiceHeartbeatResponse { + m0 := &BatonServiceHeartbeatResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_NextHeartbeat = b.NextHeartbeat + x.xxx_hidden_Cancelled = b.Cancelled + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type BatonServiceUploadAssetRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Msg isBatonServiceUploadAssetRequest_Msg `protobuf_oneof:"msg"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceUploadAssetRequest) Reset() { + *x = BatonServiceUploadAssetRequest{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceUploadAssetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceUploadAssetRequest) ProtoMessage() {} + +func (x *BatonServiceUploadAssetRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceUploadAssetRequest) GetMetadata() *BatonServiceUploadAssetRequest_UploadMetadata { + if x != nil { + if x, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Metadata); ok { + return x.Metadata + } + } + return nil +} + +func (x *BatonServiceUploadAssetRequest) GetData() *BatonServiceUploadAssetRequest_UploadData { + if x != nil { + if x, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Data); ok { + return x.Data + } + } + return nil +} + +func (x *BatonServiceUploadAssetRequest) GetEof() *BatonServiceUploadAssetRequest_UploadEOF { + if x != nil { + if x, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Eof); ok { + return x.Eof + } + } + return nil +} + +func (x *BatonServiceUploadAssetRequest) SetMetadata(v *BatonServiceUploadAssetRequest_UploadMetadata) { + if v == nil { + x.xxx_hidden_Msg = nil + return + } + x.xxx_hidden_Msg = &batonServiceUploadAssetRequest_Metadata{v} +} + +func (x *BatonServiceUploadAssetRequest) SetData(v *BatonServiceUploadAssetRequest_UploadData) { + if v == nil { + x.xxx_hidden_Msg = nil + return + } + x.xxx_hidden_Msg = &batonServiceUploadAssetRequest_Data{v} +} + +func (x *BatonServiceUploadAssetRequest) SetEof(v *BatonServiceUploadAssetRequest_UploadEOF) { + if v == nil { + x.xxx_hidden_Msg = nil + return + } + x.xxx_hidden_Msg = &batonServiceUploadAssetRequest_Eof{v} +} + +func (x *BatonServiceUploadAssetRequest) HasMsg() bool { + if x == nil { + return false + } + return x.xxx_hidden_Msg != nil +} + +func (x *BatonServiceUploadAssetRequest) HasMetadata() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Metadata) + return ok +} + +func (x *BatonServiceUploadAssetRequest) HasData() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Data) + return ok +} + +func (x *BatonServiceUploadAssetRequest) HasEof() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Eof) + return ok +} + +func (x *BatonServiceUploadAssetRequest) ClearMsg() { + x.xxx_hidden_Msg = nil +} + +func (x *BatonServiceUploadAssetRequest) 
ClearMetadata() { + if _, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Metadata); ok { + x.xxx_hidden_Msg = nil + } +} + +func (x *BatonServiceUploadAssetRequest) ClearData() { + if _, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Data); ok { + x.xxx_hidden_Msg = nil + } +} + +func (x *BatonServiceUploadAssetRequest) ClearEof() { + if _, ok := x.xxx_hidden_Msg.(*batonServiceUploadAssetRequest_Eof); ok { + x.xxx_hidden_Msg = nil + } +} + +const BatonServiceUploadAssetRequest_Msg_not_set_case case_BatonServiceUploadAssetRequest_Msg = 0 +const BatonServiceUploadAssetRequest_Metadata_case case_BatonServiceUploadAssetRequest_Msg = 100 +const BatonServiceUploadAssetRequest_Data_case case_BatonServiceUploadAssetRequest_Msg = 101 +const BatonServiceUploadAssetRequest_Eof_case case_BatonServiceUploadAssetRequest_Msg = 102 + +func (x *BatonServiceUploadAssetRequest) WhichMsg() case_BatonServiceUploadAssetRequest_Msg { + if x == nil { + return BatonServiceUploadAssetRequest_Msg_not_set_case + } + switch x.xxx_hidden_Msg.(type) { + case *batonServiceUploadAssetRequest_Metadata: + return BatonServiceUploadAssetRequest_Metadata_case + case *batonServiceUploadAssetRequest_Data: + return BatonServiceUploadAssetRequest_Data_case + case *batonServiceUploadAssetRequest_Eof: + return BatonServiceUploadAssetRequest_Eof_case + default: + return BatonServiceUploadAssetRequest_Msg_not_set_case + } +} + +type BatonServiceUploadAssetRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Fields of oneof xxx_hidden_Msg: + Metadata *BatonServiceUploadAssetRequest_UploadMetadata + Data *BatonServiceUploadAssetRequest_UploadData + Eof *BatonServiceUploadAssetRequest_UploadEOF + // -- end of xxx_hidden_Msg +} + +func (b0 BatonServiceUploadAssetRequest_builder) Build() *BatonServiceUploadAssetRequest { + m0 := &BatonServiceUploadAssetRequest{} + b, x := &b0, m0 + _, _ = b, x + if b.Metadata != nil { + x.xxx_hidden_Msg = &batonServiceUploadAssetRequest_Metadata{b.Metadata} + } + if b.Data != nil { + x.xxx_hidden_Msg = &batonServiceUploadAssetRequest_Data{b.Data} + } + if b.Eof != nil { + x.xxx_hidden_Msg = &batonServiceUploadAssetRequest_Eof{b.Eof} + } + return m0 +} + +type case_BatonServiceUploadAssetRequest_Msg protoreflect.FieldNumber + +func (x case_BatonServiceUploadAssetRequest_Msg) String() string { + md := file_c1_connectorapi_baton_v1_baton_proto_msgTypes[7].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isBatonServiceUploadAssetRequest_Msg interface { + isBatonServiceUploadAssetRequest_Msg() +} + +type batonServiceUploadAssetRequest_Metadata struct { + Metadata *BatonServiceUploadAssetRequest_UploadMetadata `protobuf:"bytes,100,opt,name=metadata,proto3,oneof"` +} + +type batonServiceUploadAssetRequest_Data struct { + Data *BatonServiceUploadAssetRequest_UploadData `protobuf:"bytes,101,opt,name=data,proto3,oneof"` +} + +type batonServiceUploadAssetRequest_Eof struct { + Eof *BatonServiceUploadAssetRequest_UploadEOF `protobuf:"bytes,102,opt,name=eof,proto3,oneof"` +} + +func (*batonServiceUploadAssetRequest_Metadata) isBatonServiceUploadAssetRequest_Msg() {} + +func (*batonServiceUploadAssetRequest_Data) isBatonServiceUploadAssetRequest_Msg() {} + +func (*batonServiceUploadAssetRequest_Eof) isBatonServiceUploadAssetRequest_Msg() {} + +type BatonServiceUploadAssetResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + 
xxx_hidden_Annotations *[]*anypb.Any           `protobuf:"bytes,1,rep,name=annotations,proto3"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *BatonServiceUploadAssetResponse) Reset() {
+	*x = BatonServiceUploadAssetResponse{}
+	mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *BatonServiceUploadAssetResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BatonServiceUploadAssetResponse) ProtoMessage() {}
+
+func (x *BatonServiceUploadAssetResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[8]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *BatonServiceUploadAssetResponse) GetAnnotations() []*anypb.Any {
+	if x != nil {
+		if x.xxx_hidden_Annotations != nil {
+			return *x.xxx_hidden_Annotations
+		}
+	}
+	return nil
+}
+
+func (x *BatonServiceUploadAssetResponse) SetAnnotations(v []*anypb.Any) {
+	x.xxx_hidden_Annotations = &v
+}
+
+type BatonServiceUploadAssetResponse_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	Annotations []*anypb.Any
+}
+
+func (b0 BatonServiceUploadAssetResponse_builder) Build() *BatonServiceUploadAssetResponse {
+	m0 := &BatonServiceUploadAssetResponse{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Annotations = &b.Annotations
+	return m0
+}
+
+// The connector reports that a task has finished. The task should always be removed from the queue, and if the error isn't fatal, it should be re-added to the queue to try again.
+type BatonServiceFinishTaskRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3"` + xxx_hidden_TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3"` + xxx_hidden_Status *status.Status `protobuf:"bytes,3,opt,name=status,proto3"` + xxx_hidden_FinalState isBatonServiceFinishTaskRequest_FinalState `protobuf_oneof:"final_state"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceFinishTaskRequest) Reset() { + *x = BatonServiceFinishTaskRequest{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceFinishTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceFinishTaskRequest) ProtoMessage() {} + +func (x *BatonServiceFinishTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceFinishTaskRequest) GetHostId() string { + if x != nil { + return x.xxx_hidden_HostId + } + return "" +} + +func (x *BatonServiceFinishTaskRequest) GetTaskId() string { + if x != nil { + return x.xxx_hidden_TaskId + } + return "" +} + +func (x *BatonServiceFinishTaskRequest) GetStatus() *status.Status { + if x != nil { + return x.xxx_hidden_Status + } + return nil +} + +func (x *BatonServiceFinishTaskRequest) GetError() *BatonServiceFinishTaskRequest_Error { + if x != nil { + if x, ok := x.xxx_hidden_FinalState.(*batonServiceFinishTaskRequest_Error_); ok { + return x.Error + } + } + return nil +} + +func (x *BatonServiceFinishTaskRequest) GetSuccess() *BatonServiceFinishTaskRequest_Success { + if x != nil { + if x, ok := x.xxx_hidden_FinalState.(*batonServiceFinishTaskRequest_Success_); ok { + return x.Success + } + } + return nil +} + +func (x *BatonServiceFinishTaskRequest) SetHostId(v string) { + x.xxx_hidden_HostId = v +} + +func (x *BatonServiceFinishTaskRequest) SetTaskId(v string) { + x.xxx_hidden_TaskId = v +} + +func (x *BatonServiceFinishTaskRequest) SetStatus(v *status.Status) { + x.xxx_hidden_Status = v +} + +func (x *BatonServiceFinishTaskRequest) SetError(v *BatonServiceFinishTaskRequest_Error) { + if v == nil { + x.xxx_hidden_FinalState = nil + return + } + x.xxx_hidden_FinalState = &batonServiceFinishTaskRequest_Error_{v} +} + +func (x *BatonServiceFinishTaskRequest) SetSuccess(v *BatonServiceFinishTaskRequest_Success) { + if v == nil { + x.xxx_hidden_FinalState = nil + return + } + x.xxx_hidden_FinalState = &batonServiceFinishTaskRequest_Success_{v} +} + +func (x *BatonServiceFinishTaskRequest) HasStatus() bool { + if x == nil { + return false + } + return x.xxx_hidden_Status != nil +} + +func (x *BatonServiceFinishTaskRequest) HasFinalState() bool { + if x == nil { + return false + } + return x.xxx_hidden_FinalState != nil +} + +func (x *BatonServiceFinishTaskRequest) HasError() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_FinalState.(*batonServiceFinishTaskRequest_Error_) + return ok +} + +func (x *BatonServiceFinishTaskRequest) HasSuccess() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_FinalState.(*batonServiceFinishTaskRequest_Success_) + return ok +} + +func (x 
*BatonServiceFinishTaskRequest) ClearStatus() { + x.xxx_hidden_Status = nil +} + +func (x *BatonServiceFinishTaskRequest) ClearFinalState() { + x.xxx_hidden_FinalState = nil +} + +func (x *BatonServiceFinishTaskRequest) ClearError() { + if _, ok := x.xxx_hidden_FinalState.(*batonServiceFinishTaskRequest_Error_); ok { + x.xxx_hidden_FinalState = nil + } +} + +func (x *BatonServiceFinishTaskRequest) ClearSuccess() { + if _, ok := x.xxx_hidden_FinalState.(*batonServiceFinishTaskRequest_Success_); ok { + x.xxx_hidden_FinalState = nil + } +} + +const BatonServiceFinishTaskRequest_FinalState_not_set_case case_BatonServiceFinishTaskRequest_FinalState = 0 +const BatonServiceFinishTaskRequest_Error_case case_BatonServiceFinishTaskRequest_FinalState = 100 +const BatonServiceFinishTaskRequest_Success_case case_BatonServiceFinishTaskRequest_FinalState = 101 + +func (x *BatonServiceFinishTaskRequest) WhichFinalState() case_BatonServiceFinishTaskRequest_FinalState { + if x == nil { + return BatonServiceFinishTaskRequest_FinalState_not_set_case + } + switch x.xxx_hidden_FinalState.(type) { + case *batonServiceFinishTaskRequest_Error_: + return BatonServiceFinishTaskRequest_Error_case + case *batonServiceFinishTaskRequest_Success_: + return BatonServiceFinishTaskRequest_Success_case + default: + return BatonServiceFinishTaskRequest_FinalState_not_set_case + } +} + +type BatonServiceFinishTaskRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + HostId string + TaskId string + Status *status.Status + // Fields of oneof xxx_hidden_FinalState: + Error *BatonServiceFinishTaskRequest_Error + Success *BatonServiceFinishTaskRequest_Success + // -- end of xxx_hidden_FinalState +} + +func (b0 BatonServiceFinishTaskRequest_builder) Build() *BatonServiceFinishTaskRequest { + m0 := &BatonServiceFinishTaskRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_HostId = b.HostId + x.xxx_hidden_TaskId = b.TaskId + x.xxx_hidden_Status = b.Status + if b.Error != nil { + x.xxx_hidden_FinalState = &batonServiceFinishTaskRequest_Error_{b.Error} + } + if b.Success != nil { + x.xxx_hidden_FinalState = &batonServiceFinishTaskRequest_Success_{b.Success} + } + return m0 +} + +type case_BatonServiceFinishTaskRequest_FinalState protoreflect.FieldNumber + +func (x case_BatonServiceFinishTaskRequest_FinalState) String() string { + md := file_c1_connectorapi_baton_v1_baton_proto_msgTypes[9].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isBatonServiceFinishTaskRequest_FinalState interface { + isBatonServiceFinishTaskRequest_FinalState() +} + +type batonServiceFinishTaskRequest_Error_ struct { + Error *BatonServiceFinishTaskRequest_Error `protobuf:"bytes,100,opt,name=error,proto3,oneof"` +} + +type batonServiceFinishTaskRequest_Success_ struct { + Success *BatonServiceFinishTaskRequest_Success `protobuf:"bytes,101,opt,name=success,proto3,oneof"` +} + +func (*batonServiceFinishTaskRequest_Error_) isBatonServiceFinishTaskRequest_FinalState() {} + +func (*batonServiceFinishTaskRequest_Success_) isBatonServiceFinishTaskRequest_FinalState() {} + +type BatonServiceFinishTaskResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceFinishTaskResponse) Reset() { + *x = 
BatonServiceFinishTaskResponse{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceFinishTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceFinishTaskResponse) ProtoMessage() {} + +func (x *BatonServiceFinishTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceFinishTaskResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceFinishTaskResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type BatonServiceFinishTaskResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 BatonServiceFinishTaskResponse_builder) Build() *BatonServiceFinishTaskResponse { + m0 := &BatonServiceFinishTaskResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type StartDebuggingRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartDebuggingRequest) Reset() { + *x = StartDebuggingRequest{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartDebuggingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartDebuggingRequest) ProtoMessage() {} + +func (x *StartDebuggingRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type StartDebuggingRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 StartDebuggingRequest_builder) Build() *StartDebuggingRequest { + m0 := &StartDebuggingRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type StartDebuggingResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Status bool `protobuf:"varint,1,opt,name=status,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartDebuggingResponse) Reset() { + *x = StartDebuggingResponse{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartDebuggingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartDebuggingResponse) ProtoMessage() {} + +func (x *StartDebuggingResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *StartDebuggingResponse) GetStatus() bool { + if x != nil { + return x.xxx_hidden_Status + } + return false +} + +func (x *StartDebuggingResponse) SetStatus(v bool) { + x.xxx_hidden_Status = v +} + +type StartDebuggingResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Status bool +} + +func (b0 StartDebuggingResponse_builder) Build() *StartDebuggingResponse { + m0 := &StartDebuggingResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Status = b.Status + return m0 +} + +type Task_NoneTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_NoneTask) Reset() { + *x = Task_NoneTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_NoneTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_NoneTask) ProtoMessage() {} + +func (x *Task_NoneTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_NoneTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_NoneTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_NoneTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 Task_NoneTask_builder) Build() *Task_NoneTask { + m0 := &Task_NoneTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_HelloTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_HelloTask) Reset() { + *x = Task_HelloTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_HelloTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_HelloTask) ProtoMessage() {} + +func (x *Task_HelloTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_HelloTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_HelloTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_HelloTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any +} + +func (b0 Task_HelloTask_builder) Build() *Task_HelloTask { + m0 := &Task_HelloTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_SyncFullTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_SkipExpandGrants bool `protobuf:"varint,2,opt,name=skip_expand_grants,json=skipExpandGrants,proto3"` + xxx_hidden_SkipEntitlementsAndGrants bool `protobuf:"varint,3,opt,name=skip_entitlements_and_grants,json=skipEntitlementsAndGrants,proto3"` + xxx_hidden_TargetedSyncResources *[]*v2.Resource `protobuf:"bytes,4,rep,name=targeted_sync_resources,json=targetedSyncResources,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_SyncFullTask) Reset() { + *x = Task_SyncFullTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_SyncFullTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_SyncFullTask) ProtoMessage() {} + +func (x *Task_SyncFullTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_SyncFullTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_SyncFullTask) GetSkipExpandGrants() bool { + if x != nil { + return x.xxx_hidden_SkipExpandGrants + } + return false +} + +func (x *Task_SyncFullTask) GetSkipEntitlementsAndGrants() bool { + if x != nil { + return x.xxx_hidden_SkipEntitlementsAndGrants + } + return false +} + +func (x *Task_SyncFullTask) 
GetTargetedSyncResources() []*v2.Resource { + if x != nil { + if x.xxx_hidden_TargetedSyncResources != nil { + return *x.xxx_hidden_TargetedSyncResources + } + } + return nil +} + +func (x *Task_SyncFullTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Task_SyncFullTask) SetSkipExpandGrants(v bool) { + x.xxx_hidden_SkipExpandGrants = v +} + +func (x *Task_SyncFullTask) SetSkipEntitlementsAndGrants(v bool) { + x.xxx_hidden_SkipEntitlementsAndGrants = v +} + +func (x *Task_SyncFullTask) SetTargetedSyncResources(v []*v2.Resource) { + x.xxx_hidden_TargetedSyncResources = &v +} + +type Task_SyncFullTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + SkipExpandGrants bool + SkipEntitlementsAndGrants bool + TargetedSyncResources []*v2.Resource +} + +func (b0 Task_SyncFullTask_builder) Build() *Task_SyncFullTask { + m0 := &Task_SyncFullTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_SkipExpandGrants = b.SkipExpandGrants + x.xxx_hidden_SkipEntitlementsAndGrants = b.SkipEntitlementsAndGrants + x.xxx_hidden_TargetedSyncResources = &b.TargetedSyncResources + return m0 +} + +type Task_EventFeedTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_StartAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_at,json=startAt,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_EventFeedTask) Reset() { + *x = Task_EventFeedTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_EventFeedTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_EventFeedTask) ProtoMessage() {} + +func (x *Task_EventFeedTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_EventFeedTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_EventFeedTask) GetStartAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_StartAt + } + return nil +} + +func (x *Task_EventFeedTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Task_EventFeedTask) SetStartAt(v *timestamppb.Timestamp) { + x.xxx_hidden_StartAt = v +} + +func (x *Task_EventFeedTask) HasStartAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_StartAt != nil +} + +func (x *Task_EventFeedTask) ClearStartAt() { + x.xxx_hidden_StartAt = nil +} + +type Task_EventFeedTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + StartAt *timestamppb.Timestamp +} + +func (b0 Task_EventFeedTask_builder) Build() *Task_EventFeedTask { + m0 := &Task_EventFeedTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_StartAt = b.StartAt + return m0 +} + +type Task_GrantTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` + xxx_hidden_Principal *v2.Resource `protobuf:"bytes,2,opt,name=principal,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + xxx_hidden_Duration *durationpb.Duration `protobuf:"bytes,4,opt,name=duration,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_GrantTask) Reset() { + *x = Task_GrantTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_GrantTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_GrantTask) ProtoMessage() {} + +func (x *Task_GrantTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_GrantTask) GetEntitlement() *v2.Entitlement { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *Task_GrantTask) GetPrincipal() *v2.Resource { + if x != nil { + return x.xxx_hidden_Principal + } + return nil +} + +func (x *Task_GrantTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_GrantTask) GetDuration() *durationpb.Duration { + if x != nil { + return x.xxx_hidden_Duration + } + return nil +} + +func (x *Task_GrantTask) SetEntitlement(v *v2.Entitlement) { + x.xxx_hidden_Entitlement = v +} + +func (x *Task_GrantTask) SetPrincipal(v *v2.Resource) { + x.xxx_hidden_Principal = v +} + +func (x *Task_GrantTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Task_GrantTask) SetDuration(v *durationpb.Duration) { + x.xxx_hidden_Duration = v +} + +func (x *Task_GrantTask) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *Task_GrantTask) HasPrincipal() bool { + if x == nil { + return false + } + return x.xxx_hidden_Principal != nil +} + +func (x *Task_GrantTask) HasDuration() bool { + if x == nil { + return false + } + return x.xxx_hidden_Duration != nil +} + +func (x *Task_GrantTask) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +func (x *Task_GrantTask) ClearPrincipal() { + x.xxx_hidden_Principal = nil +} + +func (x *Task_GrantTask) ClearDuration() { + x.xxx_hidden_Duration = nil +} + +type Task_GrantTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entitlement *v2.Entitlement + Principal *v2.Resource + Annotations []*anypb.Any + Duration *durationpb.Duration +} + +func (b0 Task_GrantTask_builder) Build() *Task_GrantTask { + m0 := &Task_GrantTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entitlement = b.Entitlement + x.xxx_hidden_Principal = b.Principal + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Duration = b.Duration + return m0 +} + +type Task_RevokeTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Grant *v2.Grant `protobuf:"bytes,1,opt,name=grant,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_RevokeTask) Reset() { + *x = Task_RevokeTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_RevokeTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_RevokeTask) ProtoMessage() {} + +func (x *Task_RevokeTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_RevokeTask) GetGrant() *v2.Grant { + if x != nil { + return x.xxx_hidden_Grant + } + return nil +} + +func (x *Task_RevokeTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_RevokeTask) SetGrant(v *v2.Grant) { + x.xxx_hidden_Grant = v +} + +func (x *Task_RevokeTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Task_RevokeTask) HasGrant() bool { + if x == nil { + return false + } + return x.xxx_hidden_Grant != nil +} + +func (x *Task_RevokeTask) ClearGrant() { + x.xxx_hidden_Grant = nil +} + +type Task_RevokeTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Grant *v2.Grant + Annotations []*anypb.Any +} + +func (b0 Task_RevokeTask_builder) Build() *Task_RevokeTask { + m0 := &Task_RevokeTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Grant = b.Grant + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_CreateAccountTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_AccountInfo *v2.AccountInfo `protobuf:"bytes,1,opt,name=account_info,json=accountInfo,proto3"` + xxx_hidden_CredentialOptions *v2.CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3"` + xxx_hidden_EncryptionConfigs *[]*v2.EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_CreateAccountTask) Reset() { + *x = Task_CreateAccountTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_CreateAccountTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_CreateAccountTask) ProtoMessage() {} + +func (x *Task_CreateAccountTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_CreateAccountTask) GetAccountInfo() *v2.AccountInfo { + if x != nil { + return x.xxx_hidden_AccountInfo + } + return nil +} + +func (x *Task_CreateAccountTask) GetCredentialOptions() *v2.CredentialOptions { + if x != nil { + return x.xxx_hidden_CredentialOptions + } + return nil +} + +func (x *Task_CreateAccountTask) GetEncryptionConfigs() []*v2.EncryptionConfig { + if x != nil { + if x.xxx_hidden_EncryptionConfigs != nil { + return *x.xxx_hidden_EncryptionConfigs + } + } + return nil +} + +func (x *Task_CreateAccountTask) SetAccountInfo(v *v2.AccountInfo) { + x.xxx_hidden_AccountInfo = v +} + +func (x *Task_CreateAccountTask) SetCredentialOptions(v *v2.CredentialOptions) { + x.xxx_hidden_CredentialOptions = v +} + +func (x *Task_CreateAccountTask) SetEncryptionConfigs(v []*v2.EncryptionConfig) { + x.xxx_hidden_EncryptionConfigs = &v +} + +func (x *Task_CreateAccountTask) HasAccountInfo() bool { + if x == nil { + return false + } + return x.xxx_hidden_AccountInfo != nil +} + +func (x *Task_CreateAccountTask) HasCredentialOptions() bool { + if x == nil { + return false + } + return x.xxx_hidden_CredentialOptions != nil +} + +func (x *Task_CreateAccountTask) ClearAccountInfo() { + x.xxx_hidden_AccountInfo = nil +} + +func (x *Task_CreateAccountTask) ClearCredentialOptions() { + x.xxx_hidden_CredentialOptions = nil +} + +type Task_CreateAccountTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + AccountInfo *v2.AccountInfo + CredentialOptions *v2.CredentialOptions + EncryptionConfigs []*v2.EncryptionConfig +} + +func (b0 Task_CreateAccountTask_builder) Build() *Task_CreateAccountTask { + m0 := &Task_CreateAccountTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_AccountInfo = b.AccountInfo + x.xxx_hidden_CredentialOptions = b.CredentialOptions + x.xxx_hidden_EncryptionConfigs = &b.EncryptionConfigs + return m0 +} + +type Task_CreateResourceTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *v2.Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_CreateResourceTask) Reset() { + *x = Task_CreateResourceTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_CreateResourceTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_CreateResourceTask) ProtoMessage() {} + +func (x *Task_CreateResourceTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_CreateResourceTask) GetResource() *v2.Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *Task_CreateResourceTask) SetResource(v *v2.Resource) { + x.xxx_hidden_Resource = v +} + +func (x *Task_CreateResourceTask) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *Task_CreateResourceTask) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type Task_CreateResourceTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *v2.Resource +} + +func (b0 Task_CreateResourceTask_builder) Build() *Task_CreateResourceTask { + m0 := &Task_CreateResourceTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + return m0 +} + +type Task_DeleteResourceTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *v2.ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_ParentResourceId *v2.ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_DeleteResourceTask) Reset() { + *x = Task_DeleteResourceTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_DeleteResourceTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_DeleteResourceTask) ProtoMessage() {} + +func (x *Task_DeleteResourceTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_DeleteResourceTask) GetResourceId() *v2.ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *Task_DeleteResourceTask) GetParentResourceId() *v2.ResourceId { + if x != nil { + return x.xxx_hidden_ParentResourceId + } + return nil +} + +func (x *Task_DeleteResourceTask) SetResourceId(v *v2.ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *Task_DeleteResourceTask) SetParentResourceId(v *v2.ResourceId) { + x.xxx_hidden_ParentResourceId = v +} + +func (x *Task_DeleteResourceTask) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *Task_DeleteResourceTask) HasParentResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ParentResourceId != nil +} + +func (x *Task_DeleteResourceTask) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +func (x *Task_DeleteResourceTask) ClearParentResourceId() { + x.xxx_hidden_ParentResourceId = nil +} + +type Task_DeleteResourceTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceId *v2.ResourceId + ParentResourceId *v2.ResourceId +} + +func (b0 Task_DeleteResourceTask_builder) Build() *Task_DeleteResourceTask { + m0 := &Task_DeleteResourceTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_ParentResourceId = b.ParentResourceId + return m0 +} + +type Task_RotateCredentialsTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *v2.ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_CredentialOptions *v2.CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3"` + xxx_hidden_EncryptionConfigs *[]*v2.EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_RotateCredentialsTask) Reset() { + *x = Task_RotateCredentialsTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_RotateCredentialsTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_RotateCredentialsTask) ProtoMessage() {} + +func (x *Task_RotateCredentialsTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_RotateCredentialsTask) GetResourceId() *v2.ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *Task_RotateCredentialsTask) GetCredentialOptions() *v2.CredentialOptions { + if x != nil { + return x.xxx_hidden_CredentialOptions + } + return nil +} + +func (x *Task_RotateCredentialsTask) GetEncryptionConfigs() []*v2.EncryptionConfig { + if x != nil { + if x.xxx_hidden_EncryptionConfigs != nil { + return *x.xxx_hidden_EncryptionConfigs + } + } + return nil +} + +func (x *Task_RotateCredentialsTask) SetResourceId(v *v2.ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *Task_RotateCredentialsTask) SetCredentialOptions(v *v2.CredentialOptions) { + x.xxx_hidden_CredentialOptions = v +} + +func (x *Task_RotateCredentialsTask) SetEncryptionConfigs(v []*v2.EncryptionConfig) { + x.xxx_hidden_EncryptionConfigs = &v +} + +func (x *Task_RotateCredentialsTask) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *Task_RotateCredentialsTask) HasCredentialOptions() bool { + if x == nil { + return false + } + return x.xxx_hidden_CredentialOptions != nil +} + +func (x *Task_RotateCredentialsTask) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +func (x *Task_RotateCredentialsTask) ClearCredentialOptions() { + x.xxx_hidden_CredentialOptions = nil +} + +type Task_RotateCredentialsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceId *v2.ResourceId + CredentialOptions *v2.CredentialOptions + EncryptionConfigs []*v2.EncryptionConfig +} + +func (b0 Task_RotateCredentialsTask_builder) Build() *Task_RotateCredentialsTask { + m0 := &Task_RotateCredentialsTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_CredentialOptions = b.CredentialOptions + x.xxx_hidden_EncryptionConfigs = &b.EncryptionConfigs + return m0 +} + +type Task_CreateTicketTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_TicketRequest *v2.TicketRequest `protobuf:"bytes,1,opt,name=ticket_request,json=ticketRequest,proto3"` + xxx_hidden_TicketSchema *v2.TicketSchema `protobuf:"bytes,2,opt,name=ticket_schema,json=ticketSchema,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_CreateTicketTask) Reset() { + *x = Task_CreateTicketTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_CreateTicketTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_CreateTicketTask) ProtoMessage() {} + +func (x *Task_CreateTicketTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_CreateTicketTask) GetTicketRequest() *v2.TicketRequest { + if x != nil { + return x.xxx_hidden_TicketRequest + } + return nil +} + +func (x *Task_CreateTicketTask) GetTicketSchema() *v2.TicketSchema { + if x != nil { + return x.xxx_hidden_TicketSchema + } + return nil +} + +func (x *Task_CreateTicketTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_CreateTicketTask) SetTicketRequest(v *v2.TicketRequest) { + x.xxx_hidden_TicketRequest = v +} + +func (x *Task_CreateTicketTask) SetTicketSchema(v *v2.TicketSchema) { + x.xxx_hidden_TicketSchema = v +} + +func (x *Task_CreateTicketTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Task_CreateTicketTask) HasTicketRequest() bool { + if x == nil { + return false + } + return x.xxx_hidden_TicketRequest != nil +} + +func (x *Task_CreateTicketTask) HasTicketSchema() bool { + if x == nil { + return false + } + return x.xxx_hidden_TicketSchema != nil +} + +func (x *Task_CreateTicketTask) ClearTicketRequest() { + x.xxx_hidden_TicketRequest = nil +} + +func (x *Task_CreateTicketTask) ClearTicketSchema() { + x.xxx_hidden_TicketSchema = nil +} + +type Task_CreateTicketTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + TicketRequest *v2.TicketRequest + TicketSchema *v2.TicketSchema + Annotations []*anypb.Any +} + +func (b0 Task_CreateTicketTask_builder) Build() *Task_CreateTicketTask { + m0 := &Task_CreateTicketTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_TicketRequest = b.TicketRequest + x.xxx_hidden_TicketSchema = b.TicketSchema + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_BulkCreateTicketsTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_TicketRequests *[]*Task_CreateTicketTask `protobuf:"bytes,1,rep,name=ticket_requests,json=ticketRequests,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_BulkCreateTicketsTask) Reset() { + *x = Task_BulkCreateTicketsTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_BulkCreateTicketsTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_BulkCreateTicketsTask) ProtoMessage() {} + +func (x *Task_BulkCreateTicketsTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_BulkCreateTicketsTask) GetTicketRequests() []*Task_CreateTicketTask { + if x != nil { + if x.xxx_hidden_TicketRequests != nil { + return *x.xxx_hidden_TicketRequests + } + } + return nil +} + +func (x *Task_BulkCreateTicketsTask) SetTicketRequests(v []*Task_CreateTicketTask) { + x.xxx_hidden_TicketRequests = &v +} + +type Task_BulkCreateTicketsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + TicketRequests []*Task_CreateTicketTask +} + +func (b0 Task_BulkCreateTicketsTask_builder) Build() *Task_BulkCreateTicketsTask { + m0 := &Task_BulkCreateTicketsTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_TicketRequests = &b.TicketRequests + return m0 +} + +type Task_BulkGetTicketsTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_TicketRequests *[]*Task_GetTicketTask `protobuf:"bytes,1,rep,name=ticket_requests,json=ticketRequests,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_BulkGetTicketsTask) Reset() { + *x = Task_BulkGetTicketsTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_BulkGetTicketsTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_BulkGetTicketsTask) ProtoMessage() {} + +func (x *Task_BulkGetTicketsTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_BulkGetTicketsTask) GetTicketRequests() []*Task_GetTicketTask { + if x != nil { + if x.xxx_hidden_TicketRequests != nil { + return *x.xxx_hidden_TicketRequests + } + } + return nil +} + +func (x *Task_BulkGetTicketsTask) SetTicketRequests(v []*Task_GetTicketTask) { + x.xxx_hidden_TicketRequests = &v +} + +type Task_BulkGetTicketsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + TicketRequests []*Task_GetTicketTask +} + +func (b0 Task_BulkGetTicketsTask_builder) Build() *Task_BulkGetTicketsTask { + m0 := &Task_BulkGetTicketsTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_TicketRequests = &b.TicketRequests + return m0 +} + +type Task_ListTicketSchemasTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ListTicketSchemasTask) Reset() { + *x = Task_ListTicketSchemasTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ListTicketSchemasTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ListTicketSchemasTask) ProtoMessage() {} + +func (x *Task_ListTicketSchemasTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ListTicketSchemasTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_ListTicketSchemasTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_ListTicketSchemasTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 Task_ListTicketSchemasTask_builder) Build() *Task_ListTicketSchemasTask { + m0 := &Task_ListTicketSchemasTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_GetTicketTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_TicketId string `protobuf:"bytes,1,opt,name=ticket_id,json=ticketId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_GetTicketTask) Reset() { + *x = Task_GetTicketTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_GetTicketTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_GetTicketTask) ProtoMessage() {} + +func (x *Task_GetTicketTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_GetTicketTask) GetTicketId() string { + if x != nil { + return x.xxx_hidden_TicketId + } + return "" +} + +func (x *Task_GetTicketTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_GetTicketTask) SetTicketId(v string) { + x.xxx_hidden_TicketId = v +} + +func (x *Task_GetTicketTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_GetTicketTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + TicketId string + Annotations []*anypb.Any +} + +func (b0 Task_GetTicketTask_builder) Build() *Task_GetTicketTask { + m0 := &Task_GetTicketTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_TicketId = b.TicketId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_ActionListSchemasTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,2,opt,name=resource_type_id,json=resourceTypeId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ActionListSchemasTask) Reset() { + *x = Task_ActionListSchemasTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ActionListSchemasTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ActionListSchemasTask) ProtoMessage() {} + +func (x *Task_ActionListSchemasTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ActionListSchemasTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_ActionListSchemasTask) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *Task_ActionListSchemasTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Task_ActionListSchemasTask) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +type Task_ActionListSchemasTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + // Optional: filter to only return actions for a specific resource type + ResourceTypeId string +} + +func (b0 Task_ActionListSchemasTask_builder) Build() *Task_ActionListSchemasTask { + m0 := &Task_ActionListSchemasTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + return m0 +} + +type Task_ActionGetSchemaTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ActionGetSchemaTask) Reset() { + *x = Task_ActionGetSchemaTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ActionGetSchemaTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ActionGetSchemaTask) ProtoMessage() {} + +func (x *Task_ActionGetSchemaTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ActionGetSchemaTask) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Task_ActionGetSchemaTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_ActionGetSchemaTask) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Task_ActionGetSchemaTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_ActionGetSchemaTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Annotations []*anypb.Any +} + +func (b0 Task_ActionGetSchemaTask_builder) Build() *Task_ActionGetSchemaTask { + m0 := &Task_ActionGetSchemaTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_ActionInvokeTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Args *structpb.Struct `protobuf:"bytes,2,opt,name=args,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,4,opt,name=resource_type_id,json=resourceTypeId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ActionInvokeTask) Reset() { + *x = Task_ActionInvokeTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ActionInvokeTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ActionInvokeTask) ProtoMessage() {} + +func (x *Task_ActionInvokeTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ActionInvokeTask) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Task_ActionInvokeTask) GetArgs() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Args + } + return nil +} + +func (x *Task_ActionInvokeTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_ActionInvokeTask) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *Task_ActionInvokeTask) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Task_ActionInvokeTask) SetArgs(v *structpb.Struct) { + x.xxx_hidden_Args = v +} + +func (x *Task_ActionInvokeTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Task_ActionInvokeTask) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +func (x *Task_ActionInvokeTask) HasArgs() bool { + if x == nil { + return false + } + return x.xxx_hidden_Args != nil +} + +func (x *Task_ActionInvokeTask) ClearArgs() { + x.xxx_hidden_Args = nil +} + +type Task_ActionInvokeTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Args *structpb.Struct + Annotations []*anypb.Any + // Optional: if set, invokes a resource-scoped action + ResourceTypeId string +} + +func (b0 Task_ActionInvokeTask_builder) Build() *Task_ActionInvokeTask { + m0 := &Task_ActionInvokeTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Args = b.Args + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + return m0 +} + +type Task_ActionStatusTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3"` + xxx_hidden_Id string `protobuf:"bytes,2,opt,name=id,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ActionStatusTask) Reset() { + *x = Task_ActionStatusTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ActionStatusTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ActionStatusTask) ProtoMessage() {} + +func (x *Task_ActionStatusTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ActionStatusTask) GetName() string { + if x != nil { + return x.xxx_hidden_Name + } + return "" +} + +func (x *Task_ActionStatusTask) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *Task_ActionStatusTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_ActionStatusTask) SetName(v string) { + x.xxx_hidden_Name = v +} + +func (x *Task_ActionStatusTask) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *Task_ActionStatusTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_ActionStatusTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Name string + Id string + Annotations []*anypb.Any +} + +func (b0 Task_ActionStatusTask_builder) Build() *Task_ActionStatusTask { + m0 := &Task_ActionStatusTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Name = b.Name + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_CreateSyncDiffTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_BaseSyncId string `protobuf:"bytes,1,opt,name=base_sync_id,json=baseSyncId,proto3"` + xxx_hidden_NewSyncId string `protobuf:"bytes,2,opt,name=new_sync_id,json=newSyncId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_CreateSyncDiffTask) Reset() { + *x = Task_CreateSyncDiffTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_CreateSyncDiffTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_CreateSyncDiffTask) ProtoMessage() {} + +func (x *Task_CreateSyncDiffTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_CreateSyncDiffTask) GetBaseSyncId() string { + if x != nil { + return x.xxx_hidden_BaseSyncId + } + return "" +} + +func (x *Task_CreateSyncDiffTask) GetNewSyncId() string { + if x != nil { + return x.xxx_hidden_NewSyncId + } + return "" +} + +func (x *Task_CreateSyncDiffTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_CreateSyncDiffTask) SetBaseSyncId(v string) { + x.xxx_hidden_BaseSyncId = v +} + +func (x *Task_CreateSyncDiffTask) SetNewSyncId(v string) { + x.xxx_hidden_NewSyncId = v +} + +func (x *Task_CreateSyncDiffTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_CreateSyncDiffTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // Open to suggestions here + BaseSyncId string + NewSyncId string + Annotations []*anypb.Any +} + +func (b0 Task_CreateSyncDiffTask_builder) Build() *Task_CreateSyncDiffTask { + m0 := &Task_CreateSyncDiffTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_BaseSyncId = b.BaseSyncId + x.xxx_hidden_NewSyncId = b.NewSyncId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_CompactSyncs struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_CompactableSyncs *[]*Task_CompactSyncs_CompactableSync `protobuf:"bytes,1,rep,name=compactable_syncs,json=compactableSyncs,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_CompactSyncs) Reset() { + *x = Task_CompactSyncs{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_CompactSyncs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_CompactSyncs) ProtoMessage() {} + +func (x *Task_CompactSyncs) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_CompactSyncs) GetCompactableSyncs() []*Task_CompactSyncs_CompactableSync { + if x != nil { + if x.xxx_hidden_CompactableSyncs != nil { + return *x.xxx_hidden_CompactableSyncs + } + } + return nil +} + +func (x *Task_CompactSyncs) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_CompactSyncs) SetCompactableSyncs(v []*Task_CompactSyncs_CompactableSync) { + x.xxx_hidden_CompactableSyncs = &v +} + +func (x *Task_CompactSyncs) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_CompactSyncs_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + CompactableSyncs []*Task_CompactSyncs_CompactableSync + Annotations []*anypb.Any +} + +func (b0 Task_CompactSyncs_builder) Build() *Task_CompactSyncs { + m0 := &Task_CompactSyncs{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_CompactableSyncs = &b.CompactableSyncs + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type Task_CompactSyncs_CompactableSync struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_FilePath string `protobuf:"bytes,1,opt,name=file_path,json=filePath,proto3"` + xxx_hidden_SyncId string `protobuf:"bytes,2,opt,name=sync_id,json=syncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_CompactSyncs_CompactableSync) Reset() { + *x = Task_CompactSyncs_CompactableSync{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_CompactSyncs_CompactableSync) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_CompactSyncs_CompactableSync) ProtoMessage() {} + +func (x *Task_CompactSyncs_CompactableSync) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_CompactSyncs_CompactableSync) GetFilePath() string { + if x != nil { + return x.xxx_hidden_FilePath + } + return "" +} + +func (x *Task_CompactSyncs_CompactableSync) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *Task_CompactSyncs_CompactableSync) SetFilePath(v string) { + x.xxx_hidden_FilePath = v +} + +func (x *Task_CompactSyncs_CompactableSync) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +type Task_CompactSyncs_CompactableSync_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + FilePath string + SyncId string +} + +func (b0 Task_CompactSyncs_CompactableSync_builder) Build() *Task_CompactSyncs_CompactableSync { + m0 := &Task_CompactSyncs_CompactableSync{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_FilePath = b.FilePath + x.xxx_hidden_SyncId = b.SyncId + return m0 +} + +type BatonServiceHelloRequest_BuildInfo struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_LangVersion string `protobuf:"bytes,1,opt,name=lang_version,json=langVersion,proto3"` + xxx_hidden_Package string `protobuf:"bytes,2,opt,name=package,proto3"` + xxx_hidden_PackageVersion string `protobuf:"bytes,3,opt,name=package_version,json=packageVersion,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceHelloRequest_BuildInfo) Reset() { + *x = BatonServiceHelloRequest_BuildInfo{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceHelloRequest_BuildInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceHelloRequest_BuildInfo) ProtoMessage() {} + +func (x *BatonServiceHelloRequest_BuildInfo) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceHelloRequest_BuildInfo) GetLangVersion() string { + if x != nil { + return x.xxx_hidden_LangVersion + } + return "" +} + +func (x *BatonServiceHelloRequest_BuildInfo) GetPackage() string { + if x != nil { + return x.xxx_hidden_Package + } + return "" +} + +func (x *BatonServiceHelloRequest_BuildInfo) GetPackageVersion() string { + if x != nil { + return x.xxx_hidden_PackageVersion + } + return "" +} + +func (x *BatonServiceHelloRequest_BuildInfo) SetLangVersion(v string) { + x.xxx_hidden_LangVersion = v +} + +func (x *BatonServiceHelloRequest_BuildInfo) SetPackage(v string) { + x.xxx_hidden_Package = v +} + +func (x *BatonServiceHelloRequest_BuildInfo) SetPackageVersion(v string) { + x.xxx_hidden_PackageVersion = v +} + +type BatonServiceHelloRequest_BuildInfo_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + LangVersion string + Package string + PackageVersion string +} + +func (b0 BatonServiceHelloRequest_BuildInfo_builder) Build() *BatonServiceHelloRequest_BuildInfo { + m0 := &BatonServiceHelloRequest_BuildInfo{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_LangVersion = b.LangVersion + x.xxx_hidden_Package = b.Package + x.xxx_hidden_PackageVersion = b.PackageVersion + return m0 +} + +type BatonServiceHelloRequest_OSInfo struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3"` + xxx_hidden_Os string `protobuf:"bytes,2,opt,name=os,proto3"` + xxx_hidden_Platform string `protobuf:"bytes,3,opt,name=platform,proto3"` + xxx_hidden_PlatformVersion string `protobuf:"bytes,4,opt,name=platform_version,json=platformVersion,proto3"` + xxx_hidden_PlatformFamily string `protobuf:"bytes,5,opt,name=platform_family,json=platformFamily,proto3"` + xxx_hidden_KernelVersion string `protobuf:"bytes,6,opt,name=kernel_version,json=kernelVersion,proto3"` + xxx_hidden_KernelArch string `protobuf:"bytes,7,opt,name=kernel_arch,json=kernelArch,proto3"` + xxx_hidden_VirtualizationSystem string `protobuf:"bytes,8,opt,name=virtualization_system,json=virtualizationSystem,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceHelloRequest_OSInfo) Reset() { + *x = BatonServiceHelloRequest_OSInfo{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceHelloRequest_OSInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceHelloRequest_OSInfo) ProtoMessage() {} + +func (x *BatonServiceHelloRequest_OSInfo) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceHelloRequest_OSInfo) GetHostname() string { + if x != nil { + return x.xxx_hidden_Hostname + } + return "" +} + +func (x *BatonServiceHelloRequest_OSInfo) GetOs() string { + if x != nil { + return x.xxx_hidden_Os + } + return "" +} + +func (x *BatonServiceHelloRequest_OSInfo) GetPlatform() string { + if x != nil { + return x.xxx_hidden_Platform + } + return "" +} + +func (x *BatonServiceHelloRequest_OSInfo) GetPlatformVersion() string { + if x != nil { + return x.xxx_hidden_PlatformVersion + } + return "" +} + +func (x *BatonServiceHelloRequest_OSInfo) GetPlatformFamily() string { + if x != nil { + return x.xxx_hidden_PlatformFamily + } + return "" +} + +func (x *BatonServiceHelloRequest_OSInfo) GetKernelVersion() string { + if x != nil { + return x.xxx_hidden_KernelVersion + } + return "" +} + +func (x *BatonServiceHelloRequest_OSInfo) GetKernelArch() string { + if x != nil { + return x.xxx_hidden_KernelArch + } + return "" +} + +func (x *BatonServiceHelloRequest_OSInfo) GetVirtualizationSystem() string { + if x != nil { + return x.xxx_hidden_VirtualizationSystem + } + return "" +} + +func (x *BatonServiceHelloRequest_OSInfo) SetHostname(v string) { + x.xxx_hidden_Hostname = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetOs(v string) { + x.xxx_hidden_Os = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetPlatform(v string) { + x.xxx_hidden_Platform = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetPlatformVersion(v string) 
{ + x.xxx_hidden_PlatformVersion = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetPlatformFamily(v string) { + x.xxx_hidden_PlatformFamily = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetKernelVersion(v string) { + x.xxx_hidden_KernelVersion = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetKernelArch(v string) { + x.xxx_hidden_KernelArch = v +} + +func (x *BatonServiceHelloRequest_OSInfo) SetVirtualizationSystem(v string) { + x.xxx_hidden_VirtualizationSystem = v +} + +type BatonServiceHelloRequest_OSInfo_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Hostname string + Os string + Platform string + PlatformVersion string + PlatformFamily string + KernelVersion string + KernelArch string + VirtualizationSystem string +} + +func (b0 BatonServiceHelloRequest_OSInfo_builder) Build() *BatonServiceHelloRequest_OSInfo { + m0 := &BatonServiceHelloRequest_OSInfo{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Hostname = b.Hostname + x.xxx_hidden_Os = b.Os + x.xxx_hidden_Platform = b.Platform + x.xxx_hidden_PlatformVersion = b.PlatformVersion + x.xxx_hidden_PlatformFamily = b.PlatformFamily + x.xxx_hidden_KernelVersion = b.KernelVersion + x.xxx_hidden_KernelArch = b.KernelArch + x.xxx_hidden_VirtualizationSystem = b.VirtualizationSystem + return m0 +} + +type BatonServiceUploadAssetRequest_UploadMetadata struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3"` + xxx_hidden_TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) Reset() { + *x = BatonServiceUploadAssetRequest_UploadMetadata{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceUploadAssetRequest_UploadMetadata) ProtoMessage() {} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) GetHostId() string { + if x != nil { + return x.xxx_hidden_HostId + } + return "" +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) GetTaskId() string { + if x != nil { + return x.xxx_hidden_TaskId + } + return "" +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) SetHostId(v string) { + x.xxx_hidden_HostId = v +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) SetTaskId(v string) { + x.xxx_hidden_TaskId = v +} + +func (x *BatonServiceUploadAssetRequest_UploadMetadata) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type BatonServiceUploadAssetRequest_UploadMetadata_builder struct { + _ [0]func() // Prevents comparability 
and use of unkeyed literals for the builder. + + HostId string + TaskId string + Annotations []*anypb.Any +} + +func (b0 BatonServiceUploadAssetRequest_UploadMetadata_builder) Build() *BatonServiceUploadAssetRequest_UploadMetadata { + m0 := &BatonServiceUploadAssetRequest_UploadMetadata{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_HostId = b.HostId + x.xxx_hidden_TaskId = b.TaskId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type BatonServiceUploadAssetRequest_UploadData struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Data []byte `protobuf:"bytes,1,opt,name=data,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceUploadAssetRequest_UploadData) Reset() { + *x = BatonServiceUploadAssetRequest_UploadData{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceUploadAssetRequest_UploadData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceUploadAssetRequest_UploadData) ProtoMessage() {} + +func (x *BatonServiceUploadAssetRequest_UploadData) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceUploadAssetRequest_UploadData) GetData() []byte { + if x != nil { + return x.xxx_hidden_Data + } + return nil +} + +func (x *BatonServiceUploadAssetRequest_UploadData) SetData(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Data = v +} + +type BatonServiceUploadAssetRequest_UploadData_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // 1MB limit + Data []byte +} + +func (b0 BatonServiceUploadAssetRequest_UploadData_builder) Build() *BatonServiceUploadAssetRequest_UploadData { + m0 := &BatonServiceUploadAssetRequest_UploadData{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Data = b.Data + return m0 +} + +type BatonServiceUploadAssetRequest_UploadEOF struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Sha256Checksum []byte `protobuf:"bytes,1,opt,name=sha256_checksum,json=sha256Checksum,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceUploadAssetRequest_UploadEOF) Reset() { + *x = BatonServiceUploadAssetRequest_UploadEOF{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceUploadAssetRequest_UploadEOF) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceUploadAssetRequest_UploadEOF) ProtoMessage() {} + +func (x *BatonServiceUploadAssetRequest_UploadEOF) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceUploadAssetRequest_UploadEOF) GetSha256Checksum() []byte { + if x != nil { + return x.xxx_hidden_Sha256Checksum + } + return nil +} + +func (x *BatonServiceUploadAssetRequest_UploadEOF) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceUploadAssetRequest_UploadEOF) SetSha256Checksum(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Sha256Checksum = v +} + +func (x *BatonServiceUploadAssetRequest_UploadEOF) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type BatonServiceUploadAssetRequest_UploadEOF_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Sha256Checksum []byte + Annotations []*anypb.Any +} + +func (b0 BatonServiceUploadAssetRequest_UploadEOF_builder) Build() *BatonServiceUploadAssetRequest_UploadEOF { + m0 := &BatonServiceUploadAssetRequest_UploadEOF{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Sha256Checksum = b.Sha256Checksum + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type BatonServiceFinishTaskRequest_Error struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_NonRetryable bool `protobuf:"varint,1,opt,name=non_retryable,json=nonRetryable,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + xxx_hidden_Response *anypb.Any `protobuf:"bytes,3,opt,name=response,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceFinishTaskRequest_Error) Reset() { + *x = BatonServiceFinishTaskRequest_Error{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceFinishTaskRequest_Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceFinishTaskRequest_Error) ProtoMessage() {} + +func (x *BatonServiceFinishTaskRequest_Error) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceFinishTaskRequest_Error) GetNonRetryable() bool { + if x != nil { + return x.xxx_hidden_NonRetryable + } + return false +} + +func (x *BatonServiceFinishTaskRequest_Error) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceFinishTaskRequest_Error) GetResponse() *anypb.Any { + if x != nil { + return x.xxx_hidden_Response + } + return nil +} + +func (x *BatonServiceFinishTaskRequest_Error) SetNonRetryable(v bool) { + x.xxx_hidden_NonRetryable = v +} + +func (x *BatonServiceFinishTaskRequest_Error) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *BatonServiceFinishTaskRequest_Error) SetResponse(v *anypb.Any) { + x.xxx_hidden_Response = v +} + +func (x *BatonServiceFinishTaskRequest_Error) HasResponse() bool { + if x == nil { + return false + } + return x.xxx_hidden_Response != nil +} + +func (x *BatonServiceFinishTaskRequest_Error) ClearResponse() { + x.xxx_hidden_Response = nil +} + +type BatonServiceFinishTaskRequest_Error_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + NonRetryable bool + Annotations []*anypb.Any + // The response from the connector, if any. 
+ Response *anypb.Any +} + +func (b0 BatonServiceFinishTaskRequest_Error_builder) Build() *BatonServiceFinishTaskRequest_Error { + m0 := &BatonServiceFinishTaskRequest_Error{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_NonRetryable = b.NonRetryable + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Response = b.Response + return m0 +} + +type BatonServiceFinishTaskRequest_Success struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_Response *anypb.Any `protobuf:"bytes,2,opt,name=response,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatonServiceFinishTaskRequest_Success) Reset() { + *x = BatonServiceFinishTaskRequest_Success{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatonServiceFinishTaskRequest_Success) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatonServiceFinishTaskRequest_Success) ProtoMessage() {} + +func (x *BatonServiceFinishTaskRequest_Success) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *BatonServiceFinishTaskRequest_Success) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *BatonServiceFinishTaskRequest_Success) GetResponse() *anypb.Any { + if x != nil { + return x.xxx_hidden_Response + } + return nil +} + +func (x *BatonServiceFinishTaskRequest_Success) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *BatonServiceFinishTaskRequest_Success) SetResponse(v *anypb.Any) { + x.xxx_hidden_Response = v +} + +func (x *BatonServiceFinishTaskRequest_Success) HasResponse() bool { + if x == nil { + return false + } + return x.xxx_hidden_Response != nil +} + +func (x *BatonServiceFinishTaskRequest_Success) ClearResponse() { + x.xxx_hidden_Response = nil +} + +type BatonServiceFinishTaskRequest_Success_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + // The response from the connector, if any. 
+ Response *anypb.Any +} + +func (b0 BatonServiceFinishTaskRequest_Success_builder) Build() *BatonServiceFinishTaskRequest_Success { + m0 := &BatonServiceFinishTaskRequest_Success{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Response = b.Response + return m0 +} + +var File_c1_connectorapi_baton_v1_baton_proto protoreflect.FileDescriptor + +const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + + "\n" + + "$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\x80)\n" + + "\x04Task\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12=\n" + + "\x06status\x18\x02 \x01(\x0e2%.c1.connectorapi.baton.v1.Task.StatusR\x06status\x12=\n" + + "\x04none\x18d \x01(\v2'.c1.connectorapi.baton.v1.Task.NoneTaskH\x00R\x04none\x12@\n" + + "\x05hello\x18e \x01(\v2(.c1.connectorapi.baton.v1.Task.HelloTaskH\x00R\x05hello\x12J\n" + + "\tsync_full\x18f \x01(\v2+.c1.connectorapi.baton.v1.Task.SyncFullTaskH\x00R\bsyncFull\x12@\n" + + "\x05grant\x18g \x01(\v2(.c1.connectorapi.baton.v1.Task.GrantTaskH\x00R\x05grant\x12C\n" + + "\x06revoke\x18h \x01(\v2).c1.connectorapi.baton.v1.Task.RevokeTaskH\x00R\x06revoke\x12Y\n" + + "\x0ecreate_account\x18i \x01(\v20.c1.connectorapi.baton.v1.Task.CreateAccountTaskH\x00R\rcreateAccount\x12\\\n" + + "\x0fcreate_resource\x18j \x01(\v21.c1.connectorapi.baton.v1.Task.CreateResourceTaskH\x00R\x0ecreateResource\x12\\\n" + + "\x0fdelete_resource\x18k \x01(\v21.c1.connectorapi.baton.v1.Task.DeleteResourceTaskH\x00R\x0edeleteResource\x12e\n" + + "\x12rotate_credentials\x18l \x01(\v24.c1.connectorapi.baton.v1.Task.RotateCredentialsTaskH\x00R\x11rotateCredentials\x12M\n" + + "\n" + + "event_feed\x18m \x01(\v2,.c1.connectorapi.baton.v1.Task.EventFeedTaskH\x00R\teventFeed\x12_\n" + + "\x12create_ticket_task\x18n \x01(\v2/.c1.connectorapi.baton.v1.Task.CreateTicketTaskH\x00R\x10createTicketTask\x12f\n" + + "\x13list_ticket_schemas\x18o \x01(\v24.c1.connectorapi.baton.v1.Task.ListTicketSchemasTaskH\x00R\x11listTicketSchemas\x12M\n" + + "\n" + + "get_ticket\x18p \x01(\v2,.c1.connectorapi.baton.v1.Task.GetTicketTaskH\x00R\tgetTicket\x12f\n" + + "\x13bulk_create_tickets\x18q \x01(\v24.c1.connectorapi.baton.v1.Task.BulkCreateTicketsTaskH\x00R\x11bulkCreateTickets\x12]\n" + + "\x10bulk_get_tickets\x18r \x01(\v21.c1.connectorapi.baton.v1.Task.BulkGetTicketsTaskH\x00R\x0ebulkGetTickets\x12f\n" + + "\x13action_list_schemas\x18s \x01(\v24.c1.connectorapi.baton.v1.Task.ActionListSchemasTaskH\x00R\x11actionListSchemas\x12`\n" + + "\x11action_get_schema\x18t \x01(\v22.c1.connectorapi.baton.v1.Task.ActionGetSchemaTaskH\x00R\x0factionGetSchema\x12V\n" + + "\raction_invoke\x18u \x01(\v2/.c1.connectorapi.baton.v1.Task.ActionInvokeTaskH\x00R\factionInvoke\x12V\n" + + "\raction_status\x18v \x01(\v2/.c1.connectorapi.baton.v1.Task.ActionStatusTaskH\x00R\factionStatus\x12]\n" + + "\x10create_sync_diff\x18w \x01(\v21.c1.connectorapi.baton.v1.Task.CreateSyncDiffTaskH\x00R\x0ecreateSyncDiff\x12R\n" + + "\rcompact_syncs\x18x \x01(\v2+.c1.connectorapi.baton.v1.Task.CompactSyncsH\x00R\fcompactSyncs\x12\x14\n" + + "\x05debug\x18\x03 \x01(\bR\x05debug\x1aB\n" + + 
"\bNoneTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1aC\n" + + "\tHelloTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\x88\x02\n" + + "\fSyncFullTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12,\n" + + "\x12skip_expand_grants\x18\x02 \x01(\bR\x10skipExpandGrants\x12?\n" + + "\x1cskip_entitlements_and_grants\x18\x03 \x01(\bR\x19skipEntitlementsAndGrants\x12Q\n" + + "\x17targeted_sync_resources\x18\x04 \x03(\v2\x19.c1.connector.v2.ResourceR\x15targetedSyncResources\x1a~\n" + + "\rEventFeedTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x125\n" + + "\bstart_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x1a\xf3\x01\n" + + "\tGrantTask\x12>\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementR\ventitlement\x127\n" + + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceR\tprincipal\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x125\n" + + "\bduration\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\bduration\x1ar\n" + + "\n" + + "RevokeTask\x12,\n" + + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantR\x05grant\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xf9\x01\n" + + "\x11CreateAccountTask\x12?\n" + + "\faccount_info\x18\x01 \x01(\v2\x1c.c1.connector.v2.AccountInfoR\vaccountInfo\x12Q\n" + + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x1aK\n" + + "\x12CreateResourceTask\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x1a\x9d\x01\n" + + "\x12DeleteResourceTask\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12I\n" + + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\x1a\xfa\x01\n" + + "\x15RotateCredentialsTask\x12<\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\n" + + "resourceId\x12Q\n" + + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x1a\xd5\x01\n" + + "\x10CreateTicketTask\x12E\n" + + "\x0eticket_request\x18\x01 \x01(\v2\x1e.c1.connector.v2.TicketRequestR\rticketRequest\x12B\n" + + "\rticket_schema\x18\x02 \x01(\v2\x1d.c1.connector.v2.TicketSchemaR\fticketSchema\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1aq\n" + + "\x15BulkCreateTicketsTask\x12X\n" + + "\x0fticket_requests\x18\x01 \x03(\v2/.c1.connectorapi.baton.v1.Task.CreateTicketTaskR\x0eticketRequests\x1ak\n" + + "\x12BulkGetTicketsTask\x12U\n" + + "\x0fticket_requests\x18\x01 \x03(\v2,.c1.connectorapi.baton.v1.Task.GetTicketTaskR\x0eticketRequests\x1aO\n" + + "\x15ListTicketSchemasTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1ad\n" + + "\rGetTicketTask\x12\x1b\n" + + "\tticket_id\x18\x01 \x01(\tR\bticketId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1ay\n" + + "\x15ActionListSchemasTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12(\n" + + "\x10resource_type_id\x18\x02 \x01(\tR\x0eresourceTypeId\x1aa\n" + + 
"\x13ActionGetSchemaTask\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xb5\x01\n" + + "\x10ActionInvokeTask\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12+\n" + + "\x04args\x18\x02 \x01(\v2\x17.google.protobuf.StructR\x04args\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12(\n" + + "\x10resource_type_id\x18\x04 \x01(\tR\x0eresourceTypeId\x1an\n" + + "\x10ActionStatusTask\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\x8e\x01\n" + + "\x12CreateSyncDiffTask\x12 \n" + + "\fbase_sync_id\x18\x01 \x01(\tR\n" + + "baseSyncId\x12\x1e\n" + + "\vnew_sync_id\x18\x02 \x01(\tR\tnewSyncId\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xf9\x01\n" + + "\fCompactSyncs\x12h\n" + + "\x11compactable_syncs\x18\x01 \x03(\v2;.c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSyncR\x10compactableSyncs\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1aG\n" + + "\x0fCompactableSync\x12\x1b\n" + + "\tfile_path\x18\x01 \x01(\tR\bfilePath\x12\x17\n" + + "\async_id\x18\x02 \x01(\tR\x06syncId\"s\n" + + "\x06Status\x12\x16\n" + + "\x12STATUS_UNSPECIFIED\x10\x00\x12\x12\n" + + "\x0eSTATUS_PENDING\x10\x01\x12\x14\n" + + "\x10STATUS_SCHEDULED\x10\x02\x12\x12\n" + + "\x0eSTATUS_RUNNING\x10\x03\x12\x13\n" + + "\x0fSTATUS_FINISHED\x10\x04B\v\n" + + "\ttask_type\"\xf3\a\n" + + "\x18BatonServiceHelloRequest\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\x122\n" + + "\atask_id\x18\x02 \x01(\tB\x19\xfaB\x16r\x142\x12^[a-zA-Z0-9]{27}|$R\x06taskId\x12e\n" + + "\n" + + "build_info\x18\x03 \x01(\v2<.c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfoB\b\xfaB\x05\x8a\x01\x02\x10\x01R\tbuildInfo\x12\\\n" + + "\aos_info\x18\x04 \x01(\v29.c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfoB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x06osInfo\x12[\n" + + "\x12connector_metadata\x18\x05 \x01(\v2\".c1.connector.v2.ConnectorMetadataB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x11connectorMetadata\x12@\n" + + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\x1a\x95\x01\n" + + "\tBuildInfo\x12-\n" + + "\flang_version\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\vlangVersion\x12$\n" + + "\apackage\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\apackage\x123\n" + + "\x0fpackage_version\x18\x03 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x0epackageVersion\x1a\x81\x03\n" + + "\x06OSInfo\x12&\n" + + "\bhostname\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\bhostname\x12\x1a\n" + + "\x02os\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x02os\x12&\n" + + "\bplatform\x18\x03 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\bplatform\x125\n" + + "\x10platform_version\x18\x04 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x0fplatformVersion\x123\n" + + "\x0fplatform_family\x18\x05 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x0eplatformFamily\x121\n" + + "\x0ekernel_version\x18\x06 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\rkernelVersion\x12+\n" + + "\vkernel_arch\x18\a \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\n" + + "kernelArch\x12?\n" + + "\x15virtualization_system\x18\b \x01(\tB\n" + + 
"\xfaB\ar\x05\x10\x01\x18\x80\x02R\x14virtualizationSystem\"S\n" + + "\x19BatonServiceHelloResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"A\n" + + "\x1aBatonServiceGetTaskRequest\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\"\x83\x02\n" + + "\x1bBatonServiceGetTaskResponse\x122\n" + + "\x04task\x18\x01 \x01(\v2\x1e.c1.connectorapi.baton.v1.TaskR\x04task\x126\n" + + "\tnext_poll\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\bnextPoll\x12@\n" + + "\x0enext_heartbeat\x18\x03 \x01(\v2\x19.google.protobuf.DurationR\rnextHeartbeat\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xb8\x01\n" + + "\x1cBatonServiceHeartbeatRequest\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\x121\n" + + "\atask_id\x18\x02 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06taskId\x12@\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\"\xb7\x01\n" + + "\x1dBatonServiceHeartbeatResponse\x12@\n" + + "\x0enext_heartbeat\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\rnextHeartbeat\x12\x1c\n" + + "\tcancelled\x18\x02 \x01(\bR\tcancelled\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa3\x05\n" + + "\x1eBatonServiceUploadAssetRequest\x12e\n" + + "\bmetadata\x18d \x01(\v2G.c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadataH\x00R\bmetadata\x12Y\n" + + "\x04data\x18e \x01(\v2C.c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadDataH\x00R\x04data\x12V\n" + + "\x03eof\x18f \x01(\v2B.c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOFH\x00R\x03eof\x1a\xaa\x01\n" + + "\x0eUploadMetadata\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\x121\n" + + "\atask_id\x18\x02 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06taskId\x12@\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\x1a-\n" + + "\n" + + "UploadData\x12\x1f\n" + + "\x04data\x18\x01 \x01(\fB\v\xfaB\bz\x06\x10\x01\x18\x80\x80@R\x04data\x1a\x7f\n" + + "\tUploadEOF\x120\n" + + "\x0fsha256_checksum\x18\x01 \x01(\fB\a\xfaB\x04z\x02h R\x0esha256Checksum\x12@\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotationsB\n" + + "\n" + + "\x03msg\x12\x03\xf8B\x01\"Y\n" + + "\x1fBatonServiceUploadAssetResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x8d\x05\n" + + "\x1dBatonServiceFinishTaskRequest\x12#\n" + + "\ahost_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x06hostId\x121\n" + + "\atask_id\x18\x02 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06taskId\x12*\n" + + "\x06status\x18\x03 \x01(\v2\x12.google.rpc.StatusR\x06status\x12U\n" + + "\x05error\x18d \x01(\v2=.c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.ErrorH\x00R\x05error\x12[\n" + + "\asuccess\x18e \x01(\v2?.c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.SuccessH\x00R\asuccess\x1a\xa0\x01\n" + + "\x05Error\x12#\n" + + "\rnon_retryable\x18\x01 \x01(\bR\fnonRetryable\x12@\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\x120\n" + + "\bresponse\x18\x03 \x01(\v2\x14.google.protobuf.AnyR\bresponse\x1a}\n" + + "\aSuccess\x12@\n" + + "\vannotations\x18\x01 
\x03(\v2\x14.google.protobuf.AnyB\b\xfaB\x05\x92\x01\x02\x10\x10R\vannotations\x120\n" + + "\bresponse\x18\x02 \x01(\v2\x14.google.protobuf.AnyR\bresponseB\x12\n" + + "\vfinal_state\x12\x03\xf8B\x01\"X\n" + + "\x1eBatonServiceFinishTaskResponse\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x17\n" + + "\x15StartDebuggingRequest\"0\n" + + "\x16StartDebuggingResponse\x12\x16\n" + + "\x06status\x18\x01 \x01(\bR\x06status2\x80\x06\n" + + "\fBatonService\x12r\n" + + "\x05Hello\x122.c1.connectorapi.baton.v1.BatonServiceHelloRequest\x1a3.c1.connectorapi.baton.v1.BatonServiceHelloResponse\"\x00\x12x\n" + + "\aGetTask\x124.c1.connectorapi.baton.v1.BatonServiceGetTaskRequest\x1a5.c1.connectorapi.baton.v1.BatonServiceGetTaskResponse\"\x00\x12~\n" + + "\tHeartbeat\x126.c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest\x1a7.c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse\"\x00\x12\x81\x01\n" + + "\n" + + "FinishTask\x127.c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest\x1a8.c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse\"\x00\x12\x86\x01\n" + + "\vUploadAsset\x128.c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest\x1a9.c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse\"\x00(\x01\x12u\n" + + "\x0eStartDebugging\x12/.c1.connectorapi.baton.v1.StartDebuggingRequest\x1a0.c1.connectorapi.baton.v1.StartDebuggingResponse\"\x00B7Z5gitlab.com/ductone/c1/pkg/pb/c1/connectorapi/baton/v1b\x06proto3" + +var file_c1_connectorapi_baton_v1_baton_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_c1_connectorapi_baton_v1_baton_proto_msgTypes = make([]protoimpl.MessageInfo, 42) +var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{ + (Task_Status)(0), // 0: c1.connectorapi.baton.v1.Task.Status + (*Task)(nil), // 1: c1.connectorapi.baton.v1.Task + (*BatonServiceHelloRequest)(nil), // 2: c1.connectorapi.baton.v1.BatonServiceHelloRequest + (*BatonServiceHelloResponse)(nil), // 3: c1.connectorapi.baton.v1.BatonServiceHelloResponse + (*BatonServiceGetTaskRequest)(nil), // 4: c1.connectorapi.baton.v1.BatonServiceGetTaskRequest + (*BatonServiceGetTaskResponse)(nil), // 5: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse + (*BatonServiceHeartbeatRequest)(nil), // 6: c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest + (*BatonServiceHeartbeatResponse)(nil), // 7: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse + (*BatonServiceUploadAssetRequest)(nil), // 8: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest + (*BatonServiceUploadAssetResponse)(nil), // 9: c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse + (*BatonServiceFinishTaskRequest)(nil), // 10: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest + (*BatonServiceFinishTaskResponse)(nil), // 11: c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse + (*StartDebuggingRequest)(nil), // 12: c1.connectorapi.baton.v1.StartDebuggingRequest + (*StartDebuggingResponse)(nil), // 13: c1.connectorapi.baton.v1.StartDebuggingResponse + (*Task_NoneTask)(nil), // 14: c1.connectorapi.baton.v1.Task.NoneTask + (*Task_HelloTask)(nil), // 15: c1.connectorapi.baton.v1.Task.HelloTask + (*Task_SyncFullTask)(nil), // 16: c1.connectorapi.baton.v1.Task.SyncFullTask + (*Task_EventFeedTask)(nil), // 17: c1.connectorapi.baton.v1.Task.EventFeedTask + (*Task_GrantTask)(nil), // 18: c1.connectorapi.baton.v1.Task.GrantTask + (*Task_RevokeTask)(nil), // 19: c1.connectorapi.baton.v1.Task.RevokeTask + (*Task_CreateAccountTask)(nil), // 20: c1.connectorapi.baton.v1.Task.CreateAccountTask + 
(*Task_CreateResourceTask)(nil), // 21: c1.connectorapi.baton.v1.Task.CreateResourceTask + (*Task_DeleteResourceTask)(nil), // 22: c1.connectorapi.baton.v1.Task.DeleteResourceTask + (*Task_RotateCredentialsTask)(nil), // 23: c1.connectorapi.baton.v1.Task.RotateCredentialsTask + (*Task_CreateTicketTask)(nil), // 24: c1.connectorapi.baton.v1.Task.CreateTicketTask + (*Task_BulkCreateTicketsTask)(nil), // 25: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask + (*Task_BulkGetTicketsTask)(nil), // 26: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask + (*Task_ListTicketSchemasTask)(nil), // 27: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask + (*Task_GetTicketTask)(nil), // 28: c1.connectorapi.baton.v1.Task.GetTicketTask + (*Task_ActionListSchemasTask)(nil), // 29: c1.connectorapi.baton.v1.Task.ActionListSchemasTask + (*Task_ActionGetSchemaTask)(nil), // 30: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask + (*Task_ActionInvokeTask)(nil), // 31: c1.connectorapi.baton.v1.Task.ActionInvokeTask + (*Task_ActionStatusTask)(nil), // 32: c1.connectorapi.baton.v1.Task.ActionStatusTask + (*Task_CreateSyncDiffTask)(nil), // 33: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask + (*Task_CompactSyncs)(nil), // 34: c1.connectorapi.baton.v1.Task.CompactSyncs + (*Task_CompactSyncs_CompactableSync)(nil), // 35: c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync + (*BatonServiceHelloRequest_BuildInfo)(nil), // 36: c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo + (*BatonServiceHelloRequest_OSInfo)(nil), // 37: c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo + (*BatonServiceUploadAssetRequest_UploadMetadata)(nil), // 38: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata + (*BatonServiceUploadAssetRequest_UploadData)(nil), // 39: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData + (*BatonServiceUploadAssetRequest_UploadEOF)(nil), // 40: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF + (*BatonServiceFinishTaskRequest_Error)(nil), // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error + (*BatonServiceFinishTaskRequest_Success)(nil), // 42: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success + (*v2.ConnectorMetadata)(nil), // 43: c1.connector.v2.ConnectorMetadata + (*anypb.Any)(nil), // 44: google.protobuf.Any + (*durationpb.Duration)(nil), // 45: google.protobuf.Duration + (*status.Status)(nil), // 46: google.rpc.Status + (*v2.Resource)(nil), // 47: c1.connector.v2.Resource + (*timestamppb.Timestamp)(nil), // 48: google.protobuf.Timestamp + (*v2.Entitlement)(nil), // 49: c1.connector.v2.Entitlement + (*v2.Grant)(nil), // 50: c1.connector.v2.Grant + (*v2.AccountInfo)(nil), // 51: c1.connector.v2.AccountInfo + (*v2.CredentialOptions)(nil), // 52: c1.connector.v2.CredentialOptions + (*v2.EncryptionConfig)(nil), // 53: c1.connector.v2.EncryptionConfig + (*v2.ResourceId)(nil), // 54: c1.connector.v2.ResourceId + (*v2.TicketRequest)(nil), // 55: c1.connector.v2.TicketRequest + (*v2.TicketSchema)(nil), // 56: c1.connector.v2.TicketSchema + (*structpb.Struct)(nil), // 57: google.protobuf.Struct +} +var file_c1_connectorapi_baton_v1_baton_proto_depIdxs = []int32{ + 0, // 0: c1.connectorapi.baton.v1.Task.status:type_name -> c1.connectorapi.baton.v1.Task.Status + 14, // 1: c1.connectorapi.baton.v1.Task.none:type_name -> c1.connectorapi.baton.v1.Task.NoneTask + 15, // 2: c1.connectorapi.baton.v1.Task.hello:type_name -> c1.connectorapi.baton.v1.Task.HelloTask + 16, // 3: 
c1.connectorapi.baton.v1.Task.sync_full:type_name -> c1.connectorapi.baton.v1.Task.SyncFullTask + 18, // 4: c1.connectorapi.baton.v1.Task.grant:type_name -> c1.connectorapi.baton.v1.Task.GrantTask + 19, // 5: c1.connectorapi.baton.v1.Task.revoke:type_name -> c1.connectorapi.baton.v1.Task.RevokeTask + 20, // 6: c1.connectorapi.baton.v1.Task.create_account:type_name -> c1.connectorapi.baton.v1.Task.CreateAccountTask + 21, // 7: c1.connectorapi.baton.v1.Task.create_resource:type_name -> c1.connectorapi.baton.v1.Task.CreateResourceTask + 22, // 8: c1.connectorapi.baton.v1.Task.delete_resource:type_name -> c1.connectorapi.baton.v1.Task.DeleteResourceTask + 23, // 9: c1.connectorapi.baton.v1.Task.rotate_credentials:type_name -> c1.connectorapi.baton.v1.Task.RotateCredentialsTask + 17, // 10: c1.connectorapi.baton.v1.Task.event_feed:type_name -> c1.connectorapi.baton.v1.Task.EventFeedTask + 24, // 11: c1.connectorapi.baton.v1.Task.create_ticket_task:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask + 27, // 12: c1.connectorapi.baton.v1.Task.list_ticket_schemas:type_name -> c1.connectorapi.baton.v1.Task.ListTicketSchemasTask + 28, // 13: c1.connectorapi.baton.v1.Task.get_ticket:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask + 25, // 14: c1.connectorapi.baton.v1.Task.bulk_create_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask + 26, // 15: c1.connectorapi.baton.v1.Task.bulk_get_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkGetTicketsTask + 29, // 16: c1.connectorapi.baton.v1.Task.action_list_schemas:type_name -> c1.connectorapi.baton.v1.Task.ActionListSchemasTask + 30, // 17: c1.connectorapi.baton.v1.Task.action_get_schema:type_name -> c1.connectorapi.baton.v1.Task.ActionGetSchemaTask + 31, // 18: c1.connectorapi.baton.v1.Task.action_invoke:type_name -> c1.connectorapi.baton.v1.Task.ActionInvokeTask + 32, // 19: c1.connectorapi.baton.v1.Task.action_status:type_name -> c1.connectorapi.baton.v1.Task.ActionStatusTask + 33, // 20: c1.connectorapi.baton.v1.Task.create_sync_diff:type_name -> c1.connectorapi.baton.v1.Task.CreateSyncDiffTask + 34, // 21: c1.connectorapi.baton.v1.Task.compact_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs + 36, // 22: c1.connectorapi.baton.v1.BatonServiceHelloRequest.build_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo + 37, // 23: c1.connectorapi.baton.v1.BatonServiceHelloRequest.os_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo + 43, // 24: c1.connectorapi.baton.v1.BatonServiceHelloRequest.connector_metadata:type_name -> c1.connector.v2.ConnectorMetadata + 44, // 25: c1.connectorapi.baton.v1.BatonServiceHelloRequest.annotations:type_name -> google.protobuf.Any + 44, // 26: c1.connectorapi.baton.v1.BatonServiceHelloResponse.annotations:type_name -> google.protobuf.Any + 1, // 27: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.task:type_name -> c1.connectorapi.baton.v1.Task + 45, // 28: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_poll:type_name -> google.protobuf.Duration + 45, // 29: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_heartbeat:type_name -> google.protobuf.Duration + 44, // 30: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.annotations:type_name -> google.protobuf.Any + 44, // 31: c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest.annotations:type_name -> google.protobuf.Any + 45, // 32: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.next_heartbeat:type_name -> google.protobuf.Duration + 
44, // 33: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.annotations:type_name -> google.protobuf.Any + 38, // 34: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.metadata:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata + 39, // 35: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.data:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData + 40, // 36: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.eof:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF + 44, // 37: c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse.annotations:type_name -> google.protobuf.Any + 46, // 38: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.status:type_name -> google.rpc.Status + 41, // 39: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.error:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error + 42, // 40: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.success:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success + 44, // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse.annotations:type_name -> google.protobuf.Any + 44, // 42: c1.connectorapi.baton.v1.Task.NoneTask.annotations:type_name -> google.protobuf.Any + 44, // 43: c1.connectorapi.baton.v1.Task.HelloTask.annotations:type_name -> google.protobuf.Any + 44, // 44: c1.connectorapi.baton.v1.Task.SyncFullTask.annotations:type_name -> google.protobuf.Any + 47, // 45: c1.connectorapi.baton.v1.Task.SyncFullTask.targeted_sync_resources:type_name -> c1.connector.v2.Resource + 44, // 46: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any + 48, // 47: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp + 49, // 48: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> c1.connector.v2.Entitlement + 47, // 49: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource + 44, // 50: c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any + 45, // 51: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration + 50, // 52: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> c1.connector.v2.Grant + 44, // 53: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any + 51, // 54: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo + 52, // 55: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions + 53, // 56: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 47, // 57: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource + 54, // 58: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId + 54, // 59: c1.connectorapi.baton.v1.Task.DeleteResourceTask.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 54, // 60: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId + 52, // 61: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions + 53, // 62: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 55, // 63: 
c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest + 56, // 64: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema + 44, // 65: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any + 24, // 66: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask + 28, // 67: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask + 44, // 68: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any + 44, // 69: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any + 44, // 70: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any + 44, // 71: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any + 57, // 72: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct + 44, // 73: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any + 44, // 74: c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any + 44, // 75: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask.annotations:type_name -> google.protobuf.Any + 35, // 76: c1.connectorapi.baton.v1.Task.CompactSyncs.compactable_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync + 44, // 77: c1.connectorapi.baton.v1.Task.CompactSyncs.annotations:type_name -> google.protobuf.Any + 44, // 78: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any + 44, // 79: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> google.protobuf.Any + 44, // 80: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any + 44, // 81: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any + 44, // 82: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any + 44, // 83: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any + 2, // 84: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest + 4, // 85: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest + 6, // 86: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest + 10, // 87: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest + 8, // 88: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest + 12, // 89: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest + 3, // 90: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse + 5, // 91: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse + 7, // 92: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> 
c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse + 11, // 93: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse + 9, // 94: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse + 13, // 95: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse + 90, // [90:96] is the sub-list for method output_type + 84, // [84:90] is the sub-list for method input_type + 84, // [84:84] is the sub-list for extension type_name + 84, // [84:84] is the sub-list for extension extendee + 0, // [0:84] is the sub-list for field type_name +} + +func init() { file_c1_connectorapi_baton_v1_baton_proto_init() } +func file_c1_connectorapi_baton_v1_baton_proto_init() { + if File_c1_connectorapi_baton_v1_baton_proto != nil { + return + } + file_c1_connectorapi_baton_v1_baton_proto_msgTypes[0].OneofWrappers = []any{ + (*task_None)(nil), + (*task_Hello)(nil), + (*task_SyncFull)(nil), + (*task_Grant)(nil), + (*task_Revoke)(nil), + (*task_CreateAccount)(nil), + (*task_CreateResource)(nil), + (*task_DeleteResource)(nil), + (*task_RotateCredentials)(nil), + (*task_EventFeed)(nil), + (*task_CreateTicketTask_)(nil), + (*task_ListTicketSchemas)(nil), + (*task_GetTicket)(nil), + (*task_BulkCreateTickets)(nil), + (*task_BulkGetTickets)(nil), + (*task_ActionListSchemas)(nil), + (*task_ActionGetSchema)(nil), + (*task_ActionInvoke)(nil), + (*task_ActionStatus)(nil), + (*task_CreateSyncDiff)(nil), + (*task_CompactSyncs_)(nil), + } + file_c1_connectorapi_baton_v1_baton_proto_msgTypes[7].OneofWrappers = []any{ + (*batonServiceUploadAssetRequest_Metadata)(nil), + (*batonServiceUploadAssetRequest_Data)(nil), + (*batonServiceUploadAssetRequest_Eof)(nil), + } + file_c1_connectorapi_baton_v1_baton_proto_msgTypes[9].OneofWrappers = []any{ + (*batonServiceFinishTaskRequest_Error_)(nil), + (*batonServiceFinishTaskRequest_Success_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_baton_proto_rawDesc), len(file_c1_connectorapi_baton_v1_baton_proto_rawDesc)), + NumEnums: 1, + NumMessages: 42, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connectorapi_baton_v1_baton_proto_goTypes, + DependencyIndexes: file_c1_connectorapi_baton_v1_baton_proto_depIdxs, + EnumInfos: file_c1_connectorapi_baton_v1_baton_proto_enumTypes, + MessageInfos: file_c1_connectorapi_baton_v1_baton_proto_msgTypes, + }.Build() + File_c1_connectorapi_baton_v1_baton_proto = out.File + file_c1_connectorapi_baton_v1_baton_proto_goTypes = nil + file_c1_connectorapi_baton_v1_baton_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config.pb.go index ccf3c296..035fdcd2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connectorapi/baton/v1/config.proto +//go:build !protoopaque + package v1 import ( @@ -11,7 +13,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,7 +24,7 @@ const ( ) type GetConnectorConfigRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -53,13 +54,20 @@ func (x *GetConnectorConfigRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetConnectorConfigRequest.ProtoReflect.Descriptor instead. -func (*GetConnectorConfigRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_config_proto_rawDescGZIP(), []int{0} +type GetConnectorConfigRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 GetConnectorConfigRequest_builder) Build() *GetConnectorConfigRequest { + m0 := &GetConnectorConfigRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type GetConnectorConfigResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"` unknownFields protoimpl.UnknownFields @@ -91,11 +99,6 @@ func (x *GetConnectorConfigResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetConnectorConfigResponse.ProtoReflect.Descriptor instead. -func (*GetConnectorConfigResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_config_proto_rawDescGZIP(), []int{1} -} - func (x *GetConnectorConfigResponse) GetConfig() []byte { if x != nil { return x.Config @@ -110,8 +113,46 @@ func (x *GetConnectorConfigResponse) GetLastUpdated() *timestamppb.Timestamp { return nil } +func (x *GetConnectorConfigResponse) SetConfig(v []byte) { + if v == nil { + v = []byte{} + } + x.Config = v +} + +func (x *GetConnectorConfigResponse) SetLastUpdated(v *timestamppb.Timestamp) { + x.LastUpdated = v +} + +func (x *GetConnectorConfigResponse) HasLastUpdated() bool { + if x == nil { + return false + } + return x.LastUpdated != nil +} + +func (x *GetConnectorConfigResponse) ClearLastUpdated() { + x.LastUpdated = nil +} + +type GetConnectorConfigResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Config []byte + LastUpdated *timestamppb.Timestamp +} + +func (b0 GetConnectorConfigResponse_builder) Build() *GetConnectorConfigResponse { + m0 := &GetConnectorConfigResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Config = b.Config + x.LastUpdated = b.LastUpdated + return m0 +} + type SignedHeader struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value []string `protobuf:"bytes,2,rep,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields @@ -143,11 +184,6 @@ func (x *SignedHeader) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SignedHeader.ProtoReflect.Descriptor instead. -func (*SignedHeader) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_config_proto_rawDescGZIP(), []int{2} -} - func (x *SignedHeader) GetKey() string { if x != nil { return x.Key @@ -162,8 +198,32 @@ func (x *SignedHeader) GetValue() []string { return nil } +func (x *SignedHeader) SetKey(v string) { + x.Key = v +} + +func (x *SignedHeader) SetValue(v []string) { + x.Value = v +} + +type SignedHeader_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Key string + Value []string +} + +func (b0 SignedHeader_builder) Build() *SignedHeader { + m0 := &SignedHeader{} + b, x := &b0, m0 + _, _ = b, x + x.Key = b.Key + x.Value = b.Value + return m0 +} + type Sigv4SignedRequestSTSGetCallerIdentity struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"` Endpoint string `protobuf:"bytes,2,opt,name=endpoint,proto3" json:"endpoint,omitempty"` Headers []*SignedHeader `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty"` @@ -197,11 +257,6 @@ func (x *Sigv4SignedRequestSTSGetCallerIdentity) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use Sigv4SignedRequestSTSGetCallerIdentity.ProtoReflect.Descriptor instead. 
-func (*Sigv4SignedRequestSTSGetCallerIdentity) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_config_proto_rawDescGZIP(), []int{3} -} - func (x *Sigv4SignedRequestSTSGetCallerIdentity) GetMethod() string { if x != nil { return x.Method @@ -230,82 +285,191 @@ func (x *Sigv4SignedRequestSTSGetCallerIdentity) GetBody() []byte { return nil } -var File_c1_connectorapi_baton_v1_config_proto protoreflect.FileDescriptor +func (x *Sigv4SignedRequestSTSGetCallerIdentity) SetMethod(v string) { + x.Method = v +} -var file_c1_connectorapi_baton_v1_config_proto_rawDesc = string([]byte{ - 0x0a, 0x25, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x1b, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x73, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x64, 0x22, 0x36, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb2, 0x01, 0x0a, - 0x26, 0x53, 0x69, 0x67, 0x76, 0x34, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x53, 0x54, 0x53, 0x47, 0x65, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x07, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, - 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, - 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, - 0x79, 
0x32, 0x99, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7f, 0x0a, 0x12, - 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x33, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x37, 0x5a, - 0x35, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x75, 0x63, 0x74, - 0x6f, 0x6e, 0x65, 0x2f, 0x63, 0x31, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, - 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2f, 0x62, 0x61, - 0x74, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connectorapi_baton_v1_config_proto_rawDescOnce sync.Once - file_c1_connectorapi_baton_v1_config_proto_rawDescData []byte -) +func (x *Sigv4SignedRequestSTSGetCallerIdentity) SetEndpoint(v string) { + x.Endpoint = v +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) SetHeaders(v []*SignedHeader) { + x.Headers = v +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) SetBody(v []byte) { + if v == nil { + v = []byte{} + } + x.Body = v +} + +type Sigv4SignedRequestSTSGetCallerIdentity_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Method string + Endpoint string + Headers []*SignedHeader + Body []byte +} + +func (b0 Sigv4SignedRequestSTSGetCallerIdentity_builder) Build() *Sigv4SignedRequestSTSGetCallerIdentity { + m0 := &Sigv4SignedRequestSTSGetCallerIdentity{} + b, x := &b0, m0 + _, _ = b, x + x.Method = b.Method + x.Endpoint = b.Endpoint + x.Headers = b.Headers + x.Body = b.Body + return m0 +} + +type GetConnectorOauthTokenRequest struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConnectorOauthTokenRequest) Reset() { + *x = GetConnectorOauthTokenRequest{} + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConnectorOauthTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectorOauthTokenRequest) ProtoMessage() {} + +func (x *GetConnectorOauthTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type GetConnectorOauthTokenRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 GetConnectorOauthTokenRequest_builder) Build() *GetConnectorOauthTokenRequest { + m0 := &GetConnectorOauthTokenRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type GetConnectorOauthTokenResponse struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Token []byte `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} -func file_c1_connectorapi_baton_v1_config_proto_rawDescGZIP() []byte { - file_c1_connectorapi_baton_v1_config_proto_rawDescOnce.Do(func() { - file_c1_connectorapi_baton_v1_config_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_config_proto_rawDesc), len(file_c1_connectorapi_baton_v1_config_proto_rawDesc))) - }) - return file_c1_connectorapi_baton_v1_config_proto_rawDescData +func (x *GetConnectorOauthTokenResponse) Reset() { + *x = GetConnectorOauthTokenResponse{} + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -var file_c1_connectorapi_baton_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +func (x *GetConnectorOauthTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectorOauthTokenResponse) ProtoMessage() {} + +func (x *GetConnectorOauthTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetConnectorOauthTokenResponse) GetToken() []byte { + if x != nil { + return x.Token + } + return nil +} + +func (x *GetConnectorOauthTokenResponse) SetToken(v []byte) { + if v == nil { + v = []byte{} + } + x.Token = v +} + +type GetConnectorOauthTokenResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Token []byte +} + +func (b0 GetConnectorOauthTokenResponse_builder) Build() *GetConnectorOauthTokenResponse { + m0 := &GetConnectorOauthTokenResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Token = b.Token + return m0 +} + +var File_c1_connectorapi_baton_v1_config_proto protoreflect.FileDescriptor + +const file_c1_connectorapi_baton_v1_config_proto_rawDesc = "" + + "\n" + + "%c1/connectorapi/baton/v1/config.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\x1b\n" + + "\x19GetConnectorConfigRequest\"s\n" + + "\x1aGetConnectorConfigResponse\x12\x16\n" + + "\x06config\x18\x01 \x01(\fR\x06config\x12=\n" + + "\flast_updated\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\vlastUpdated\"6\n" + + "\fSignedHeader\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x03(\tR\x05value\"\xb2\x01\n" + + "&Sigv4SignedRequestSTSGetCallerIdentity\x12\x16\n" + + "\x06method\x18\x01 \x01(\tR\x06method\x12\x1a\n" + + "\bendpoint\x18\x02 \x01(\tR\bendpoint\x12@\n" + + "\aheaders\x18\x03 \x03(\v2&.c1.connectorapi.baton.v1.SignedHeaderR\aheaders\x12\x12\n" + + "\x04body\x18\x04 \x01(\fR\x04body\"\x1f\n" + + "\x1dGetConnectorOauthTokenRequest\"6\n" + + "\x1eGetConnectorOauthTokenResponse\x12\x14\n" + + "\x05token\x18\x01 \x01(\fR\x05token2\xa7\x02\n" + + "\x16ConnectorConfigService\x12\x7f\n" + + "\x12GetConnectorConfig\x123.c1.connectorapi.baton.v1.GetConnectorConfigRequest\x1a4.c1.connectorapi.baton.v1.GetConnectorConfigResponse\x12\x8b\x01\n" + + "\x16GetConnectorOauthToken\x127.c1.connectorapi.baton.v1.GetConnectorOauthTokenRequest\x1a8.c1.connectorapi.baton.v1.GetConnectorOauthTokenResponseB7Z5gitlab.com/ductone/c1/pkg/pb/c1/connectorapi/baton/v1b\x06proto3" + +var file_c1_connectorapi_baton_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_c1_connectorapi_baton_v1_config_proto_goTypes = []any{ (*GetConnectorConfigRequest)(nil), // 0: c1.connectorapi.baton.v1.GetConnectorConfigRequest (*GetConnectorConfigResponse)(nil), // 1: c1.connectorapi.baton.v1.GetConnectorConfigResponse (*SignedHeader)(nil), // 2: c1.connectorapi.baton.v1.SignedHeader (*Sigv4SignedRequestSTSGetCallerIdentity)(nil), // 3: c1.connectorapi.baton.v1.Sigv4SignedRequestSTSGetCallerIdentity - (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp + (*GetConnectorOauthTokenRequest)(nil), // 4: c1.connectorapi.baton.v1.GetConnectorOauthTokenRequest + (*GetConnectorOauthTokenResponse)(nil), // 5: c1.connectorapi.baton.v1.GetConnectorOauthTokenResponse + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp } var file_c1_connectorapi_baton_v1_config_proto_depIdxs = []int32{ - 4, // 0: c1.connectorapi.baton.v1.GetConnectorConfigResponse.last_updated:type_name -> google.protobuf.Timestamp + 6, // 0: c1.connectorapi.baton.v1.GetConnectorConfigResponse.last_updated:type_name -> google.protobuf.Timestamp 2, // 1: c1.connectorapi.baton.v1.Sigv4SignedRequestSTSGetCallerIdentity.headers:type_name -> c1.connectorapi.baton.v1.SignedHeader 0, // 2: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorConfig:input_type -> c1.connectorapi.baton.v1.GetConnectorConfigRequest - 1, // 3: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorConfig:output_type -> c1.connectorapi.baton.v1.GetConnectorConfigResponse - 3, // [3:4] is the sub-list for method output_type - 2, // [2:3] is the sub-list for method input_type + 4, // 3: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorOauthToken:input_type -> 
c1.connectorapi.baton.v1.GetConnectorOauthTokenRequest + 1, // 4: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorConfig:output_type -> c1.connectorapi.baton.v1.GetConnectorConfigResponse + 5, // 5: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorOauthToken:output_type -> c1.connectorapi.baton.v1.GetConnectorOauthTokenResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name 2, // [2:2] is the sub-list for extension extendee 0, // [0:2] is the sub-list for field type_name @@ -322,7 +486,7 @@ func file_c1_connectorapi_baton_v1_config_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_config_proto_rawDesc), len(file_c1_connectorapi_baton_v1_config_proto_rawDesc)), NumEnums: 0, - NumMessages: 4, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config.pb.validate.go index ee2b6ec2..c877874e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config.pb.validate.go @@ -516,3 +516,211 @@ var _ interface { Cause() error ErrorName() string } = Sigv4SignedRequestSTSGetCallerIdentityValidationError{} + +// Validate checks the field values on GetConnectorOauthTokenRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetConnectorOauthTokenRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetConnectorOauthTokenRequest with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GetConnectorOauthTokenRequestMultiError, or nil if none found. +func (m *GetConnectorOauthTokenRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetConnectorOauthTokenRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return GetConnectorOauthTokenRequestMultiError(errors) + } + + return nil +} + +// GetConnectorOauthTokenRequestMultiError is an error wrapping multiple +// validation errors returned by GetConnectorOauthTokenRequest.ValidateAll() +// if the designated constraints aren't met. +type GetConnectorOauthTokenRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetConnectorOauthTokenRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetConnectorOauthTokenRequestMultiError) AllErrors() []error { return m } + +// GetConnectorOauthTokenRequestValidationError is the validation error +// returned by GetConnectorOauthTokenRequest.Validate if the designated +// constraints aren't met. +type GetConnectorOauthTokenRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e GetConnectorOauthTokenRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetConnectorOauthTokenRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetConnectorOauthTokenRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetConnectorOauthTokenRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetConnectorOauthTokenRequestValidationError) ErrorName() string { + return "GetConnectorOauthTokenRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetConnectorOauthTokenRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetConnectorOauthTokenRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetConnectorOauthTokenRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetConnectorOauthTokenRequestValidationError{} + +// Validate checks the field values on GetConnectorOauthTokenResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetConnectorOauthTokenResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetConnectorOauthTokenResponse with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GetConnectorOauthTokenResponseMultiError, or nil if none found. +func (m *GetConnectorOauthTokenResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetConnectorOauthTokenResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Token + + if len(errors) > 0 { + return GetConnectorOauthTokenResponseMultiError(errors) + } + + return nil +} + +// GetConnectorOauthTokenResponseMultiError is an error wrapping multiple +// validation errors returned by GetConnectorOauthTokenResponse.ValidateAll() +// if the designated constraints aren't met. +type GetConnectorOauthTokenResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetConnectorOauthTokenResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetConnectorOauthTokenResponseMultiError) AllErrors() []error { return m } + +// GetConnectorOauthTokenResponseValidationError is the validation error +// returned by GetConnectorOauthTokenResponse.Validate if the designated +// constraints aren't met. +type GetConnectorOauthTokenResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetConnectorOauthTokenResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetConnectorOauthTokenResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e GetConnectorOauthTokenResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetConnectorOauthTokenResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetConnectorOauthTokenResponseValidationError) ErrorName() string { + return "GetConnectorOauthTokenResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetConnectorOauthTokenResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetConnectorOauthTokenResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetConnectorOauthTokenResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetConnectorOauthTokenResponseValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config_grpc.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config_grpc.pb.go index 985a23de..92807fbd 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config_grpc.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config_grpc.pb.go @@ -19,7 +19,8 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - ConnectorConfigService_GetConnectorConfig_FullMethodName = "/c1.connectorapi.baton.v1.ConnectorConfigService/GetConnectorConfig" + ConnectorConfigService_GetConnectorConfig_FullMethodName = "/c1.connectorapi.baton.v1.ConnectorConfigService/GetConnectorConfig" + ConnectorConfigService_GetConnectorOauthToken_FullMethodName = "/c1.connectorapi.baton.v1.ConnectorConfigService/GetConnectorOauthToken" ) // ConnectorConfigServiceClient is the client API for ConnectorConfigService service. @@ -27,6 +28,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type ConnectorConfigServiceClient interface { GetConnectorConfig(ctx context.Context, in *GetConnectorConfigRequest, opts ...grpc.CallOption) (*GetConnectorConfigResponse, error) + GetConnectorOauthToken(ctx context.Context, in *GetConnectorOauthTokenRequest, opts ...grpc.CallOption) (*GetConnectorOauthTokenResponse, error) } type connectorConfigServiceClient struct { @@ -47,11 +49,22 @@ func (c *connectorConfigServiceClient) GetConnectorConfig(ctx context.Context, i return out, nil } +func (c *connectorConfigServiceClient) GetConnectorOauthToken(ctx context.Context, in *GetConnectorOauthTokenRequest, opts ...grpc.CallOption) (*GetConnectorOauthTokenResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetConnectorOauthTokenResponse) + err := c.cc.Invoke(ctx, ConnectorConfigService_GetConnectorOauthToken_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // ConnectorConfigServiceServer is the server API for ConnectorConfigService service. // All implementations should embed UnimplementedConnectorConfigServiceServer // for forward compatibility. 
type ConnectorConfigServiceServer interface { GetConnectorConfig(context.Context, *GetConnectorConfigRequest) (*GetConnectorConfigResponse, error) + GetConnectorOauthToken(context.Context, *GetConnectorOauthTokenRequest) (*GetConnectorOauthTokenResponse, error) } // UnimplementedConnectorConfigServiceServer should be embedded to have @@ -64,6 +77,9 @@ type UnimplementedConnectorConfigServiceServer struct{} func (UnimplementedConnectorConfigServiceServer) GetConnectorConfig(context.Context, *GetConnectorConfigRequest) (*GetConnectorConfigResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetConnectorConfig not implemented") } +func (UnimplementedConnectorConfigServiceServer) GetConnectorOauthToken(context.Context, *GetConnectorOauthTokenRequest) (*GetConnectorOauthTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetConnectorOauthToken not implemented") +} func (UnimplementedConnectorConfigServiceServer) testEmbeddedByValue() {} // UnsafeConnectorConfigServiceServer may be embedded to opt out of forward compatibility for this service. @@ -102,6 +118,24 @@ func _ConnectorConfigService_GetConnectorConfig_Handler(srv interface{}, ctx con return interceptor(ctx, in, info, handler) } +func _ConnectorConfigService_GetConnectorOauthToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConnectorOauthTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnectorConfigServiceServer).GetConnectorOauthToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ConnectorConfigService_GetConnectorOauthToken_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnectorConfigServiceServer).GetConnectorOauthToken(ctx, req.(*GetConnectorOauthTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + // ConnectorConfigService_ServiceDesc is the grpc.ServiceDesc for ConnectorConfigService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -113,6 +147,10 @@ var ConnectorConfigService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetConnectorConfig", Handler: _ConnectorConfigService_GetConnectorConfig_Handler, }, + { + MethodName: "GetConnectorOauthToken", + Handler: _ConnectorConfigService_GetConnectorOauthToken_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "c1/connectorapi/baton/v1/config.proto", diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config_protoopaque.pb.go new file mode 100644 index 00000000..0037a5cd --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/config_protoopaque.pb.go @@ -0,0 +1,502 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connectorapi/baton/v1/config.proto + +//go:build protoopaque + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetConnectorConfigRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConnectorConfigRequest) Reset() { + *x = GetConnectorConfigRequest{} + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConnectorConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectorConfigRequest) ProtoMessage() {} + +func (x *GetConnectorConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type GetConnectorConfigRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 GetConnectorConfigRequest_builder) Build() *GetConnectorConfigRequest { + m0 := &GetConnectorConfigRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type GetConnectorConfigResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Config []byte `protobuf:"bytes,1,opt,name=config,proto3"` + xxx_hidden_LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConnectorConfigResponse) Reset() { + *x = GetConnectorConfigResponse{} + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConnectorConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectorConfigResponse) ProtoMessage() {} + +func (x *GetConnectorConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetConnectorConfigResponse) GetConfig() []byte { + if x != nil { + return x.xxx_hidden_Config + } + return nil +} + +func (x *GetConnectorConfigResponse) GetLastUpdated() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_LastUpdated + } + return nil +} + +func (x *GetConnectorConfigResponse) SetConfig(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Config = v +} + +func (x *GetConnectorConfigResponse) SetLastUpdated(v *timestamppb.Timestamp) { + x.xxx_hidden_LastUpdated = v +} + +func (x *GetConnectorConfigResponse) HasLastUpdated() bool { + if x == nil { + return false + } + return x.xxx_hidden_LastUpdated != nil +} + +func (x *GetConnectorConfigResponse) ClearLastUpdated() { + x.xxx_hidden_LastUpdated = nil +} + +type GetConnectorConfigResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Config []byte + LastUpdated *timestamppb.Timestamp +} + +func (b0 GetConnectorConfigResponse_builder) Build() *GetConnectorConfigResponse { + m0 := &GetConnectorConfigResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Config = b.Config + x.xxx_hidden_LastUpdated = b.LastUpdated + return m0 +} + +type SignedHeader struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Key string `protobuf:"bytes,1,opt,name=key,proto3"` + xxx_hidden_Value []string `protobuf:"bytes,2,rep,name=value,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SignedHeader) Reset() { + *x = SignedHeader{} + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignedHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignedHeader) ProtoMessage() {} + +func (x *SignedHeader) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SignedHeader) GetKey() string { + if x != nil { + return x.xxx_hidden_Key + } + return "" +} + +func (x *SignedHeader) GetValue() []string { + if x != nil { + return x.xxx_hidden_Value + } + return nil +} + +func (x *SignedHeader) SetKey(v string) { + x.xxx_hidden_Key = v +} + +func (x *SignedHeader) SetValue(v []string) { + x.xxx_hidden_Value = v +} + +type SignedHeader_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Key string + Value []string +} + +func (b0 SignedHeader_builder) Build() *SignedHeader { + m0 := &SignedHeader{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Value = b.Value + return m0 +} + +type Sigv4SignedRequestSTSGetCallerIdentity struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Method string `protobuf:"bytes,1,opt,name=method,proto3"` + xxx_hidden_Endpoint string `protobuf:"bytes,2,opt,name=endpoint,proto3"` + xxx_hidden_Headers *[]*SignedHeader `protobuf:"bytes,3,rep,name=headers,proto3"` + xxx_hidden_Body []byte `protobuf:"bytes,4,opt,name=body,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) Reset() { + *x = Sigv4SignedRequestSTSGetCallerIdentity{} + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Sigv4SignedRequestSTSGetCallerIdentity) ProtoMessage() {} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) GetMethod() string { + if x != nil { + return x.xxx_hidden_Method + } + return "" +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) GetEndpoint() string { + if x != nil { + return x.xxx_hidden_Endpoint + } + return "" +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) 
GetHeaders() []*SignedHeader { + if x != nil { + if x.xxx_hidden_Headers != nil { + return *x.xxx_hidden_Headers + } + } + return nil +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) GetBody() []byte { + if x != nil { + return x.xxx_hidden_Body + } + return nil +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) SetMethod(v string) { + x.xxx_hidden_Method = v +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) SetEndpoint(v string) { + x.xxx_hidden_Endpoint = v +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) SetHeaders(v []*SignedHeader) { + x.xxx_hidden_Headers = &v +} + +func (x *Sigv4SignedRequestSTSGetCallerIdentity) SetBody(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Body = v +} + +type Sigv4SignedRequestSTSGetCallerIdentity_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Method string + Endpoint string + Headers []*SignedHeader + Body []byte +} + +func (b0 Sigv4SignedRequestSTSGetCallerIdentity_builder) Build() *Sigv4SignedRequestSTSGetCallerIdentity { + m0 := &Sigv4SignedRequestSTSGetCallerIdentity{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Method = b.Method + x.xxx_hidden_Endpoint = b.Endpoint + x.xxx_hidden_Headers = &b.Headers + x.xxx_hidden_Body = b.Body + return m0 +} + +type GetConnectorOauthTokenRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConnectorOauthTokenRequest) Reset() { + *x = GetConnectorOauthTokenRequest{} + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConnectorOauthTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectorOauthTokenRequest) ProtoMessage() {} + +func (x *GetConnectorOauthTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type GetConnectorOauthTokenRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 GetConnectorOauthTokenRequest_builder) Build() *GetConnectorOauthTokenRequest { + m0 := &GetConnectorOauthTokenRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type GetConnectorOauthTokenResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Token []byte `protobuf:"bytes,1,opt,name=token,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConnectorOauthTokenResponse) Reset() { + *x = GetConnectorOauthTokenResponse{} + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConnectorOauthTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectorOauthTokenResponse) ProtoMessage() {} + +func (x *GetConnectorOauthTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_config_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetConnectorOauthTokenResponse) GetToken() []byte { + if x != nil { + return x.xxx_hidden_Token + } + return nil +} + +func (x *GetConnectorOauthTokenResponse) SetToken(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Token = v +} + +type GetConnectorOauthTokenResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Token []byte +} + +func (b0 GetConnectorOauthTokenResponse_builder) Build() *GetConnectorOauthTokenResponse { + m0 := &GetConnectorOauthTokenResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Token = b.Token + return m0 +} + +var File_c1_connectorapi_baton_v1_config_proto protoreflect.FileDescriptor + +const file_c1_connectorapi_baton_v1_config_proto_rawDesc = "" + + "\n" + + "%c1/connectorapi/baton/v1/config.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\x1b\n" + + "\x19GetConnectorConfigRequest\"s\n" + + "\x1aGetConnectorConfigResponse\x12\x16\n" + + "\x06config\x18\x01 \x01(\fR\x06config\x12=\n" + + "\flast_updated\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\vlastUpdated\"6\n" + + "\fSignedHeader\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x03(\tR\x05value\"\xb2\x01\n" + + "&Sigv4SignedRequestSTSGetCallerIdentity\x12\x16\n" + + "\x06method\x18\x01 \x01(\tR\x06method\x12\x1a\n" + + "\bendpoint\x18\x02 \x01(\tR\bendpoint\x12@\n" + + "\aheaders\x18\x03 \x03(\v2&.c1.connectorapi.baton.v1.SignedHeaderR\aheaders\x12\x12\n" + + "\x04body\x18\x04 \x01(\fR\x04body\"\x1f\n" + + "\x1dGetConnectorOauthTokenRequest\"6\n" + + "\x1eGetConnectorOauthTokenResponse\x12\x14\n" + + "\x05token\x18\x01 \x01(\fR\x05token2\xa7\x02\n" + + "\x16ConnectorConfigService\x12\x7f\n" + + "\x12GetConnectorConfig\x123.c1.connectorapi.baton.v1.GetConnectorConfigRequest\x1a4.c1.connectorapi.baton.v1.GetConnectorConfigResponse\x12\x8b\x01\n" + + "\x16GetConnectorOauthToken\x127.c1.connectorapi.baton.v1.GetConnectorOauthTokenRequest\x1a8.c1.connectorapi.baton.v1.GetConnectorOauthTokenResponseB7Z5gitlab.com/ductone/c1/pkg/pb/c1/connectorapi/baton/v1b\x06proto3" + +var file_c1_connectorapi_baton_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_c1_connectorapi_baton_v1_config_proto_goTypes = []any{ + (*GetConnectorConfigRequest)(nil), // 0: 
c1.connectorapi.baton.v1.GetConnectorConfigRequest + (*GetConnectorConfigResponse)(nil), // 1: c1.connectorapi.baton.v1.GetConnectorConfigResponse + (*SignedHeader)(nil), // 2: c1.connectorapi.baton.v1.SignedHeader + (*Sigv4SignedRequestSTSGetCallerIdentity)(nil), // 3: c1.connectorapi.baton.v1.Sigv4SignedRequestSTSGetCallerIdentity + (*GetConnectorOauthTokenRequest)(nil), // 4: c1.connectorapi.baton.v1.GetConnectorOauthTokenRequest + (*GetConnectorOauthTokenResponse)(nil), // 5: c1.connectorapi.baton.v1.GetConnectorOauthTokenResponse + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp +} +var file_c1_connectorapi_baton_v1_config_proto_depIdxs = []int32{ + 6, // 0: c1.connectorapi.baton.v1.GetConnectorConfigResponse.last_updated:type_name -> google.protobuf.Timestamp + 2, // 1: c1.connectorapi.baton.v1.Sigv4SignedRequestSTSGetCallerIdentity.headers:type_name -> c1.connectorapi.baton.v1.SignedHeader + 0, // 2: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorConfig:input_type -> c1.connectorapi.baton.v1.GetConnectorConfigRequest + 4, // 3: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorOauthToken:input_type -> c1.connectorapi.baton.v1.GetConnectorOauthTokenRequest + 1, // 4: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorConfig:output_type -> c1.connectorapi.baton.v1.GetConnectorConfigResponse + 5, // 5: c1.connectorapi.baton.v1.ConnectorConfigService.GetConnectorOauthToken:output_type -> c1.connectorapi.baton.v1.GetConnectorOauthTokenResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_c1_connectorapi_baton_v1_config_proto_init() } +func file_c1_connectorapi_baton_v1_config_proto_init() { + if File_c1_connectorapi_baton_v1_config_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_config_proto_rawDesc), len(file_c1_connectorapi_baton_v1_config_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connectorapi_baton_v1_config_proto_goTypes, + DependencyIndexes: file_c1_connectorapi_baton_v1_config_proto_depIdxs, + MessageInfos: file_c1_connectorapi_baton_v1_config_proto_msgTypes, + }.Build() + File_c1_connectorapi_baton_v1_config_proto = out.File + file_c1_connectorapi_baton_v1_config_proto_goTypes = nil + file_c1_connectorapi_baton_v1_config_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session.pb.go index 8be34ae2..18537268 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/connectorapi/baton/v1/session.proto +//go:build !protoopaque + package v1 import ( @@ -11,7 +13,6 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -23,9 +24,10 @@ const ( ) type GetRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -55,11 +57,6 @@ func (x *GetRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. -func (*GetRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{0} -} - func (x *GetRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -74,9 +71,47 @@ func (x *GetRequest) GetKey() string { return "" } +func (x *GetRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *GetRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *GetRequest) SetKey(v string) { + x.Key = v +} + +func (x *GetRequest) SetPrefix(v string) { + x.Prefix = v +} + +type GetRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Key string + Prefix string +} + +func (b0 GetRequest_builder) Build() *GetRequest { + m0 := &GetRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.Key = b.Key + x.Prefix = b.Prefix + return m0 +} + type GetResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Found bool `protobuf:"varint,2,opt,name=found,proto3" json:"found,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -106,11 +141,6 @@ func (x *GetResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. -func (*GetResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{1} -} - func (x *GetResponse) GetValue() []byte { if x != nil { return x.Value @@ -118,10 +148,45 @@ func (x *GetResponse) GetValue() []byte { return nil } +func (x *GetResponse) GetFound() bool { + if x != nil { + return x.Found + } + return false +} + +func (x *GetResponse) SetValue(v []byte) { + if v == nil { + v = []byte{} + } + x.Value = v +} + +func (x *GetResponse) SetFound(v bool) { + x.Found = v +} + +type GetResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Value []byte + Found bool +} + +func (b0 GetResponse_builder) Build() *GetResponse { + m0 := &GetResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + x.Found = b.Found + return m0 +} + type GetManyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` Keys []string `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -151,11 +216,6 @@ func (x *GetManyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetManyRequest.ProtoReflect.Descriptor instead. -func (*GetManyRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{2} -} - func (x *GetManyRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -170,12 +230,49 @@ func (x *GetManyRequest) GetKeys() []string { return nil } +func (x *GetManyRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *GetManyRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *GetManyRequest) SetKeys(v []string) { + x.Keys = v +} + +func (x *GetManyRequest) SetPrefix(v string) { + x.Prefix = v +} + +type GetManyRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Keys []string + Prefix string +} + +func (b0 GetManyRequest_builder) Build() *GetManyRequest { + m0 := &GetManyRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.Keys = b.Keys + x.Prefix = b.Prefix + return m0 +} + type GetManyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Items []*GetManyItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + UnprocessedKeys []string `protobuf:"bytes,2,rep,name=unprocessed_keys,json=unprocessedKeys,proto3" json:"unprocessed_keys,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetManyResponse) Reset() { @@ -203,36 +300,130 @@ func (x *GetManyResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetManyResponse.ProtoReflect.Descriptor instead. -func (*GetManyResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{3} +func (x *GetManyResponse) GetItems() []*GetManyItem { + if x != nil { + return x.Items + } + return nil } -func (x *GetManyResponse) GetKey() string { +func (x *GetManyResponse) GetUnprocessedKeys() []string { + if x != nil { + return x.UnprocessedKeys + } + return nil +} + +func (x *GetManyResponse) SetItems(v []*GetManyItem) { + x.Items = v +} + +func (x *GetManyResponse) SetUnprocessedKeys(v []string) { + x.UnprocessedKeys = v +} + +type GetManyResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Items []*GetManyItem + UnprocessedKeys []string +} + +func (b0 GetManyResponse_builder) Build() *GetManyResponse { + m0 := &GetManyResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Items = b.Items + x.UnprocessedKeys = b.UnprocessedKeys + return m0 +} + +type GetManyItem struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetManyItem) Reset() { + *x = GetManyItem{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetManyItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetManyItem) ProtoMessage() {} + +func (x *GetManyItem) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetManyItem) GetKey() string { if x != nil { return x.Key } return "" } -func (x *GetManyResponse) GetValue() []byte { +func (x *GetManyItem) GetValue() []byte { if x != nil { return x.Value } return nil } +func (x *GetManyItem) SetKey(v string) { + x.Key = v +} + +func (x *GetManyItem) SetValue(v []byte) { + if v == nil { + v = []byte{} + } + x.Value = v +} + +type GetManyItem_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Key string + Value []byte +} + +func (b0 GetManyItem_builder) Build() *GetManyItem { + m0 := &GetManyItem{} + b, x := &b0, m0 + _, _ = b, x + x.Key = b.Key + x.Value = b.Value + return m0 +} + type GetAllRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *GetAllRequest) Reset() { *x = GetAllRequest{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[4] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -244,7 +435,7 @@ func (x *GetAllRequest) String() string { func (*GetAllRequest) ProtoMessage() {} func (x *GetAllRequest) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[4] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -255,11 +446,6 @@ func (x *GetAllRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetAllRequest.ProtoReflect.Descriptor instead. 
-func (*GetAllRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{4} -} - func (x *GetAllRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -274,18 +460,54 @@ func (x *GetAllRequest) GetPageToken() string { return "" } +func (x *GetAllRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *GetAllRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *GetAllRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *GetAllRequest) SetPrefix(v string) { + x.Prefix = v +} + +type GetAllRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + PageToken string + Prefix string +} + +func (b0 GetAllRequest_builder) Build() *GetAllRequest { + m0 := &GetAllRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.PageToken = b.PageToken + x.Prefix = b.Prefix + return m0 +} + type GetAllResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + state protoimpl.MessageState `protogen:"hybrid.v1"` + Items []*GetAllItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *GetAllResponse) Reset() { *x = GetAllResponse{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[5] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -297,7 +519,7 @@ func (x *GetAllResponse) String() string { func (*GetAllResponse) ProtoMessage() {} func (x *GetAllResponse) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[5] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -308,44 +530,132 @@ func (x *GetAllResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetAllResponse.ProtoReflect.Descriptor instead. -func (*GetAllResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{5} +func (x *GetAllResponse) GetItems() []*GetAllItem { + if x != nil { + return x.Items + } + return nil +} + +func (x *GetAllResponse) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *GetAllResponse) SetItems(v []*GetAllItem) { + x.Items = v +} + +func (x *GetAllResponse) SetPageToken(v string) { + x.PageToken = v +} + +type GetAllResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Items []*GetAllItem + PageToken string +} + +func (b0 GetAllResponse_builder) Build() *GetAllResponse { + m0 := &GetAllResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Items = b.Items + x.PageToken = b.PageToken + return m0 +} + +type GetAllItem struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAllItem) Reset() { + *x = GetAllItem{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAllItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllItem) ProtoMessage() {} + +func (x *GetAllItem) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (x *GetAllResponse) GetKey() string { +func (x *GetAllItem) GetKey() string { if x != nil { return x.Key } return "" } -func (x *GetAllResponse) GetValue() []byte { +func (x *GetAllItem) GetValue() []byte { if x != nil { return x.Value } return nil } -func (x *GetAllResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken +func (x *GetAllItem) SetKey(v string) { + x.Key = v +} + +func (x *GetAllItem) SetValue(v []byte) { + if v == nil { + v = []byte{} } - return "" + x.Value = v } +type GetAllItem_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Key string + Value []byte +} + +func (b0 GetAllItem_builder) Build() *GetAllItem { + m0 := &GetAllItem{} + b, x := &b0, m0 + _, _ = b, x + x.Key = b.Key + x.Value = b.Value + return m0 +} + +// 4MB for a single gRPC request is the default limit- divide by 100 (SetMany/GetMany max items) and add some padding for overhead. type SetRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *SetRequest) Reset() { *x = SetRequest{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[6] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -357,7 +667,7 @@ func (x *SetRequest) String() string { func (*SetRequest) ProtoMessage() {} func (x *SetRequest) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[6] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -368,11 +678,6 @@ func (x *SetRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetRequest.ProtoReflect.Descriptor instead. 
-func (*SetRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{6} -} - func (x *SetRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -394,15 +699,61 @@ func (x *SetRequest) GetValue() []byte { return nil } +func (x *SetRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *SetRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *SetRequest) SetKey(v string) { + x.Key = v +} + +func (x *SetRequest) SetValue(v []byte) { + if v == nil { + v = []byte{} + } + x.Value = v +} + +func (x *SetRequest) SetPrefix(v string) { + x.Prefix = v +} + +type SetRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Key string + Value []byte + Prefix string +} + +func (b0 SetRequest_builder) Build() *SetRequest { + m0 := &SetRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.Key = b.Key + x.Value = b.Value + x.Prefix = b.Prefix + return m0 +} + type SetResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *SetResponse) Reset() { *x = SetResponse{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[7] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -414,7 +765,7 @@ func (x *SetResponse) String() string { func (*SetResponse) ProtoMessage() {} func (x *SetResponse) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[7] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -425,22 +776,30 @@ func (x *SetResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetResponse.ProtoReflect.Descriptor instead. -func (*SetResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{7} +type SetResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 SetResponse_builder) Build() *SetResponse { + m0 := &SetResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type SetManyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` Values map[string][]byte `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *SetManyRequest) Reset() { *x = SetManyRequest{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[8] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -452,7 +811,7 @@ func (x *SetManyRequest) String() string { func (*SetManyRequest) ProtoMessage() {} func (x *SetManyRequest) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[8] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -463,11 +822,6 @@ func (x *SetManyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetManyRequest.ProtoReflect.Descriptor instead. -func (*SetManyRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{8} -} - func (x *SetManyRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -482,15 +836,52 @@ func (x *SetManyRequest) GetValues() map[string][]byte { return nil } +func (x *SetManyRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *SetManyRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *SetManyRequest) SetValues(v map[string][]byte) { + x.Values = v +} + +func (x *SetManyRequest) SetPrefix(v string) { + x.Prefix = v +} + +type SetManyRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SyncId string + Values map[string][]byte + Prefix string +} + +func (b0 SetManyRequest_builder) Build() *SetManyRequest { + m0 := &SetManyRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.Values = b.Values + x.Prefix = b.Prefix + return m0 +} + type SetManyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *SetManyResponse) Reset() { *x = SetManyResponse{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[9] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -502,7 +893,7 @@ func (x *SetManyResponse) String() string { func (*SetManyResponse) ProtoMessage() {} func (x *SetManyResponse) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[9] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -513,22 +904,30 @@ func (x *SetManyResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetManyResponse.ProtoReflect.Descriptor instead. -func (*SetManyResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{9} +type SetManyResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 SetManyResponse_builder) Build() *SetManyResponse { + m0 := &SetManyResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type DeleteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *DeleteRequest) Reset() { *x = DeleteRequest{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[10] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -540,7 +939,7 @@ func (x *DeleteRequest) String() string { func (*DeleteRequest) ProtoMessage() {} func (x *DeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[10] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -551,11 +950,6 @@ func (x *DeleteRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{10} -} - func (x *DeleteRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -570,15 +964,52 @@ func (x *DeleteRequest) GetKey() string { return "" } +func (x *DeleteRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *DeleteRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *DeleteRequest) SetKey(v string) { + x.Key = v +} + +func (x *DeleteRequest) SetPrefix(v string) { + x.Prefix = v +} + +type DeleteRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Key string + Prefix string +} + +func (b0 DeleteRequest_builder) Build() *DeleteRequest { + m0 := &DeleteRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.Key = b.Key + x.Prefix = b.Prefix + return m0 +} + type DeleteResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *DeleteResponse) Reset() { *x = DeleteResponse{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[11] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -590,7 +1021,7 @@ func (x *DeleteResponse) String() string { func (*DeleteResponse) ProtoMessage() {} func (x *DeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[11] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -601,22 +1032,30 @@ func (x *DeleteResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. -func (*DeleteResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{11} +type DeleteResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 DeleteResponse_builder) Build() *DeleteResponse { + m0 := &DeleteResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type DeleteManyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` Keys []string `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *DeleteManyRequest) Reset() { *x = DeleteManyRequest{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[12] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -628,7 +1067,7 @@ func (x *DeleteManyRequest) String() string { func (*DeleteManyRequest) ProtoMessage() {} func (x *DeleteManyRequest) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[12] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -639,11 +1078,6 @@ func (x *DeleteManyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteManyRequest.ProtoReflect.Descriptor instead. -func (*DeleteManyRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{12} -} - func (x *DeleteManyRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -658,15 +1092,52 @@ func (x *DeleteManyRequest) GetKeys() []string { return nil } +func (x *DeleteManyRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *DeleteManyRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *DeleteManyRequest) SetKeys(v []string) { + x.Keys = v +} + +func (x *DeleteManyRequest) SetPrefix(v string) { + x.Prefix = v +} + +type DeleteManyRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SyncId string + Keys []string + Prefix string +} + +func (b0 DeleteManyRequest_builder) Build() *DeleteManyRequest { + m0 := &DeleteManyRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.Keys = b.Keys + x.Prefix = b.Prefix + return m0 +} + type DeleteManyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *DeleteManyResponse) Reset() { *x = DeleteManyResponse{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[13] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -678,7 +1149,7 @@ func (x *DeleteManyResponse) String() string { func (*DeleteManyResponse) ProtoMessage() {} func (x *DeleteManyResponse) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[13] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -689,21 +1160,29 @@ func (x *DeleteManyResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteManyResponse.ProtoReflect.Descriptor instead. -func (*DeleteManyResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{13} +type DeleteManyResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 DeleteManyResponse_builder) Build() *DeleteManyResponse { + m0 := &DeleteManyResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type ClearRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ClearRequest) Reset() { *x = ClearRequest{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[14] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -715,7 +1194,7 @@ func (x *ClearRequest) String() string { func (*ClearRequest) ProtoMessage() {} func (x *ClearRequest) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[14] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -726,11 +1205,6 @@ func (x *ClearRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ClearRequest.ProtoReflect.Descriptor instead. 
-func (*ClearRequest) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{14} -} - func (x *ClearRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -738,15 +1212,46 @@ func (x *ClearRequest) GetSyncId() string { return "" } +func (x *ClearRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ClearRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *ClearRequest) SetPrefix(v string) { + x.Prefix = v +} + +type ClearRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Prefix string +} + +func (b0 ClearRequest_builder) Build() *ClearRequest { + m0 := &ClearRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.Prefix = b.Prefix + return m0 +} + type ClearResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ClearResponse) Reset() { *x = ClearResponse{} - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[15] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -758,7 +1263,7 @@ func (x *ClearResponse) String() string { func (*ClearResponse) ProtoMessage() {} func (x *ClearResponse) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[15] + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -769,207 +1274,144 @@ func (x *ClearResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ClearResponse.ProtoReflect.Descriptor instead. -func (*ClearResponse) Descriptor() ([]byte, []int) { - return file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP(), []int{15} -} - -var File_c1_connectorapi_baton_v1_session_proto protoreflect.FileDescriptor +type ClearResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
-var file_c1_connectorapi_baton_v1_session_proto_rawDesc = string([]byte{ - 0x0a, 0x26, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5d, 0x0a, 0x0a, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x79, 0x6e, - 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, - 0x13, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, - 0x32, 0x37, 0x7d, 0x24, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, - 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x23, 0x0a, 0x0b, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x6f, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, - 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, 0x32, 0x37, 0x7d, 0x24, 0x52, 0x06, 0x73, 0x79, - 0x6e, 0x63, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x42, 0x16, 0xfa, 0x42, 0x13, 0x92, 0x01, 0x10, 0x08, 0x01, 0x10, 0xc8, 0x01, 0x18, - 0x01, 0x22, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, - 0x22, 0x39, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x61, 0x0a, 0x0d, 0x47, - 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x07, - 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, - 0x42, 0x15, 0x72, 0x13, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, - 0x39, 0x5d, 0x7b, 0x32, 0x37, 0x7d, 0x24, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, - 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x60, - 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 
0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0x7e, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, - 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, - 0x30, 0x2d, 0x39, 0x5d, 0x7b, 0x32, 0x37, 0x7d, 0x24, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, - 0x64, 0x12, 0x1c, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, - 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x1f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x09, - 0xfa, 0x42, 0x06, 0x7a, 0x04, 0x18, 0x80, 0x80, 0x40, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x22, 0x0d, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0xe5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, - 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, 0x32, 0x37, 0x7d, 0x24, 0x52, 0x06, 0x73, - 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x65, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x17, 0xfa, 0x42, 0x14, - 0x9a, 0x01, 0x11, 0x22, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x2a, 0x06, 0x7a, 0x04, - 0x18, 0x80, 0x80, 0x40, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x39, 0x0a, 0x0b, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x11, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4d, 0x61, - 0x6e, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x60, 0x0a, 0x0d, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x73, - 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, - 0x15, 0x72, 0x13, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, - 0x5d, 0x7b, 0x32, 0x37, 0x7d, 0x24, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x1c, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, - 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x10, 0x0a, 0x0e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x70, - 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13, 0x32, 
0x11, 0x5e, 0x5b, 0x61, - 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, 0x32, 0x37, 0x7d, 0x24, 0x52, 0x06, - 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x42, 0x14, 0xfa, 0x42, 0x11, 0x92, 0x01, 0x0e, 0x08, 0x01, 0x10, 0xc8, - 0x01, 0x22, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x02, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, - 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x0a, 0x0c, 0x43, 0x6c, 0x65, 0x61, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13, 0x32, 0x11, - 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x5d, 0x7b, 0x32, 0x37, 0x7d, - 0x24, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x0f, 0x0a, 0x0d, 0x43, 0x6c, 0x65, - 0x61, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8e, 0x06, 0x0a, 0x13, 0x42, - 0x61, 0x74, 0x6f, 0x6e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x54, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x24, 0x2e, 0x63, 0x31, 0x2e, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4d, - 0x61, 0x6e, 0x79, 0x12, 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, - 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, - 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5f, 0x0a, 0x06, - 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x12, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, - 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x54, 0x0a, - 0x03, 0x53, 0x65, 0x74, 0x12, 0x24, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x63, 0x31, 0x2e, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x07, 0x53, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x79, 0x12, 0x28, - 
0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, - 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4d, 0x61, 0x6e, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, - 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, - 0x6e, 0x79, 0x12, 0x2b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, - 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x4d, 0x61, 0x6e, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x05, 0x43, 0x6c, 0x65, 0x61, 0x72, 0x12, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, - 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x65, 0x61, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x37, 0x5a, 0x35, 0x67, - 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x6e, - 0x65, 0x2f, 0x63, 0x31, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2f, 0x62, 0x61, 0x74, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_connectorapi_baton_v1_session_proto_rawDescOnce sync.Once - file_c1_connectorapi_baton_v1_session_proto_rawDescData []byte -) +} -func file_c1_connectorapi_baton_v1_session_proto_rawDescGZIP() []byte { - file_c1_connectorapi_baton_v1_session_proto_rawDescOnce.Do(func() { - file_c1_connectorapi_baton_v1_session_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_session_proto_rawDesc), len(file_c1_connectorapi_baton_v1_session_proto_rawDesc))) - }) - return file_c1_connectorapi_baton_v1_session_proto_rawDescData +func (b0 ClearResponse_builder) Build() *ClearResponse { + m0 := &ClearResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 } -var file_c1_connectorapi_baton_v1_session_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var 
File_c1_connectorapi_baton_v1_session_proto protoreflect.FileDescriptor + +const file_c1_connectorapi_baton_v1_session_proto_rawDesc = "" + + "\n" + + "&c1/connectorapi/baton/v1/session.proto\x12\x18c1.connectorapi.baton.v1\x1a\x17validate/validate.proto\"\x7f\n" + + "\n" + + "GetRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12\x1c\n" + + "\x03key\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x03key\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"9\n" + + "\vGetResponse\x12\x14\n" + + "\x05value\x18\x01 \x01(\fR\x05value\x12\x14\n" + + "\x05found\x18\x02 \x01(\bR\x05found\"\x90\x01\n" + + "\x0eGetManyRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12)\n" + + "\x04keys\x18\x02 \x03(\tB\x15\xfaB\x12\x92\x01\x0f\b\x01\x10d\x18\x01\"\ar\x05\x10\x01\x18\x80\x02R\x04keys\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"y\n" + + "\x0fGetManyResponse\x12;\n" + + "\x05items\x18\x01 \x03(\v2%.c1.connectorapi.baton.v1.GetManyItemR\x05items\x12)\n" + + "\x10unprocessed_keys\x18\x02 \x03(\tR\x0funprocessedKeys\"5\n" + + "\vGetManyItem\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"\x83\x01\n" + + "\rGetAllRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12\x1d\n" + + "\n" + + "page_token\x18\x02 \x01(\tR\tpageToken\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"k\n" + + "\x0eGetAllResponse\x12:\n" + + "\x05items\x18\x01 \x03(\v2$.c1.connectorapi.baton.v1.GetAllItemR\x05items\x12\x1d\n" + + "\n" + + "page_token\x18\x02 \x01(\tR\tpageToken\"4\n" + + "\n" + + "GetAllItem\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"\xa3\x01\n" + + "\n" + + "SetRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12\x1c\n" + + "\x03key\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x03key\x12\"\n" + + "\x05value\x18\x03 \x01(\fB\f\xfaB\tz\a\x10\x00\x18\x80\xe8\xfd\x01R\x05value\x12 \n" + + "\x06prefix\x18\x04 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"\r\n" + + "\vSetResponse\"\x8e\x02\n" + + "\x0eSetManyRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12l\n" + + "\x06values\x18\x02 \x03(\v24.c1.connectorapi.baton.v1.SetManyRequest.ValuesEntryB\x1e\xfaB\x1b\x9a\x01\x18\b\x01\x10d\"\ar\x05\x10\x01\x18\x80\x02*\tz\a\x10\x00\x18\x80\xe8\xfd\x01R\x06values\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\x1a9\n" + + "\vValuesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value:\x028\x01\"\x11\n" + + "\x0fSetManyResponse\"\x82\x01\n" + + "\rDeleteRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12\x1c\n" + + "\x03key\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x03key\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"\x10\n" + + "\x0eDeleteResponse\"\x92\x01\n" + + "\x11DeleteManyRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12(\n" + + "\x04keys\x18\x02 \x03(\tB\x14\xfaB\x11\x92\x01\x0e\b\x01\x10\xc8\x01\"\ar\x05\x10\x01\x18\x80\x02R\x04keys\x12 \n" + + "\x06prefix\x18\x03 
\x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"\x14\n" + + "\x12DeleteManyResponse\"c\n" + + "\fClearRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12 \n" + + "\x06prefix\x18\x02 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"\x0f\n" + + "\rClearResponse2\x8a\x06\n" + + "\x13BatonSessionService\x12T\n" + + "\x03Get\x12$.c1.connectorapi.baton.v1.GetRequest\x1a%.c1.connectorapi.baton.v1.GetResponse\"\x00\x12`\n" + + "\aGetMany\x12(.c1.connectorapi.baton.v1.GetManyRequest\x1a).c1.connectorapi.baton.v1.GetManyResponse\"\x00\x12]\n" + + "\x06GetAll\x12'.c1.connectorapi.baton.v1.GetAllRequest\x1a(.c1.connectorapi.baton.v1.GetAllResponse\"\x00\x12T\n" + + "\x03Set\x12$.c1.connectorapi.baton.v1.SetRequest\x1a%.c1.connectorapi.baton.v1.SetResponse\"\x00\x12`\n" + + "\aSetMany\x12(.c1.connectorapi.baton.v1.SetManyRequest\x1a).c1.connectorapi.baton.v1.SetManyResponse\"\x00\x12]\n" + + "\x06Delete\x12'.c1.connectorapi.baton.v1.DeleteRequest\x1a(.c1.connectorapi.baton.v1.DeleteResponse\"\x00\x12i\n" + + "\n" + + "DeleteMany\x12+.c1.connectorapi.baton.v1.DeleteManyRequest\x1a,.c1.connectorapi.baton.v1.DeleteManyResponse\"\x00\x12Z\n" + + "\x05Clear\x12&.c1.connectorapi.baton.v1.ClearRequest\x1a'.c1.connectorapi.baton.v1.ClearResponse\"\x00B7Z5gitlab.com/ductone/c1/pkg/pb/c1/connectorapi/baton/v1b\x06proto3" + +var file_c1_connectorapi_baton_v1_session_proto_msgTypes = make([]protoimpl.MessageInfo, 19) var file_c1_connectorapi_baton_v1_session_proto_goTypes = []any{ (*GetRequest)(nil), // 0: c1.connectorapi.baton.v1.GetRequest (*GetResponse)(nil), // 1: c1.connectorapi.baton.v1.GetResponse (*GetManyRequest)(nil), // 2: c1.connectorapi.baton.v1.GetManyRequest (*GetManyResponse)(nil), // 3: c1.connectorapi.baton.v1.GetManyResponse - (*GetAllRequest)(nil), // 4: c1.connectorapi.baton.v1.GetAllRequest - (*GetAllResponse)(nil), // 5: c1.connectorapi.baton.v1.GetAllResponse - (*SetRequest)(nil), // 6: c1.connectorapi.baton.v1.SetRequest - (*SetResponse)(nil), // 7: c1.connectorapi.baton.v1.SetResponse - (*SetManyRequest)(nil), // 8: c1.connectorapi.baton.v1.SetManyRequest - (*SetManyResponse)(nil), // 9: c1.connectorapi.baton.v1.SetManyResponse - (*DeleteRequest)(nil), // 10: c1.connectorapi.baton.v1.DeleteRequest - (*DeleteResponse)(nil), // 11: c1.connectorapi.baton.v1.DeleteResponse - (*DeleteManyRequest)(nil), // 12: c1.connectorapi.baton.v1.DeleteManyRequest - (*DeleteManyResponse)(nil), // 13: c1.connectorapi.baton.v1.DeleteManyResponse - (*ClearRequest)(nil), // 14: c1.connectorapi.baton.v1.ClearRequest - (*ClearResponse)(nil), // 15: c1.connectorapi.baton.v1.ClearResponse - nil, // 16: c1.connectorapi.baton.v1.SetManyRequest.ValuesEntry + (*GetManyItem)(nil), // 4: c1.connectorapi.baton.v1.GetManyItem + (*GetAllRequest)(nil), // 5: c1.connectorapi.baton.v1.GetAllRequest + (*GetAllResponse)(nil), // 6: c1.connectorapi.baton.v1.GetAllResponse + (*GetAllItem)(nil), // 7: c1.connectorapi.baton.v1.GetAllItem + (*SetRequest)(nil), // 8: c1.connectorapi.baton.v1.SetRequest + (*SetResponse)(nil), // 9: c1.connectorapi.baton.v1.SetResponse + (*SetManyRequest)(nil), // 10: c1.connectorapi.baton.v1.SetManyRequest + (*SetManyResponse)(nil), // 11: c1.connectorapi.baton.v1.SetManyResponse + (*DeleteRequest)(nil), // 12: c1.connectorapi.baton.v1.DeleteRequest + (*DeleteResponse)(nil), // 13: c1.connectorapi.baton.v1.DeleteResponse + (*DeleteManyRequest)(nil), // 14: c1.connectorapi.baton.v1.DeleteManyRequest + (*DeleteManyResponse)(nil), // 15: 
c1.connectorapi.baton.v1.DeleteManyResponse + (*ClearRequest)(nil), // 16: c1.connectorapi.baton.v1.ClearRequest + (*ClearResponse)(nil), // 17: c1.connectorapi.baton.v1.ClearResponse + nil, // 18: c1.connectorapi.baton.v1.SetManyRequest.ValuesEntry } var file_c1_connectorapi_baton_v1_session_proto_depIdxs = []int32{ - 16, // 0: c1.connectorapi.baton.v1.SetManyRequest.values:type_name -> c1.connectorapi.baton.v1.SetManyRequest.ValuesEntry - 0, // 1: c1.connectorapi.baton.v1.BatonSessionService.Get:input_type -> c1.connectorapi.baton.v1.GetRequest - 2, // 2: c1.connectorapi.baton.v1.BatonSessionService.GetMany:input_type -> c1.connectorapi.baton.v1.GetManyRequest - 4, // 3: c1.connectorapi.baton.v1.BatonSessionService.GetAll:input_type -> c1.connectorapi.baton.v1.GetAllRequest - 6, // 4: c1.connectorapi.baton.v1.BatonSessionService.Set:input_type -> c1.connectorapi.baton.v1.SetRequest - 8, // 5: c1.connectorapi.baton.v1.BatonSessionService.SetMany:input_type -> c1.connectorapi.baton.v1.SetManyRequest - 10, // 6: c1.connectorapi.baton.v1.BatonSessionService.Delete:input_type -> c1.connectorapi.baton.v1.DeleteRequest - 12, // 7: c1.connectorapi.baton.v1.BatonSessionService.DeleteMany:input_type -> c1.connectorapi.baton.v1.DeleteManyRequest - 14, // 8: c1.connectorapi.baton.v1.BatonSessionService.Clear:input_type -> c1.connectorapi.baton.v1.ClearRequest - 1, // 9: c1.connectorapi.baton.v1.BatonSessionService.Get:output_type -> c1.connectorapi.baton.v1.GetResponse - 3, // 10: c1.connectorapi.baton.v1.BatonSessionService.GetMany:output_type -> c1.connectorapi.baton.v1.GetManyResponse - 5, // 11: c1.connectorapi.baton.v1.BatonSessionService.GetAll:output_type -> c1.connectorapi.baton.v1.GetAllResponse - 7, // 12: c1.connectorapi.baton.v1.BatonSessionService.Set:output_type -> c1.connectorapi.baton.v1.SetResponse - 9, // 13: c1.connectorapi.baton.v1.BatonSessionService.SetMany:output_type -> c1.connectorapi.baton.v1.SetManyResponse - 11, // 14: c1.connectorapi.baton.v1.BatonSessionService.Delete:output_type -> c1.connectorapi.baton.v1.DeleteResponse - 13, // 15: c1.connectorapi.baton.v1.BatonSessionService.DeleteMany:output_type -> c1.connectorapi.baton.v1.DeleteManyResponse - 15, // 16: c1.connectorapi.baton.v1.BatonSessionService.Clear:output_type -> c1.connectorapi.baton.v1.ClearResponse - 9, // [9:17] is the sub-list for method output_type - 1, // [1:9] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 4, // 0: c1.connectorapi.baton.v1.GetManyResponse.items:type_name -> c1.connectorapi.baton.v1.GetManyItem + 7, // 1: c1.connectorapi.baton.v1.GetAllResponse.items:type_name -> c1.connectorapi.baton.v1.GetAllItem + 18, // 2: c1.connectorapi.baton.v1.SetManyRequest.values:type_name -> c1.connectorapi.baton.v1.SetManyRequest.ValuesEntry + 0, // 3: c1.connectorapi.baton.v1.BatonSessionService.Get:input_type -> c1.connectorapi.baton.v1.GetRequest + 2, // 4: c1.connectorapi.baton.v1.BatonSessionService.GetMany:input_type -> c1.connectorapi.baton.v1.GetManyRequest + 5, // 5: c1.connectorapi.baton.v1.BatonSessionService.GetAll:input_type -> c1.connectorapi.baton.v1.GetAllRequest + 8, // 6: c1.connectorapi.baton.v1.BatonSessionService.Set:input_type -> c1.connectorapi.baton.v1.SetRequest + 10, // 7: c1.connectorapi.baton.v1.BatonSessionService.SetMany:input_type -> c1.connectorapi.baton.v1.SetManyRequest + 12, // 8: 
c1.connectorapi.baton.v1.BatonSessionService.Delete:input_type -> c1.connectorapi.baton.v1.DeleteRequest + 14, // 9: c1.connectorapi.baton.v1.BatonSessionService.DeleteMany:input_type -> c1.connectorapi.baton.v1.DeleteManyRequest + 16, // 10: c1.connectorapi.baton.v1.BatonSessionService.Clear:input_type -> c1.connectorapi.baton.v1.ClearRequest + 1, // 11: c1.connectorapi.baton.v1.BatonSessionService.Get:output_type -> c1.connectorapi.baton.v1.GetResponse + 3, // 12: c1.connectorapi.baton.v1.BatonSessionService.GetMany:output_type -> c1.connectorapi.baton.v1.GetManyResponse + 6, // 13: c1.connectorapi.baton.v1.BatonSessionService.GetAll:output_type -> c1.connectorapi.baton.v1.GetAllResponse + 9, // 14: c1.connectorapi.baton.v1.BatonSessionService.Set:output_type -> c1.connectorapi.baton.v1.SetResponse + 11, // 15: c1.connectorapi.baton.v1.BatonSessionService.SetMany:output_type -> c1.connectorapi.baton.v1.SetManyResponse + 13, // 16: c1.connectorapi.baton.v1.BatonSessionService.Delete:output_type -> c1.connectorapi.baton.v1.DeleteResponse + 15, // 17: c1.connectorapi.baton.v1.BatonSessionService.DeleteMany:output_type -> c1.connectorapi.baton.v1.DeleteManyResponse + 17, // 18: c1.connectorapi.baton.v1.BatonSessionService.Clear:output_type -> c1.connectorapi.baton.v1.ClearResponse + 11, // [11:19] is the sub-list for method output_type + 3, // [3:11] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_c1_connectorapi_baton_v1_session_proto_init() } @@ -983,7 +1425,7 @@ func file_c1_connectorapi_baton_v1_session_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_session_proto_rawDesc), len(file_c1_connectorapi_baton_v1_session_proto_rawDesc)), NumEnums: 0, - NumMessages: 17, + NumMessages: 19, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session.pb.validate.go index fcf133ae..7f0c234b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session.pb.validate.go @@ -79,6 +79,17 @@ func (m *GetRequest) validate(all bool) error { errors = append(errors, err) } + if utf8.RuneCountInString(m.GetPrefix()) > 256 { + err := GetRequestValidationError{ + field: "Prefix", + reason: "value length must be at most 256 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return GetRequestMultiError(errors) } @@ -182,6 +193,8 @@ func (m *GetResponse) validate(all bool) error { // no validation rules for Value + // no validation rules for Found + if len(errors) > 0 { return GetResponseMultiError(errors) } @@ -292,10 +305,10 @@ func (m *GetManyRequest) validate(all bool) error { errors = append(errors, err) } - if l := len(m.GetKeys()); l < 1 || l > 200 { + if l := len(m.GetKeys()); l < 1 || l > 100 { err := GetManyRequestValidationError{ field: "Keys", - reason: "value must contain between 1 and 200 items, inclusive", + reason: "value must contain between 1 and 100 items, inclusive", } if !all { return err @@ -334,6 +347,17 @@ func (m *GetManyRequest) validate(all bool) error { } + if utf8.RuneCountInString(m.GetPrefix()) > 256 { + 
err := GetManyRequestValidationError{ + field: "Prefix", + reason: "value length must be at most 256 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return GetManyRequestMultiError(errors) } @@ -436,9 +460,39 @@ func (m *GetManyResponse) validate(all bool) error { var errors []error - // no validation rules for Key + for idx, item := range m.GetItems() { + _, _ = idx, item - // no validation rules for Value + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetManyResponseValidationError{ + field: fmt.Sprintf("Items[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetManyResponseValidationError{ + field: fmt.Sprintf("Items[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetManyResponseValidationError{ + field: fmt.Sprintf("Items[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } if len(errors) > 0 { return GetManyResponseMultiError(errors) @@ -518,6 +572,109 @@ var _ interface { ErrorName() string } = GetManyResponseValidationError{} +// Validate checks the field values on GetManyItem with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GetManyItem) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetManyItem with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GetManyItemMultiError, or +// nil if none found. +func (m *GetManyItem) ValidateAll() error { + return m.validate(true) +} + +func (m *GetManyItem) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for Value + + if len(errors) > 0 { + return GetManyItemMultiError(errors) + } + + return nil +} + +// GetManyItemMultiError is an error wrapping multiple validation errors +// returned by GetManyItem.ValidateAll() if the designated constraints aren't met. +type GetManyItemMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetManyItemMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetManyItemMultiError) AllErrors() []error { return m } + +// GetManyItemValidationError is the validation error returned by +// GetManyItem.Validate if the designated constraints aren't met. +type GetManyItemValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetManyItemValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetManyItemValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetManyItemValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e GetManyItemValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetManyItemValidationError) ErrorName() string { return "GetManyItemValidationError" } + +// Error satisfies the builtin error interface +func (e GetManyItemValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetManyItem.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetManyItemValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetManyItemValidationError{} + // Validate checks the field values on GetAllRequest with the rules defined in // the proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. @@ -553,6 +710,17 @@ func (m *GetAllRequest) validate(all bool) error { // no validation rules for PageToken + if utf8.RuneCountInString(m.GetPrefix()) > 256 { + err := GetAllRequestValidationError{ + field: "Prefix", + reason: "value length must be at most 256 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return GetAllRequestMultiError(errors) } @@ -655,11 +823,41 @@ func (m *GetAllResponse) validate(all bool) error { var errors []error - // no validation rules for Key + for idx, item := range m.GetItems() { + _, _ = idx, item - // no validation rules for Value + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetAllResponseValidationError{ + field: fmt.Sprintf("Items[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetAllResponseValidationError{ + field: fmt.Sprintf("Items[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetAllResponseValidationError{ + field: fmt.Sprintf("Items[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } - // no validation rules for NextPageToken + // no validation rules for PageToken if len(errors) > 0 { return GetAllResponseMultiError(errors) @@ -739,6 +937,109 @@ var _ interface { ErrorName() string } = GetAllResponseValidationError{} +// Validate checks the field values on GetAllItem with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *GetAllItem) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetAllItem with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in GetAllItemMultiError, or +// nil if none found. 
+func (m *GetAllItem) ValidateAll() error { + return m.validate(true) +} + +func (m *GetAllItem) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for Value + + if len(errors) > 0 { + return GetAllItemMultiError(errors) + } + + return nil +} + +// GetAllItemMultiError is an error wrapping multiple validation errors +// returned by GetAllItem.ValidateAll() if the designated constraints aren't met. +type GetAllItemMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetAllItemMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetAllItemMultiError) AllErrors() []error { return m } + +// GetAllItemValidationError is the validation error returned by +// GetAllItem.Validate if the designated constraints aren't met. +type GetAllItemValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetAllItemValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetAllItemValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetAllItemValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetAllItemValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetAllItemValidationError) ErrorName() string { return "GetAllItemValidationError" } + +// Error satisfies the builtin error interface +func (e GetAllItemValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetAllItem.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetAllItemValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetAllItemValidationError{} + // Validate checks the field values on SetRequest with the rules defined in the // proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. 
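
The hunks above wire the new prefix field and the GetManyItem/GetAllItem wrappers into the generated validators: prefixes are capped at 256 runes and each returned item now carries its own Validate/ValidateAll. Below is a minimal sketch of how a caller might exercise these validators before issuing an RPC; the package alias, sync id, and key values are illustrative only, and it assumes the regenerated non-opaque session.pb.go exposes the same *_builder types that the protoopaque file later in this patch does.

package main

import (
	"fmt"
	"strings"

	batonv1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1"
)

func main() {
	// Build a GetManyRequest via the generated builder API (assumed to be
	// available under the non-protoopaque build as well).
	req := batonv1.GetManyRequest_builder{
		SyncId: "0ujsszwN8NRY24YaXiTIE2VWDTS", // illustrative 27-char id matching ^[a-zA-Z0-9]{27}$
		Keys:   []string{"grant:1", "grant:2"}, // 1-100 unique keys
		Prefix: strings.Repeat("p", 300),       // deliberately exceeds the new 256-rune cap
	}.Build()

	// ValidateAll collects every violation into a GetManyRequestMultiError,
	// so the over-long Prefix is reported alongside any other problems.
	if err := req.ValidateAll(); err != nil {
		fmt.Println("request rejected before the RPC:", err)
	}
}
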
@@ -783,10 +1084,21 @@ func (m *SetRequest) validate(all bool) error { errors = append(errors, err) } - if len(m.GetValue()) > 1048576 { + if l := len(m.GetValue()); l < 0 || l > 4158464 { err := SetRequestValidationError{ field: "Value", - reason: "value length must be at most 1048576 bytes", + reason: "value length must be between 0 and 4158464 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if utf8.RuneCountInString(m.GetPrefix()) > 256 { + err := SetRequestValidationError{ + field: "Prefix", + reason: "value length must be at most 256 runes", } if !all { return err @@ -1005,6 +1317,17 @@ func (m *SetManyRequest) validate(all bool) error { errors = append(errors, err) } + if l := len(m.GetValues()); l < 1 || l > 100 { + err := SetManyRequestValidationError{ + field: "Values", + reason: "value must contain between 1 and 100 pairs, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + { sorted_keys := make([]string, len(m.GetValues())) i := 0 @@ -1028,10 +1351,10 @@ func (m *SetManyRequest) validate(all bool) error { errors = append(errors, err) } - if len(val) > 1048576 { + if l := len(val); l < 0 || l > 4158464 { err := SetManyRequestValidationError{ field: fmt.Sprintf("Values[%v]", key), - reason: "value length must be at most 1048576 bytes", + reason: "value length must be between 0 and 4158464 bytes, inclusive", } if !all { return err @@ -1042,6 +1365,17 @@ func (m *SetManyRequest) validate(all bool) error { } } + if utf8.RuneCountInString(m.GetPrefix()) > 256 { + err := SetManyRequestValidationError{ + field: "Prefix", + reason: "value length must be at most 256 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return SetManyRequestMultiError(errors) } @@ -1266,6 +1600,17 @@ func (m *DeleteRequest) validate(all bool) error { errors = append(errors, err) } + if utf8.RuneCountInString(m.GetPrefix()) > 256 { + err := DeleteRequestValidationError{ + field: "Prefix", + reason: "value length must be at most 256 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return DeleteRequestMultiError(errors) } @@ -1506,6 +1851,17 @@ func (m *DeleteManyRequest) validate(all bool) error { } + if utf8.RuneCountInString(m.GetPrefix()) > 256 { + err := DeleteManyRequestValidationError{ + field: "Prefix", + reason: "value length must be at most 256 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return DeleteManyRequestMultiError(errors) } @@ -1723,6 +2079,17 @@ func (m *ClearRequest) validate(all bool) error { errors = append(errors, err) } + if utf8.RuneCountInString(m.GetPrefix()) > 256 { + err := ClearRequestValidationError{ + field: "Prefix", + reason: "value length must be at most 256 runes", + } + if !all { + return err + } + errors = append(errors, err) + } + if len(errors) > 0 { return ClearRequestMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session_grpc.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session_grpc.pb.go index d5201be2..1e93c3d1 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session_grpc.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session_grpc.pb.go @@ -34,8 +34,8 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to 
https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type BatonSessionServiceClient interface { Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - GetMany(ctx context.Context, in *GetManyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetManyResponse], error) - GetAll(ctx context.Context, in *GetAllRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetAllResponse], error) + GetMany(ctx context.Context, in *GetManyRequest, opts ...grpc.CallOption) (*GetManyResponse, error) + GetAll(ctx context.Context, in *GetAllRequest, opts ...grpc.CallOption) (*GetAllResponse, error) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*SetResponse, error) SetMany(ctx context.Context, in *SetManyRequest, opts ...grpc.CallOption) (*SetManyResponse, error) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) @@ -61,44 +61,26 @@ func (c *batonSessionServiceClient) Get(ctx context.Context, in *GetRequest, opt return out, nil } -func (c *batonSessionServiceClient) GetMany(ctx context.Context, in *GetManyRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetManyResponse], error) { +func (c *batonSessionServiceClient) GetMany(ctx context.Context, in *GetManyRequest, opts ...grpc.CallOption) (*GetManyResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &BatonSessionService_ServiceDesc.Streams[0], BatonSessionService_GetMany_FullMethodName, cOpts...) + out := new(GetManyResponse) + err := c.cc.Invoke(ctx, BatonSessionService_GetMany_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[GetManyRequest, GetManyResponse]{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil + return out, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type BatonSessionService_GetManyClient = grpc.ServerStreamingClient[GetManyResponse] - -func (c *batonSessionServiceClient) GetAll(ctx context.Context, in *GetAllRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[GetAllResponse], error) { +func (c *batonSessionServiceClient) GetAll(ctx context.Context, in *GetAllRequest, opts ...grpc.CallOption) (*GetAllResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &BatonSessionService_ServiceDesc.Streams[1], BatonSessionService_GetAll_FullMethodName, cOpts...) + out := new(GetAllResponse) + err := c.cc.Invoke(ctx, BatonSessionService_GetAll_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[GetAllRequest, GetAllResponse]{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil + return out, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type BatonSessionService_GetAllClient = grpc.ServerStreamingClient[GetAllResponse] - func (c *batonSessionServiceClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*SetResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(SetResponse) @@ -154,8 +136,8 @@ func (c *batonSessionServiceClient) Clear(ctx context.Context, in *ClearRequest, // for forward compatibility. type BatonSessionServiceServer interface { Get(context.Context, *GetRequest) (*GetResponse, error) - GetMany(*GetManyRequest, grpc.ServerStreamingServer[GetManyResponse]) error - GetAll(*GetAllRequest, grpc.ServerStreamingServer[GetAllResponse]) error + GetMany(context.Context, *GetManyRequest) (*GetManyResponse, error) + GetAll(context.Context, *GetAllRequest) (*GetAllResponse, error) Set(context.Context, *SetRequest) (*SetResponse, error) SetMany(context.Context, *SetManyRequest) (*SetManyResponse, error) Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) @@ -173,11 +155,11 @@ type UnimplementedBatonSessionServiceServer struct{} func (UnimplementedBatonSessionServiceServer) Get(context.Context, *GetRequest) (*GetResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") } -func (UnimplementedBatonSessionServiceServer) GetMany(*GetManyRequest, grpc.ServerStreamingServer[GetManyResponse]) error { - return status.Errorf(codes.Unimplemented, "method GetMany not implemented") +func (UnimplementedBatonSessionServiceServer) GetMany(context.Context, *GetManyRequest) (*GetManyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMany not implemented") } -func (UnimplementedBatonSessionServiceServer) GetAll(*GetAllRequest, grpc.ServerStreamingServer[GetAllResponse]) error { - return status.Errorf(codes.Unimplemented, "method GetAll not implemented") +func (UnimplementedBatonSessionServiceServer) GetAll(context.Context, *GetAllRequest) (*GetAllResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAll not implemented") } func (UnimplementedBatonSessionServiceServer) Set(context.Context, *SetRequest) (*SetResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") @@ -232,28 +214,42 @@ func _BatonSessionService_Get_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _BatonSessionService_GetMany_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetManyRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func _BatonSessionService_GetMany_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetManyRequest) + if err := dec(in); err != nil { + return nil, err } - return srv.(BatonSessionServiceServer).GetMany(m, &grpc.GenericServerStream[GetManyRequest, GetManyResponse]{ServerStream: stream}) + if interceptor == nil { + return srv.(BatonSessionServiceServer).GetMany(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BatonSessionService_GetMany_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BatonSessionServiceServer).GetMany(ctx, req.(*GetManyRequest)) + } + return interceptor(ctx, in, info, handler) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type BatonSessionService_GetManyServer = grpc.ServerStreamingServer[GetManyResponse] - -func _BatonSessionService_GetAll_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetAllRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func _BatonSessionService_GetAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllRequest) + if err := dec(in); err != nil { + return nil, err } - return srv.(BatonSessionServiceServer).GetAll(m, &grpc.GenericServerStream[GetAllRequest, GetAllResponse]{ServerStream: stream}) + if interceptor == nil { + return srv.(BatonSessionServiceServer).GetAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BatonSessionService_GetAll_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BatonSessionServiceServer).GetAll(ctx, req.(*GetAllRequest)) + } + return interceptor(ctx, in, info, handler) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type BatonSessionService_GetAllServer = grpc.ServerStreamingServer[GetAllResponse] - func _BatonSessionService_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SetRequest) if err := dec(in); err != nil { @@ -355,6 +351,14 @@ var BatonSessionService_ServiceDesc = grpc.ServiceDesc{ MethodName: "Get", Handler: _BatonSessionService_Get_Handler, }, + { + MethodName: "GetMany", + Handler: _BatonSessionService_GetMany_Handler, + }, + { + MethodName: "GetAll", + Handler: _BatonSessionService_GetAll_Handler, + }, { MethodName: "Set", Handler: _BatonSessionService_Set_Handler, @@ -376,17 +380,6 @@ var BatonSessionService_ServiceDesc = grpc.ServiceDesc{ Handler: _BatonSessionService_Clear_Handler, }, }, - Streams: []grpc.StreamDesc{ - { - StreamName: "GetMany", - Handler: _BatonSessionService_GetMany_Handler, - ServerStreams: true, - }, - { - StreamName: "GetAll", - Handler: _BatonSessionService_GetAll_Handler, - ServerStreams: true, - }, - }, + Streams: []grpc.StreamDesc{}, Metadata: "c1/connectorapi/baton/v1/session.proto", } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session_protoopaque.pb.go new file mode 100644 index 00000000..870613bb --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/session_protoopaque.pb.go @@ -0,0 +1,1443 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connectorapi/baton/v1/session.proto + +//go:build protoopaque + +package v1 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_Key string `protobuf:"bytes,2,opt,name=key,proto3"` + xxx_hidden_Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetRequest) Reset() { + *x = GetRequest{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *GetRequest) GetKey() string { + if x != nil { + return x.xxx_hidden_Key + } + return "" +} + +func (x *GetRequest) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + +func (x *GetRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *GetRequest) SetKey(v string) { + x.xxx_hidden_Key = v +} + +func (x *GetRequest) SetPrefix(v string) { + x.xxx_hidden_Prefix = v +} + +type GetRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Key string + Prefix string +} + +func (b0 GetRequest_builder) Build() *GetRequest { + m0 := &GetRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Prefix = b.Prefix + return m0 +} + +type GetResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value []byte `protobuf:"bytes,1,opt,name=value,proto3"` + xxx_hidden_Found bool `protobuf:"varint,2,opt,name=found,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetResponse) Reset() { + *x = GetResponse{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetResponse) GetValue() []byte { + if x != nil { + return x.xxx_hidden_Value + } + return nil +} + +func (x *GetResponse) GetFound() bool { + if x != nil { + return x.xxx_hidden_Found + } + return false +} + +func (x *GetResponse) SetValue(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Value = v +} + +func (x *GetResponse) SetFound(v bool) { + x.xxx_hidden_Found = v +} + +type GetResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Value []byte + Found bool +} + +func (b0 GetResponse_builder) Build() *GetResponse { + m0 := &GetResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + x.xxx_hidden_Found = b.Found + return m0 +} + +type GetManyRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_Keys []string `protobuf:"bytes,2,rep,name=keys,proto3"` + xxx_hidden_Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetManyRequest) Reset() { + *x = GetManyRequest{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetManyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetManyRequest) ProtoMessage() {} + +func (x *GetManyRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetManyRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *GetManyRequest) GetKeys() []string { + if x != nil { + return x.xxx_hidden_Keys + } + return nil +} + +func (x *GetManyRequest) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + +func (x *GetManyRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *GetManyRequest) SetKeys(v []string) { + x.xxx_hidden_Keys = v +} + +func (x *GetManyRequest) SetPrefix(v string) { + x.xxx_hidden_Prefix = v +} + +type GetManyRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SyncId string + Keys []string + Prefix string +} + +func (b0 GetManyRequest_builder) Build() *GetManyRequest { + m0 := &GetManyRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_Keys = b.Keys + x.xxx_hidden_Prefix = b.Prefix + return m0 +} + +type GetManyResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Items *[]*GetManyItem `protobuf:"bytes,1,rep,name=items,proto3"` + xxx_hidden_UnprocessedKeys []string `protobuf:"bytes,2,rep,name=unprocessed_keys,json=unprocessedKeys,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetManyResponse) Reset() { + *x = GetManyResponse{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetManyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetManyResponse) ProtoMessage() {} + +func (x *GetManyResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetManyResponse) GetItems() []*GetManyItem { + if x != nil { + if x.xxx_hidden_Items != nil { + return *x.xxx_hidden_Items + } + } + return nil +} + +func (x *GetManyResponse) GetUnprocessedKeys() []string { + if x != nil { + return x.xxx_hidden_UnprocessedKeys + } + return nil +} + +func (x *GetManyResponse) SetItems(v []*GetManyItem) { + x.xxx_hidden_Items = &v +} + +func (x *GetManyResponse) SetUnprocessedKeys(v []string) { + x.xxx_hidden_UnprocessedKeys = v +} + +type GetManyResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Items []*GetManyItem + UnprocessedKeys []string +} + +func (b0 GetManyResponse_builder) Build() *GetManyResponse { + m0 := &GetManyResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Items = &b.Items + x.xxx_hidden_UnprocessedKeys = b.UnprocessedKeys + return m0 +} + +type GetManyItem struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Key string `protobuf:"bytes,1,opt,name=key,proto3"` + xxx_hidden_Value []byte `protobuf:"bytes,2,opt,name=value,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetManyItem) Reset() { + *x = GetManyItem{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetManyItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetManyItem) ProtoMessage() {} + +func (x *GetManyItem) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetManyItem) GetKey() string { + if x != nil { + return x.xxx_hidden_Key + } + return "" +} + +func (x *GetManyItem) GetValue() []byte { + if x != nil { + return x.xxx_hidden_Value + } + return nil +} + +func (x *GetManyItem) SetKey(v string) { + x.xxx_hidden_Key = v +} + +func (x *GetManyItem) SetValue(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Value = v +} + +type GetManyItem_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Key string + Value []byte +} + +func (b0 GetManyItem_builder) Build() *GetManyItem { + m0 := &GetManyItem{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Value = b.Value + return m0 +} + +type GetAllRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAllRequest) Reset() { + *x = GetAllRequest{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAllRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllRequest) ProtoMessage() {} + +func (x *GetAllRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetAllRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *GetAllRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *GetAllRequest) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + +func (x *GetAllRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *GetAllRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *GetAllRequest) SetPrefix(v 
string) { + x.xxx_hidden_Prefix = v +} + +type GetAllRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + PageToken string + Prefix string +} + +func (b0 GetAllRequest_builder) Build() *GetAllRequest { + m0 := &GetAllRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Prefix = b.Prefix + return m0 +} + +type GetAllResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Items *[]*GetAllItem `protobuf:"bytes,1,rep,name=items,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAllResponse) Reset() { + *x = GetAllResponse{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAllResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllResponse) ProtoMessage() {} + +func (x *GetAllResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetAllResponse) GetItems() []*GetAllItem { + if x != nil { + if x.xxx_hidden_Items != nil { + return *x.xxx_hidden_Items + } + } + return nil +} + +func (x *GetAllResponse) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *GetAllResponse) SetItems(v []*GetAllItem) { + x.xxx_hidden_Items = &v +} + +func (x *GetAllResponse) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +type GetAllResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Items []*GetAllItem + PageToken string +} + +func (b0 GetAllResponse_builder) Build() *GetAllResponse { + m0 := &GetAllResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Items = &b.Items + x.xxx_hidden_PageToken = b.PageToken + return m0 +} + +type GetAllItem struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Key string `protobuf:"bytes,1,opt,name=key,proto3"` + xxx_hidden_Value []byte `protobuf:"bytes,2,opt,name=value,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAllItem) Reset() { + *x = GetAllItem{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAllItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllItem) ProtoMessage() {} + +func (x *GetAllItem) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetAllItem) GetKey() string { + if x != nil { + return x.xxx_hidden_Key + } + return "" +} + +func (x *GetAllItem) GetValue() []byte { + if x != nil { + return x.xxx_hidden_Value + } + return nil +} + +func (x *GetAllItem) SetKey(v string) { + x.xxx_hidden_Key = v +} + +func (x *GetAllItem) SetValue(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Value = v +} + +type GetAllItem_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Key string + Value []byte +} + +func (b0 GetAllItem_builder) Build() *GetAllItem { + m0 := &GetAllItem{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Value = b.Value + return m0 +} + +// 4MB for a single gRPC request is the default limit- divide by 100 (SetMany/GetMany max items) and add some padding for overhead. 
+type SetRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_Key string `protobuf:"bytes,2,opt,name=key,proto3"` + xxx_hidden_Value []byte `protobuf:"bytes,3,opt,name=value,proto3"` + xxx_hidden_Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetRequest) Reset() { + *x = SetRequest{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetRequest) ProtoMessage() {} + +func (x *SetRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SetRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *SetRequest) GetKey() string { + if x != nil { + return x.xxx_hidden_Key + } + return "" +} + +func (x *SetRequest) GetValue() []byte { + if x != nil { + return x.xxx_hidden_Value + } + return nil +} + +func (x *SetRequest) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + +func (x *SetRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *SetRequest) SetKey(v string) { + x.xxx_hidden_Key = v +} + +func (x *SetRequest) SetValue(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Value = v +} + +func (x *SetRequest) SetPrefix(v string) { + x.xxx_hidden_Prefix = v +} + +type SetRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Key string + Value []byte + Prefix string +} + +func (b0 SetRequest_builder) Build() *SetRequest { + m0 := &SetRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Value = b.Value + x.xxx_hidden_Prefix = b.Prefix + return m0 +} + +type SetResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetResponse) Reset() { + *x = SetResponse{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetResponse) ProtoMessage() {} + +func (x *SetResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type SetResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 SetResponse_builder) Build() *SetResponse { + m0 := &SetResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type SetManyRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_Values map[string][]byte `protobuf:"bytes,2,rep,name=values,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + xxx_hidden_Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetManyRequest) Reset() { + *x = SetManyRequest{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetManyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetManyRequest) ProtoMessage() {} + +func (x *SetManyRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SetManyRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *SetManyRequest) GetValues() map[string][]byte { + if x != nil { + return x.xxx_hidden_Values + } + return nil +} + +func (x *SetManyRequest) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + +func (x *SetManyRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *SetManyRequest) SetValues(v map[string][]byte) { + x.xxx_hidden_Values = v +} + +func (x *SetManyRequest) SetPrefix(v string) { + x.xxx_hidden_Prefix = v +} + +type SetManyRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Values map[string][]byte + Prefix string +} + +func (b0 SetManyRequest_builder) Build() *SetManyRequest { + m0 := &SetManyRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_Values = b.Values + x.xxx_hidden_Prefix = b.Prefix + return m0 +} + +type SetManyResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetManyResponse) Reset() { + *x = SetManyResponse{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetManyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetManyResponse) ProtoMessage() {} + +func (x *SetManyResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type SetManyResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 SetManyResponse_builder) Build() *SetManyResponse { + m0 := &SetManyResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type DeleteRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_Key string `protobuf:"bytes,2,opt,name=key,proto3"` + xxx_hidden_Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DeleteRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *DeleteRequest) GetKey() string { + if x != nil { + return x.xxx_hidden_Key + } + return "" +} + +func (x *DeleteRequest) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + +func (x *DeleteRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *DeleteRequest) SetKey(v string) { + x.xxx_hidden_Key = v +} + +func (x *DeleteRequest) SetPrefix(v string) { + x.xxx_hidden_Prefix = v +} + +type DeleteRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Key string + Prefix string +} + +func (b0 DeleteRequest_builder) Build() *DeleteRequest { + m0 := &DeleteRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Prefix = b.Prefix + return m0 +} + +type DeleteResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteResponse) Reset() { + *x = DeleteResponse{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResponse) ProtoMessage() {} + +func (x *DeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type DeleteResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 DeleteResponse_builder) Build() *DeleteResponse { + m0 := &DeleteResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type DeleteManyRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_Keys []string `protobuf:"bytes,2,rep,name=keys,proto3"` + xxx_hidden_Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteManyRequest) Reset() { + *x = DeleteManyRequest{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteManyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteManyRequest) ProtoMessage() {} + +func (x *DeleteManyRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DeleteManyRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *DeleteManyRequest) GetKeys() []string { + if x != nil { + return x.xxx_hidden_Keys + } + return nil +} + +func (x *DeleteManyRequest) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + +func (x *DeleteManyRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *DeleteManyRequest) SetKeys(v []string) { + x.xxx_hidden_Keys = v +} + +func (x *DeleteManyRequest) SetPrefix(v string) { + x.xxx_hidden_Prefix = v +} + +type DeleteManyRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Keys []string + Prefix string +} + +func (b0 DeleteManyRequest_builder) Build() *DeleteManyRequest { + m0 := &DeleteManyRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_Keys = b.Keys + x.xxx_hidden_Prefix = b.Prefix + return m0 +} + +type DeleteManyResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteManyResponse) Reset() { + *x = DeleteManyResponse{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteManyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteManyResponse) ProtoMessage() {} + +func (x *DeleteManyResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type DeleteManyResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 DeleteManyResponse_builder) Build() *DeleteManyResponse { + m0 := &DeleteManyResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type ClearRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClearRequest) Reset() { + *x = ClearRequest{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClearRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClearRequest) ProtoMessage() {} + +func (x *ClearRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ClearRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *ClearRequest) GetPrefix() string { + if x != nil { + return x.xxx_hidden_Prefix + } + return "" +} + +func (x *ClearRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *ClearRequest) SetPrefix(v string) { + x.xxx_hidden_Prefix = v +} + +type ClearRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + SyncId string + Prefix string +} + +func (b0 ClearRequest_builder) Build() *ClearRequest { + m0 := &ClearRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_Prefix = b.Prefix + return m0 +} + +type ClearResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClearResponse) Reset() { + *x = ClearResponse{} + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClearResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClearResponse) ProtoMessage() {} + +func (x *ClearResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_session_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type ClearResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 ClearResponse_builder) Build() *ClearResponse { + m0 := &ClearResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connectorapi_baton_v1_session_proto protoreflect.FileDescriptor + +const file_c1_connectorapi_baton_v1_session_proto_rawDesc = "" + + "\n" + + "&c1/connectorapi/baton/v1/session.proto\x12\x18c1.connectorapi.baton.v1\x1a\x17validate/validate.proto\"\x7f\n" + + "\n" + + "GetRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12\x1c\n" + + "\x03key\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x03key\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"9\n" + + "\vGetResponse\x12\x14\n" + + "\x05value\x18\x01 \x01(\fR\x05value\x12\x14\n" + + "\x05found\x18\x02 \x01(\bR\x05found\"\x90\x01\n" + + "\x0eGetManyRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12)\n" + + "\x04keys\x18\x02 \x03(\tB\x15\xfaB\x12\x92\x01\x0f\b\x01\x10d\x18\x01\"\ar\x05\x10\x01\x18\x80\x02R\x04keys\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"y\n" + + "\x0fGetManyResponse\x12;\n" + + "\x05items\x18\x01 \x03(\v2%.c1.connectorapi.baton.v1.GetManyItemR\x05items\x12)\n" + + "\x10unprocessed_keys\x18\x02 \x03(\tR\x0funprocessedKeys\"5\n" + + "\vGetManyItem\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"\x83\x01\n" + + "\rGetAllRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12\x1d\n" + + "\n" + + "page_token\x18\x02 \x01(\tR\tpageToken\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"k\n" + + "\x0eGetAllResponse\x12:\n" + + "\x05items\x18\x01 \x03(\v2$.c1.connectorapi.baton.v1.GetAllItemR\x05items\x12\x1d\n" + + "\n" + + "page_token\x18\x02 \x01(\tR\tpageToken\"4\n" + + "\n" + + "GetAllItem\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"\xa3\x01\n" + + "\n" + + "SetRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12\x1c\n" + + "\x03key\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x03key\x12\"\n" + + "\x05value\x18\x03 \x01(\fB\f\xfaB\tz\a\x10\x00\x18\x80\xe8\xfd\x01R\x05value\x12 \n" + + "\x06prefix\x18\x04 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"\r\n" + + "\vSetResponse\"\x8e\x02\n" + + "\x0eSetManyRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12l\n" + + "\x06values\x18\x02 \x03(\v24.c1.connectorapi.baton.v1.SetManyRequest.ValuesEntryB\x1e\xfaB\x1b\x9a\x01\x18\b\x01\x10d\"\ar\x05\x10\x01\x18\x80\x02*\tz\a\x10\x00\x18\x80\xe8\xfd\x01R\x06values\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\x1a9\n" + + "\vValuesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value:\x028\x01\"\x11\n" + + "\x0fSetManyResponse\"\x82\x01\n" + + "\rDeleteRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12\x1c\n" + + "\x03key\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05\x10\x01\x18\x80\x02R\x03key\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"\x10\n" + + "\x0eDeleteResponse\"\x92\x01\n" + + "\x11DeleteManyRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12(\n" + + "\x04keys\x18\x02 
\x03(\tB\x14\xfaB\x11\x92\x01\x0e\b\x01\x10\xc8\x01\"\ar\x05\x10\x01\x18\x80\x02R\x04keys\x12 \n" + + "\x06prefix\x18\x03 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"\x14\n" + + "\x12DeleteManyResponse\"c\n" + + "\fClearRequest\x121\n" + + "\async_id\x18\x01 \x01(\tB\x18\xfaB\x15r\x132\x11^[a-zA-Z0-9]{27}$R\x06syncId\x12 \n" + + "\x06prefix\x18\x02 \x01(\tB\b\xfaB\x05r\x03\x18\x80\x02R\x06prefix\"\x0f\n" + + "\rClearResponse2\x8a\x06\n" + + "\x13BatonSessionService\x12T\n" + + "\x03Get\x12$.c1.connectorapi.baton.v1.GetRequest\x1a%.c1.connectorapi.baton.v1.GetResponse\"\x00\x12`\n" + + "\aGetMany\x12(.c1.connectorapi.baton.v1.GetManyRequest\x1a).c1.connectorapi.baton.v1.GetManyResponse\"\x00\x12]\n" + + "\x06GetAll\x12'.c1.connectorapi.baton.v1.GetAllRequest\x1a(.c1.connectorapi.baton.v1.GetAllResponse\"\x00\x12T\n" + + "\x03Set\x12$.c1.connectorapi.baton.v1.SetRequest\x1a%.c1.connectorapi.baton.v1.SetResponse\"\x00\x12`\n" + + "\aSetMany\x12(.c1.connectorapi.baton.v1.SetManyRequest\x1a).c1.connectorapi.baton.v1.SetManyResponse\"\x00\x12]\n" + + "\x06Delete\x12'.c1.connectorapi.baton.v1.DeleteRequest\x1a(.c1.connectorapi.baton.v1.DeleteResponse\"\x00\x12i\n" + + "\n" + + "DeleteMany\x12+.c1.connectorapi.baton.v1.DeleteManyRequest\x1a,.c1.connectorapi.baton.v1.DeleteManyResponse\"\x00\x12Z\n" + + "\x05Clear\x12&.c1.connectorapi.baton.v1.ClearRequest\x1a'.c1.connectorapi.baton.v1.ClearResponse\"\x00B7Z5gitlab.com/ductone/c1/pkg/pb/c1/connectorapi/baton/v1b\x06proto3" + +var file_c1_connectorapi_baton_v1_session_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_c1_connectorapi_baton_v1_session_proto_goTypes = []any{ + (*GetRequest)(nil), // 0: c1.connectorapi.baton.v1.GetRequest + (*GetResponse)(nil), // 1: c1.connectorapi.baton.v1.GetResponse + (*GetManyRequest)(nil), // 2: c1.connectorapi.baton.v1.GetManyRequest + (*GetManyResponse)(nil), // 3: c1.connectorapi.baton.v1.GetManyResponse + (*GetManyItem)(nil), // 4: c1.connectorapi.baton.v1.GetManyItem + (*GetAllRequest)(nil), // 5: c1.connectorapi.baton.v1.GetAllRequest + (*GetAllResponse)(nil), // 6: c1.connectorapi.baton.v1.GetAllResponse + (*GetAllItem)(nil), // 7: c1.connectorapi.baton.v1.GetAllItem + (*SetRequest)(nil), // 8: c1.connectorapi.baton.v1.SetRequest + (*SetResponse)(nil), // 9: c1.connectorapi.baton.v1.SetResponse + (*SetManyRequest)(nil), // 10: c1.connectorapi.baton.v1.SetManyRequest + (*SetManyResponse)(nil), // 11: c1.connectorapi.baton.v1.SetManyResponse + (*DeleteRequest)(nil), // 12: c1.connectorapi.baton.v1.DeleteRequest + (*DeleteResponse)(nil), // 13: c1.connectorapi.baton.v1.DeleteResponse + (*DeleteManyRequest)(nil), // 14: c1.connectorapi.baton.v1.DeleteManyRequest + (*DeleteManyResponse)(nil), // 15: c1.connectorapi.baton.v1.DeleteManyResponse + (*ClearRequest)(nil), // 16: c1.connectorapi.baton.v1.ClearRequest + (*ClearResponse)(nil), // 17: c1.connectorapi.baton.v1.ClearResponse + nil, // 18: c1.connectorapi.baton.v1.SetManyRequest.ValuesEntry +} +var file_c1_connectorapi_baton_v1_session_proto_depIdxs = []int32{ + 4, // 0: c1.connectorapi.baton.v1.GetManyResponse.items:type_name -> c1.connectorapi.baton.v1.GetManyItem + 7, // 1: c1.connectorapi.baton.v1.GetAllResponse.items:type_name -> c1.connectorapi.baton.v1.GetAllItem + 18, // 2: c1.connectorapi.baton.v1.SetManyRequest.values:type_name -> c1.connectorapi.baton.v1.SetManyRequest.ValuesEntry + 0, // 3: c1.connectorapi.baton.v1.BatonSessionService.Get:input_type -> c1.connectorapi.baton.v1.GetRequest + 2, // 4: 
c1.connectorapi.baton.v1.BatonSessionService.GetMany:input_type -> c1.connectorapi.baton.v1.GetManyRequest + 5, // 5: c1.connectorapi.baton.v1.BatonSessionService.GetAll:input_type -> c1.connectorapi.baton.v1.GetAllRequest + 8, // 6: c1.connectorapi.baton.v1.BatonSessionService.Set:input_type -> c1.connectorapi.baton.v1.SetRequest + 10, // 7: c1.connectorapi.baton.v1.BatonSessionService.SetMany:input_type -> c1.connectorapi.baton.v1.SetManyRequest + 12, // 8: c1.connectorapi.baton.v1.BatonSessionService.Delete:input_type -> c1.connectorapi.baton.v1.DeleteRequest + 14, // 9: c1.connectorapi.baton.v1.BatonSessionService.DeleteMany:input_type -> c1.connectorapi.baton.v1.DeleteManyRequest + 16, // 10: c1.connectorapi.baton.v1.BatonSessionService.Clear:input_type -> c1.connectorapi.baton.v1.ClearRequest + 1, // 11: c1.connectorapi.baton.v1.BatonSessionService.Get:output_type -> c1.connectorapi.baton.v1.GetResponse + 3, // 12: c1.connectorapi.baton.v1.BatonSessionService.GetMany:output_type -> c1.connectorapi.baton.v1.GetManyResponse + 6, // 13: c1.connectorapi.baton.v1.BatonSessionService.GetAll:output_type -> c1.connectorapi.baton.v1.GetAllResponse + 9, // 14: c1.connectorapi.baton.v1.BatonSessionService.Set:output_type -> c1.connectorapi.baton.v1.SetResponse + 11, // 15: c1.connectorapi.baton.v1.BatonSessionService.SetMany:output_type -> c1.connectorapi.baton.v1.SetManyResponse + 13, // 16: c1.connectorapi.baton.v1.BatonSessionService.Delete:output_type -> c1.connectorapi.baton.v1.DeleteResponse + 15, // 17: c1.connectorapi.baton.v1.BatonSessionService.DeleteMany:output_type -> c1.connectorapi.baton.v1.DeleteManyResponse + 17, // 18: c1.connectorapi.baton.v1.BatonSessionService.Clear:output_type -> c1.connectorapi.baton.v1.ClearResponse + 11, // [11:19] is the sub-list for method output_type + 3, // [3:11] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_c1_connectorapi_baton_v1_session_proto_init() } +func file_c1_connectorapi_baton_v1_session_proto_init() { + if File_c1_connectorapi_baton_v1_session_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_session_proto_rawDesc), len(file_c1_connectorapi_baton_v1_session_proto_rawDesc)), + NumEnums: 0, + NumMessages: 19, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_connectorapi_baton_v1_session_proto_goTypes, + DependencyIndexes: file_c1_connectorapi_baton_v1_session_proto_depIdxs, + MessageInfos: file_c1_connectorapi_baton_v1_session_proto_msgTypes, + }.Build() + File_c1_connectorapi_baton_v1_session_proto = out.File + file_c1_connectorapi_baton_v1_session_proto_goTypes = nil + file_c1_connectorapi_baton_v1_session_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1/ratelimit.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1/ratelimit.pb.go index 664980eb..7e91231d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1/ratelimit.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1/ratelimit.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/ratelimit/v1/ratelimit.proto +//go:build !protoopaque + package v1 import ( @@ -13,7 +15,6 @@ import ( durationpb "google.golang.org/protobuf/types/known/durationpb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -74,15 +75,10 @@ func (x RateLimitDescription_Status) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use RateLimitDescription_Status.Descriptor instead. -func (RateLimitDescription_Status) EnumDescriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{0, 0} -} - // https://tools.ietf.org/html/draft-ietf-httpapi-ratelimit-headers-00#section-1.1 // RateLimitDescription is a minimal representation of a rate limit. type RateLimitDescription struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Status RateLimitDescription_Status `protobuf:"varint,1,opt,name=status,proto3,enum=c1.ratelimit.v1.RateLimitDescription_Status" json:"status,omitempty"` Limit int64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` Remaining int64 `protobuf:"varint,3,opt,name=remaining,proto3" json:"remaining,omitempty"` @@ -117,11 +113,6 @@ func (x *RateLimitDescription) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimitDescription.ProtoReflect.Descriptor instead. -func (*RateLimitDescription) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{0} -} - func (x *RateLimitDescription) GetStatus() RateLimitDescription_Status { if x != nil { return x.Status @@ -157,8 +148,61 @@ func (x *RateLimitDescription) GetResetAt() *timestamppb.Timestamp { return nil } +func (x *RateLimitDescription) SetStatus(v RateLimitDescription_Status) { + x.Status = v +} + +func (x *RateLimitDescription) SetLimit(v int64) { + x.Limit = v +} + +func (x *RateLimitDescription) SetRemaining(v int64) { + x.Remaining = v +} + +func (x *RateLimitDescription) SetCount(v int64) { + x.Count = v +} + +func (x *RateLimitDescription) SetResetAt(v *timestamppb.Timestamp) { + x.ResetAt = v +} + +func (x *RateLimitDescription) HasResetAt() bool { + if x == nil { + return false + } + return x.ResetAt != nil +} + +func (x *RateLimitDescription) ClearResetAt() { + x.ResetAt = nil +} + +type RateLimitDescription_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Status RateLimitDescription_Status + Limit int64 + Remaining int64 + Count int64 + ResetAt *timestamppb.Timestamp +} + +func (b0 RateLimitDescription_builder) Build() *RateLimitDescription { + m0 := &RateLimitDescription{} + b, x := &b0, m0 + _, _ = b, x + x.Status = b.Status + x.Limit = b.Limit + x.Remaining = b.Remaining + x.Count = b.Count + x.ResetAt = b.ResetAt + return m0 +} + type ReportRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` RequestToken string `protobuf:"bytes,1,opt,name=request_token,json=requestToken,proto3" json:"request_token,omitempty"` Description *RateLimitDescription `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` Service string `protobuf:"bytes,3,opt,name=service,proto3" json:"service,omitempty"` @@ -192,11 +236,6 @@ func (x *ReportRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReportRequest.ProtoReflect.Descriptor instead. -func (*ReportRequest) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{1} -} - func (x *ReportRequest) GetRequestToken() string { if x != nil { return x.RequestToken @@ -225,8 +264,66 @@ func (x *ReportRequest) GetDescriptors() *RateLimitDescriptors { return nil } +func (x *ReportRequest) SetRequestToken(v string) { + x.RequestToken = v +} + +func (x *ReportRequest) SetDescription(v *RateLimitDescription) { + x.Description = v +} + +func (x *ReportRequest) SetService(v string) { + x.Service = v +} + +func (x *ReportRequest) SetDescriptors(v *RateLimitDescriptors) { + x.Descriptors = v +} + +func (x *ReportRequest) HasDescription() bool { + if x == nil { + return false + } + return x.Description != nil +} + +func (x *ReportRequest) HasDescriptors() bool { + if x == nil { + return false + } + return x.Descriptors != nil +} + +func (x *ReportRequest) ClearDescription() { + x.Description = nil +} + +func (x *ReportRequest) ClearDescriptors() { + x.Descriptors = nil +} + +type ReportRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + RequestToken string + Description *RateLimitDescription + Service string + Descriptors *RateLimitDescriptors +} + +func (b0 ReportRequest_builder) Build() *ReportRequest { + m0 := &ReportRequest{} + b, x := &b0, m0 + _, _ = b, x + x.RequestToken = b.RequestToken + x.Description = b.Description + x.Service = b.Service + x.Descriptors = b.Descriptors + return m0 +} + type ReportResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -256,13 +353,20 @@ func (x *ReportResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReportResponse.ProtoReflect.Descriptor instead. -func (*ReportResponse) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{2} +type ReportResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 ReportResponse_builder) Build() *ReportResponse { + m0 := &ReportResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type RateLimitDescriptors struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Entries []*RateLimitDescriptors_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -293,11 +397,6 @@ func (x *RateLimitDescriptors) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimitDescriptors.ProtoReflect.Descriptor instead. -func (*RateLimitDescriptors) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{3} -} - func (x *RateLimitDescriptors) GetEntries() []*RateLimitDescriptors_Entry { if x != nil { return x.Entries @@ -305,8 +404,26 @@ func (x *RateLimitDescriptors) GetEntries() []*RateLimitDescriptors_Entry { return nil } +func (x *RateLimitDescriptors) SetEntries(v []*RateLimitDescriptors_Entry) { + x.Entries = v +} + +type RateLimitDescriptors_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entries []*RateLimitDescriptors_Entry +} + +func (b0 RateLimitDescriptors_builder) Build() *RateLimitDescriptors { + m0 := &RateLimitDescriptors{} + b, x := &b0, m0 + _, _ = b, x + x.Entries = b.Entries + return m0 +} + type DoRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` RequestToken string `protobuf:"bytes,1,opt,name=request_token,json=requestToken,proto3" json:"request_token,omitempty"` Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` Descriptors *RateLimitDescriptors `protobuf:"bytes,3,opt,name=descriptors,proto3" json:"descriptors,omitempty"` @@ -339,11 +456,6 @@ func (x *DoRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DoRequest.ProtoReflect.Descriptor instead. -func (*DoRequest) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{4} -} - func (x *DoRequest) GetRequestToken() string { if x != nil { return x.RequestToken @@ -365,8 +477,49 @@ func (x *DoRequest) GetDescriptors() *RateLimitDescriptors { return nil } +func (x *DoRequest) SetRequestToken(v string) { + x.RequestToken = v +} + +func (x *DoRequest) SetService(v string) { + x.Service = v +} + +func (x *DoRequest) SetDescriptors(v *RateLimitDescriptors) { + x.Descriptors = v +} + +func (x *DoRequest) HasDescriptors() bool { + if x == nil { + return false + } + return x.Descriptors != nil +} + +func (x *DoRequest) ClearDescriptors() { + x.Descriptors = nil +} + +type DoRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + RequestToken string + Service string + Descriptors *RateLimitDescriptors +} + +func (b0 DoRequest_builder) Build() *DoRequest { + m0 := &DoRequest{} + b, x := &b0, m0 + _, _ = b, x + x.RequestToken = b.RequestToken + x.Service = b.Service + x.Descriptors = b.Descriptors + return m0 +} + type DoResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` RequestToken string `protobuf:"bytes,1,opt,name=request_token,json=requestToken,proto3" json:"request_token,omitempty"` Description *RateLimitDescription `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` unknownFields protoimpl.UnknownFields @@ -398,11 +551,6 @@ func (x *DoResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DoResponse.ProtoReflect.Descriptor instead. -func (*DoResponse) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{5} -} - func (x *DoResponse) GetRequestToken() string { if x != nil { return x.RequestToken @@ -417,8 +565,43 @@ func (x *DoResponse) GetDescription() *RateLimitDescription { return nil } +func (x *DoResponse) SetRequestToken(v string) { + x.RequestToken = v +} + +func (x *DoResponse) SetDescription(v *RateLimitDescription) { + x.Description = v +} + +func (x *DoResponse) HasDescription() bool { + if x == nil { + return false + } + return x.Description != nil +} + +func (x *DoResponse) ClearDescription() { + x.Description = nil +} + +type DoResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + RequestToken string + Description *RateLimitDescription +} + +func (b0 DoResponse_builder) Build() *DoResponse { + m0 := &DoResponse{} + b, x := &b0, m0 + _, _ = b, x + x.RequestToken = b.RequestToken + x.Description = b.Description + return m0 +} + type DisabledLimiter struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -448,13 +631,20 @@ func (x *DisabledLimiter) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DisabledLimiter.ProtoReflect.Descriptor instead. -func (*DisabledLimiter) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{6} +type DisabledLimiter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 DisabledLimiter_builder) Build() *DisabledLimiter { + m0 := &DisabledLimiter{} + b, x := &b0, m0 + _, _ = b, x + return m0 } type SlidingMemoryLimiter struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` UsePercent float64 `protobuf:"fixed64,1,opt,name=use_percent,json=usePercent,proto3" json:"use_percent,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -485,11 +675,6 @@ func (x *SlidingMemoryLimiter) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SlidingMemoryLimiter.ProtoReflect.Descriptor instead. 
-func (*SlidingMemoryLimiter) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{7} -} - func (x *SlidingMemoryLimiter) GetUsePercent() float64 { if x != nil { return x.UsePercent @@ -497,8 +682,26 @@ func (x *SlidingMemoryLimiter) GetUsePercent() float64 { return 0 } +func (x *SlidingMemoryLimiter) SetUsePercent(v float64) { + x.UsePercent = v +} + +type SlidingMemoryLimiter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + UsePercent float64 +} + +func (b0 SlidingMemoryLimiter_builder) Build() *SlidingMemoryLimiter { + m0 := &SlidingMemoryLimiter{} + b, x := &b0, m0 + _, _ = b, x + x.UsePercent = b.UsePercent + return m0 +} + type FixedMemoryLimiter struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Rate int64 `protobuf:"varint,1,opt,name=rate,proto3" json:"rate,omitempty"` Period *durationpb.Duration `protobuf:"bytes,2,opt,name=period,proto3" json:"period,omitempty"` unknownFields protoimpl.UnknownFields @@ -530,11 +733,6 @@ func (x *FixedMemoryLimiter) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FixedMemoryLimiter.ProtoReflect.Descriptor instead. -func (*FixedMemoryLimiter) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{8} -} - func (x *FixedMemoryLimiter) GetRate() int64 { if x != nil { return x.Rate @@ -549,8 +747,43 @@ func (x *FixedMemoryLimiter) GetPeriod() *durationpb.Duration { return nil } +func (x *FixedMemoryLimiter) SetRate(v int64) { + x.Rate = v +} + +func (x *FixedMemoryLimiter) SetPeriod(v *durationpb.Duration) { + x.Period = v +} + +func (x *FixedMemoryLimiter) HasPeriod() bool { + if x == nil { + return false + } + return x.Period != nil +} + +func (x *FixedMemoryLimiter) ClearPeriod() { + x.Period = nil +} + +type FixedMemoryLimiter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Rate int64 + Period *durationpb.Duration +} + +func (b0 FixedMemoryLimiter_builder) Build() *FixedMemoryLimiter { + m0 := &FixedMemoryLimiter{} + b, x := &b0, m0 + _, _ = b, x + x.Rate = b.Rate + x.Period = b.Period + return m0 +} + type ExternalLimiter struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` Options map[string]string `protobuf:"bytes,1,rep,name=options,proto3" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields @@ -582,11 +815,6 @@ func (x *ExternalLimiter) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExternalLimiter.ProtoReflect.Descriptor instead. -func (*ExternalLimiter) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{9} -} - func (x *ExternalLimiter) GetAddress() string { if x != nil { return x.Address @@ -601,8 +829,32 @@ func (x *ExternalLimiter) GetOptions() map[string]string { return nil } +func (x *ExternalLimiter) SetAddress(v string) { + x.Address = v +} + +func (x *ExternalLimiter) SetOptions(v map[string]string) { + x.Options = v +} + +type ExternalLimiter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Address string + Options map[string]string +} + +func (b0 ExternalLimiter_builder) Build() *ExternalLimiter { + m0 := &ExternalLimiter{} + b, x := &b0, m0 + _, _ = b, x + x.Address = b.Address + x.Options = b.Options + return m0 +} + type RateLimiterConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` // Types that are valid to be assigned to Type: // // *RateLimiterConfig_Disabled @@ -639,11 +891,6 @@ func (x *RateLimiterConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimiterConfig.ProtoReflect.Descriptor instead. -func (*RateLimiterConfig) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{10} -} - func (x *RateLimiterConfig) GetType() isRateLimiterConfig_Type { if x != nil { return x.Type @@ -687,6 +934,169 @@ func (x *RateLimiterConfig) GetExternal() *ExternalLimiter { return nil } +func (x *RateLimiterConfig) SetDisabled(v *DisabledLimiter) { + if v == nil { + x.Type = nil + return + } + x.Type = &RateLimiterConfig_Disabled{v} +} + +func (x *RateLimiterConfig) SetSlidingMem(v *SlidingMemoryLimiter) { + if v == nil { + x.Type = nil + return + } + x.Type = &RateLimiterConfig_SlidingMem{v} +} + +func (x *RateLimiterConfig) SetFixedMem(v *FixedMemoryLimiter) { + if v == nil { + x.Type = nil + return + } + x.Type = &RateLimiterConfig_FixedMem{v} +} + +func (x *RateLimiterConfig) SetExternal(v *ExternalLimiter) { + if v == nil { + x.Type = nil + return + } + x.Type = &RateLimiterConfig_External{v} +} + +func (x *RateLimiterConfig) HasType() bool { + if x == nil { + return false + } + return x.Type != nil +} + +func (x *RateLimiterConfig) HasDisabled() bool { + if x == nil { + return false + } + _, ok := x.Type.(*RateLimiterConfig_Disabled) + return ok +} + +func (x *RateLimiterConfig) HasSlidingMem() bool { + if x == nil { + return false + } + _, ok := x.Type.(*RateLimiterConfig_SlidingMem) + return ok +} + +func (x *RateLimiterConfig) HasFixedMem() bool { + if x == nil { + return false + } + _, ok := x.Type.(*RateLimiterConfig_FixedMem) + return ok +} + +func (x *RateLimiterConfig) HasExternal() bool { + if x == nil { + return false + } + _, ok := x.Type.(*RateLimiterConfig_External) + return ok +} + +func (x *RateLimiterConfig) ClearType() { + x.Type = nil +} + +func (x *RateLimiterConfig) ClearDisabled() { + if _, ok := x.Type.(*RateLimiterConfig_Disabled); ok { + x.Type = nil + } +} + +func (x *RateLimiterConfig) ClearSlidingMem() { + if _, ok := x.Type.(*RateLimiterConfig_SlidingMem); ok { + x.Type = nil + } +} + +func (x *RateLimiterConfig) ClearFixedMem() { + if _, ok := x.Type.(*RateLimiterConfig_FixedMem); ok { + x.Type = nil + } +} + +func (x *RateLimiterConfig) ClearExternal() { + if _, ok := x.Type.(*RateLimiterConfig_External); ok { + x.Type = nil + } +} + +const RateLimiterConfig_Type_not_set_case case_RateLimiterConfig_Type = 0 +const RateLimiterConfig_Disabled_case case_RateLimiterConfig_Type = 100 +const RateLimiterConfig_SlidingMem_case case_RateLimiterConfig_Type = 101 +const RateLimiterConfig_FixedMem_case case_RateLimiterConfig_Type = 102 +const RateLimiterConfig_External_case case_RateLimiterConfig_Type = 103 + +func (x *RateLimiterConfig) WhichType() case_RateLimiterConfig_Type { + if x == nil { + return RateLimiterConfig_Type_not_set_case + } + switch x.Type.(type) { + case *RateLimiterConfig_Disabled: + return RateLimiterConfig_Disabled_case + case *RateLimiterConfig_SlidingMem: + return 
RateLimiterConfig_SlidingMem_case + case *RateLimiterConfig_FixedMem: + return RateLimiterConfig_FixedMem_case + case *RateLimiterConfig_External: + return RateLimiterConfig_External_case + default: + return RateLimiterConfig_Type_not_set_case + } +} + +type RateLimiterConfig_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Fields of oneof Type: + Disabled *DisabledLimiter + SlidingMem *SlidingMemoryLimiter + FixedMem *FixedMemoryLimiter + External *ExternalLimiter + // -- end of Type +} + +func (b0 RateLimiterConfig_builder) Build() *RateLimiterConfig { + m0 := &RateLimiterConfig{} + b, x := &b0, m0 + _, _ = b, x + if b.Disabled != nil { + x.Type = &RateLimiterConfig_Disabled{b.Disabled} + } + if b.SlidingMem != nil { + x.Type = &RateLimiterConfig_SlidingMem{b.SlidingMem} + } + if b.FixedMem != nil { + x.Type = &RateLimiterConfig_FixedMem{b.FixedMem} + } + if b.External != nil { + x.Type = &RateLimiterConfig_External{b.External} + } + return m0 +} + +type case_RateLimiterConfig_Type protoreflect.FieldNumber + +func (x case_RateLimiterConfig_Type) String() string { + md := file_c1_ratelimit_v1_ratelimit_proto_msgTypes[10].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type isRateLimiterConfig_Type interface { isRateLimiterConfig_Type() } @@ -716,7 +1126,7 @@ func (*RateLimiterConfig_FixedMem) isRateLimiterConfig_Type() {} func (*RateLimiterConfig_External) isRateLimiterConfig_Type() {} type RateLimitDescriptors_Entry struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields @@ -748,11 +1158,6 @@ func (x *RateLimitDescriptors_Entry) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimitDescriptors_Entry.ProtoReflect.Descriptor instead. 
-func (*RateLimitDescriptors_Entry) Descriptor() ([]byte, []int) { - return file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP(), []int{3, 0} -} - func (x *RateLimitDescriptors_Entry) GetKey() string { if x != nil { return x.Key @@ -767,151 +1172,90 @@ func (x *RateLimitDescriptors_Entry) GetValue() string { return "" } -var File_c1_ratelimit_v1_ratelimit_proto protoreflect.FileDescriptor +func (x *RateLimitDescriptors_Entry) SetKey(v string) { + x.Key = v +} -var file_c1_ratelimit_v1_ratelimit_proto_rawDesc = string([]byte{ - 0x0a, 0x1f, 0x63, 0x31, 0x2f, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2f, 0x76, - 0x31, 0x2f, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x02, 0x0a, - 0x14, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x61, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x07, 0x72, 0x65, 0x73, 0x65, 0x74, 0x41, 0x74, 0x22, 0x69, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, - 0x0a, 0x09, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x14, 0x0a, - 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x49, 0x4d, 0x49, - 0x54, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x04, 0x22, 0xe0, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 
0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x47, - 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x10, 0x0a, 0x0e, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xaa, 0x01, 0x0a, - 0x14, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x4f, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x65, - 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x41, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x19, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x09, 0x44, 0x6f, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x31, - 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x73, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, - 0x7a, 0x0a, 0x0a, 0x44, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, - 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x47, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, - 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x11, 0x0a, 0x0f, 0x44, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22, 0x37, - 0x0a, 0x14, 0x53, 0x6c, 0x69, 0x64, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x73, 0x65, - 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0x5b, 0x0a, 0x12, 0x46, 0x69, 0x78, 0x65, 0x64, - 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x04, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x72, 0x61, 0x74, - 0x65, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, - 0x72, 0x69, 0x6f, 0x64, 0x22, 0xb0, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x12, 0x47, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, - 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, - 0x72, 0x48, 0x00, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x48, 0x0a, - 0x0b, 0x73, 0x6c, 0x69, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x65, 0x6d, 0x18, 0x65, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6c, 0x69, 0x64, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x6d, 0x6f, - 0x72, 0x79, 0x4c, 0x69, 0x6d, 0x69, 
0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x6c, 0x69, - 0x64, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x6d, 0x12, 0x42, 0x0a, 0x09, 0x66, 0x69, 0x78, 0x65, 0x64, - 0x5f, 0x6d, 0x65, 0x6d, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x31, 0x2e, - 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x78, - 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x48, - 0x00, 0x52, 0x08, 0x66, 0x69, 0x78, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x12, 0x3e, 0x0a, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x48, - 0x00, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x06, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x32, 0x9e, 0x01, 0x0a, 0x12, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x02, 0x44, 0x6f, - 0x12, 0x1a, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, - 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, - 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x52, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, - 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, - 0x72, 0x61, 0x74, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) +func (x *RateLimitDescriptors_Entry) SetValue(v string) { + x.Value = v +} -var ( - file_c1_ratelimit_v1_ratelimit_proto_rawDescOnce sync.Once - file_c1_ratelimit_v1_ratelimit_proto_rawDescData []byte -) +type RateLimitDescriptors_Entry_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Key string + Value string +} -func file_c1_ratelimit_v1_ratelimit_proto_rawDescGZIP() []byte { - file_c1_ratelimit_v1_ratelimit_proto_rawDescOnce.Do(func() { - file_c1_ratelimit_v1_ratelimit_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_ratelimit_v1_ratelimit_proto_rawDesc), len(file_c1_ratelimit_v1_ratelimit_proto_rawDesc))) - }) - return file_c1_ratelimit_v1_ratelimit_proto_rawDescData +func (b0 RateLimitDescriptors_Entry_builder) Build() *RateLimitDescriptors_Entry { + m0 := &RateLimitDescriptors_Entry{} + b, x := &b0, m0 + _, _ = b, x + x.Key = b.Key + x.Value = b.Value + return m0 } +var File_c1_ratelimit_v1_ratelimit_proto protoreflect.FileDescriptor + +const file_c1_ratelimit_v1_ratelimit_proto_rawDesc = "" + + "\n" + + "\x1fc1/ratelimit/v1/ratelimit.proto\x12\x0fc1.ratelimit.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xc8\x02\n" + + "\x14RateLimitDescription\x12D\n" + + "\x06status\x18\x01 \x01(\x0e2,.c1.ratelimit.v1.RateLimitDescription.StatusR\x06status\x12\x14\n" + + "\x05limit\x18\x02 \x01(\x03R\x05limit\x12\x1c\n" + + "\tremaining\x18\x03 \x01(\x03R\tremaining\x12\x14\n" + + "\x05count\x18\x05 \x01(\x03R\x05count\x125\n" + + "\breset_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\aresetAt\"i\n" + + "\x06Status\x12\x16\n" + + "\x12STATUS_UNSPECIFIED\x10\x00\x12\r\n" + + "\tSTATUS_OK\x10\x01\x12\x14\n" + + "\x10STATUS_OVERLIMIT\x10\x02\x12\x10\n" + + "\fSTATUS_ERROR\x10\x03\x12\x10\n" + + "\fSTATUS_EMPTY\x10\x04\"\xe0\x01\n" + + "\rReportRequest\x12#\n" + + "\rrequest_token\x18\x01 \x01(\tR\frequestToken\x12G\n" + + "\vdescription\x18\x02 \x01(\v2%.c1.ratelimit.v1.RateLimitDescriptionR\vdescription\x12\x18\n" + + "\aservice\x18\x03 \x01(\tR\aservice\x12G\n" + + "\vdescriptors\x18\x04 \x01(\v2%.c1.ratelimit.v1.RateLimitDescriptorsR\vdescriptors\"\x10\n" + + "\x0eReportResponse\"\xaa\x01\n" + + "\x14RateLimitDescriptors\x12O\n" + + "\aentries\x18\x01 \x03(\v2+.c1.ratelimit.v1.RateLimitDescriptors.EntryB\b\xfaB\x05\x92\x01\x02\b\x01R\aentries\x1aA\n" + + "\x05Entry\x12\x19\n" + + "\x03key\x18\x01 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\x03key\x12\x1d\n" + + "\x05value\x18\x02 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\x05value\"\x93\x01\n" + + "\tDoRequest\x12#\n" + + "\rrequest_token\x18\x01 \x01(\tR\frequestToken\x12\x18\n" + + "\aservice\x18\x02 \x01(\tR\aservice\x12G\n" + + "\vdescriptors\x18\x03 \x01(\v2%.c1.ratelimit.v1.RateLimitDescriptorsR\vdescriptors\"z\n" + + "\n" + + "DoResponse\x12#\n" + + "\rrequest_token\x18\x01 \x01(\tR\frequestToken\x12G\n" + + "\vdescription\x18\x02 \x01(\v2%.c1.ratelimit.v1.RateLimitDescriptionR\vdescription\"\x11\n" + + "\x0fDisabledLimiter\"7\n" + + "\x14SlidingMemoryLimiter\x12\x1f\n" + + "\vuse_percent\x18\x01 \x01(\x01R\n" + + "usePercent\"[\n" + + "\x12FixedMemoryLimiter\x12\x12\n" + + "\x04rate\x18\x01 \x01(\x03R\x04rate\x121\n" + + "\x06period\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x06period\"\xb0\x01\n" + + "\x0fExternalLimiter\x12\x18\n" + + "\aaddress\x18\x02 \x01(\tR\aaddress\x12G\n" + + "\aoptions\x18\x01 \x03(\v2-.c1.ratelimit.v1.ExternalLimiter.OptionsEntryR\aoptions\x1a:\n" + + "\fOptionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xa9\x02\n" + + "\x11RateLimiterConfig\x12>\n" + + "\bdisabled\x18d \x01(\v2 .c1.ratelimit.v1.DisabledLimiterH\x00R\bdisabled\x12H\n" + + "\vsliding_mem\x18e \x01(\v2%.c1.ratelimit.v1.SlidingMemoryLimiterH\x00R\n" + + 
"slidingMem\x12B\n" + + "\tfixed_mem\x18f \x01(\v2#.c1.ratelimit.v1.FixedMemoryLimiterH\x00R\bfixedMem\x12>\n" + + "\bexternal\x18g \x01(\v2 .c1.ratelimit.v1.ExternalLimiterH\x00R\bexternalB\x06\n" + + "\x04type2\x9e\x01\n" + + "\x12RateLimiterService\x12=\n" + + "\x02Do\x12\x1a.c1.ratelimit.v1.DoRequest\x1a\x1b.c1.ratelimit.v1.DoResponse\x12I\n" + + "\x06Report\x12\x1e.c1.ratelimit.v1.ReportRequest\x1a\x1f.c1.ratelimit.v1.ReportResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1b\x06proto3" + var file_c1_ratelimit_v1_ratelimit_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_c1_ratelimit_v1_ratelimit_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_c1_ratelimit_v1_ratelimit_proto_goTypes = []any{ diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1/ratelimit_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1/ratelimit_protoopaque.pb.go new file mode 100644 index 00000000..29c584f8 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1/ratelimit_protoopaque.pb.go @@ -0,0 +1,1322 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/ratelimit/v1/ratelimit.proto + +//go:build protoopaque + +package v1 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RateLimitDescription_Status int32 + +const ( + RateLimitDescription_STATUS_UNSPECIFIED RateLimitDescription_Status = 0 + RateLimitDescription_STATUS_OK RateLimitDescription_Status = 1 + RateLimitDescription_STATUS_OVERLIMIT RateLimitDescription_Status = 2 + RateLimitDescription_STATUS_ERROR RateLimitDescription_Status = 3 + RateLimitDescription_STATUS_EMPTY RateLimitDescription_Status = 4 +) + +// Enum value maps for RateLimitDescription_Status. 
+var ( + RateLimitDescription_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "STATUS_OK", + 2: "STATUS_OVERLIMIT", + 3: "STATUS_ERROR", + 4: "STATUS_EMPTY", + } + RateLimitDescription_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "STATUS_OK": 1, + "STATUS_OVERLIMIT": 2, + "STATUS_ERROR": 3, + "STATUS_EMPTY": 4, + } +) + +func (x RateLimitDescription_Status) Enum() *RateLimitDescription_Status { + p := new(RateLimitDescription_Status) + *p = x + return p +} + +func (x RateLimitDescription_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RateLimitDescription_Status) Descriptor() protoreflect.EnumDescriptor { + return file_c1_ratelimit_v1_ratelimit_proto_enumTypes[0].Descriptor() +} + +func (RateLimitDescription_Status) Type() protoreflect.EnumType { + return &file_c1_ratelimit_v1_ratelimit_proto_enumTypes[0] +} + +func (x RateLimitDescription_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// https://tools.ietf.org/html/draft-ietf-httpapi-ratelimit-headers-00#section-1.1 +// RateLimitDescription is a minimal representation of a rate limit. +type RateLimitDescription struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Status RateLimitDescription_Status `protobuf:"varint,1,opt,name=status,proto3,enum=c1.ratelimit.v1.RateLimitDescription_Status"` + xxx_hidden_Limit int64 `protobuf:"varint,2,opt,name=limit,proto3"` + xxx_hidden_Remaining int64 `protobuf:"varint,3,opt,name=remaining,proto3"` + xxx_hidden_Count int64 `protobuf:"varint,5,opt,name=count,proto3"` + xxx_hidden_ResetAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=reset_at,json=resetAt,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RateLimitDescription) Reset() { + *x = RateLimitDescription{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RateLimitDescription) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitDescription) ProtoMessage() {} + +func (x *RateLimitDescription) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RateLimitDescription) GetStatus() RateLimitDescription_Status { + if x != nil { + return x.xxx_hidden_Status + } + return RateLimitDescription_STATUS_UNSPECIFIED +} + +func (x *RateLimitDescription) GetLimit() int64 { + if x != nil { + return x.xxx_hidden_Limit + } + return 0 +} + +func (x *RateLimitDescription) GetRemaining() int64 { + if x != nil { + return x.xxx_hidden_Remaining + } + return 0 +} + +func (x *RateLimitDescription) GetCount() int64 { + if x != nil { + return x.xxx_hidden_Count + } + return 0 +} + +func (x *RateLimitDescription) GetResetAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_ResetAt + } + return nil +} + +func (x *RateLimitDescription) SetStatus(v RateLimitDescription_Status) { + x.xxx_hidden_Status = v +} + +func (x *RateLimitDescription) SetLimit(v int64) { + x.xxx_hidden_Limit = v +} + +func (x *RateLimitDescription) SetRemaining(v int64) { + x.xxx_hidden_Remaining = v +} + +func (x *RateLimitDescription) SetCount(v int64) { + x.xxx_hidden_Count = v +} + +func (x *RateLimitDescription) 
SetResetAt(v *timestamppb.Timestamp) { + x.xxx_hidden_ResetAt = v +} + +func (x *RateLimitDescription) HasResetAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResetAt != nil +} + +func (x *RateLimitDescription) ClearResetAt() { + x.xxx_hidden_ResetAt = nil +} + +type RateLimitDescription_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Status RateLimitDescription_Status + Limit int64 + Remaining int64 + Count int64 + ResetAt *timestamppb.Timestamp +} + +func (b0 RateLimitDescription_builder) Build() *RateLimitDescription { + m0 := &RateLimitDescription{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Limit = b.Limit + x.xxx_hidden_Remaining = b.Remaining + x.xxx_hidden_Count = b.Count + x.xxx_hidden_ResetAt = b.ResetAt + return m0 +} + +type ReportRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_RequestToken string `protobuf:"bytes,1,opt,name=request_token,json=requestToken,proto3"` + xxx_hidden_Description *RateLimitDescription `protobuf:"bytes,2,opt,name=description,proto3"` + xxx_hidden_Service string `protobuf:"bytes,3,opt,name=service,proto3"` + xxx_hidden_Descriptors *RateLimitDescriptors `protobuf:"bytes,4,opt,name=descriptors,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReportRequest) Reset() { + *x = ReportRequest{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReportRequest) ProtoMessage() {} + +func (x *ReportRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ReportRequest) GetRequestToken() string { + if x != nil { + return x.xxx_hidden_RequestToken + } + return "" +} + +func (x *ReportRequest) GetDescription() *RateLimitDescription { + if x != nil { + return x.xxx_hidden_Description + } + return nil +} + +func (x *ReportRequest) GetService() string { + if x != nil { + return x.xxx_hidden_Service + } + return "" +} + +func (x *ReportRequest) GetDescriptors() *RateLimitDescriptors { + if x != nil { + return x.xxx_hidden_Descriptors + } + return nil +} + +func (x *ReportRequest) SetRequestToken(v string) { + x.xxx_hidden_RequestToken = v +} + +func (x *ReportRequest) SetDescription(v *RateLimitDescription) { + x.xxx_hidden_Description = v +} + +func (x *ReportRequest) SetService(v string) { + x.xxx_hidden_Service = v +} + +func (x *ReportRequest) SetDescriptors(v *RateLimitDescriptors) { + x.xxx_hidden_Descriptors = v +} + +func (x *ReportRequest) HasDescription() bool { + if x == nil { + return false + } + return x.xxx_hidden_Description != nil +} + +func (x *ReportRequest) HasDescriptors() bool { + if x == nil { + return false + } + return x.xxx_hidden_Descriptors != nil +} + +func (x *ReportRequest) ClearDescription() { + x.xxx_hidden_Description = nil +} + +func (x *ReportRequest) ClearDescriptors() { + x.xxx_hidden_Descriptors = nil +} + +type ReportRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + RequestToken string + Description *RateLimitDescription + Service string + Descriptors *RateLimitDescriptors +} + +func (b0 ReportRequest_builder) Build() *ReportRequest { + m0 := &ReportRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_RequestToken = b.RequestToken + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Service = b.Service + x.xxx_hidden_Descriptors = b.Descriptors + return m0 +} + +type ReportResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReportResponse) Reset() { + *x = ReportResponse{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReportResponse) ProtoMessage() {} + +func (x *ReportResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type ReportResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 ReportResponse_builder) Build() *ReportResponse { + m0 := &ReportResponse{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type RateLimitDescriptors struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entries *[]*RateLimitDescriptors_Entry `protobuf:"bytes,1,rep,name=entries,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RateLimitDescriptors) Reset() { + *x = RateLimitDescriptors{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RateLimitDescriptors) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitDescriptors) ProtoMessage() {} + +func (x *RateLimitDescriptors) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RateLimitDescriptors) GetEntries() []*RateLimitDescriptors_Entry { + if x != nil { + if x.xxx_hidden_Entries != nil { + return *x.xxx_hidden_Entries + } + } + return nil +} + +func (x *RateLimitDescriptors) SetEntries(v []*RateLimitDescriptors_Entry) { + x.xxx_hidden_Entries = &v +} + +type RateLimitDescriptors_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entries []*RateLimitDescriptors_Entry +} + +func (b0 RateLimitDescriptors_builder) Build() *RateLimitDescriptors { + m0 := &RateLimitDescriptors{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entries = &b.Entries + return m0 +} + +type DoRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_RequestToken string `protobuf:"bytes,1,opt,name=request_token,json=requestToken,proto3"` + xxx_hidden_Service string `protobuf:"bytes,2,opt,name=service,proto3"` + xxx_hidden_Descriptors *RateLimitDescriptors `protobuf:"bytes,3,opt,name=descriptors,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DoRequest) Reset() { + *x = DoRequest{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoRequest) ProtoMessage() {} + +func (x *DoRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DoRequest) GetRequestToken() string { + if x != nil { + return x.xxx_hidden_RequestToken + } + return "" +} + +func (x *DoRequest) GetService() string { + if x != nil { + return x.xxx_hidden_Service + } + return "" +} + +func (x *DoRequest) GetDescriptors() *RateLimitDescriptors { + if x != nil { + return x.xxx_hidden_Descriptors + } + return nil +} + +func (x *DoRequest) SetRequestToken(v string) { + x.xxx_hidden_RequestToken = v +} + +func (x *DoRequest) SetService(v string) { + x.xxx_hidden_Service = v +} + +func (x *DoRequest) SetDescriptors(v *RateLimitDescriptors) { + x.xxx_hidden_Descriptors = v +} + +func (x *DoRequest) HasDescriptors() bool { + if x == nil { + return false + } + return x.xxx_hidden_Descriptors != nil +} + +func (x *DoRequest) ClearDescriptors() { + x.xxx_hidden_Descriptors = nil +} + +type DoRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + RequestToken string + Service string + Descriptors *RateLimitDescriptors +} + +func (b0 DoRequest_builder) Build() *DoRequest { + m0 := &DoRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_RequestToken = b.RequestToken + x.xxx_hidden_Service = b.Service + x.xxx_hidden_Descriptors = b.Descriptors + return m0 +} + +type DoResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_RequestToken string `protobuf:"bytes,1,opt,name=request_token,json=requestToken,proto3"` + xxx_hidden_Description *RateLimitDescription `protobuf:"bytes,2,opt,name=description,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DoResponse) Reset() { + *x = DoResponse{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoResponse) ProtoMessage() {} + +func (x *DoResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *DoResponse) GetRequestToken() string { + if x != nil { + return x.xxx_hidden_RequestToken + } + return "" +} + +func (x *DoResponse) GetDescription() *RateLimitDescription { + if x != nil { + return x.xxx_hidden_Description + } + return nil +} + +func (x *DoResponse) SetRequestToken(v string) { + x.xxx_hidden_RequestToken = v +} + +func (x *DoResponse) SetDescription(v *RateLimitDescription) { + x.xxx_hidden_Description = v +} + +func (x *DoResponse) HasDescription() bool { + if x == nil { + return false + } + return x.xxx_hidden_Description != nil +} + +func (x *DoResponse) ClearDescription() { + x.xxx_hidden_Description = nil +} + +type DoResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + RequestToken string + Description *RateLimitDescription +} + +func (b0 DoResponse_builder) Build() *DoResponse { + m0 := &DoResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_RequestToken = b.RequestToken + x.xxx_hidden_Description = b.Description + return m0 +} + +type DisabledLimiter struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DisabledLimiter) Reset() { + *x = DisabledLimiter{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DisabledLimiter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DisabledLimiter) ProtoMessage() {} + +func (x *DisabledLimiter) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type DisabledLimiter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ +} + +func (b0 DisabledLimiter_builder) Build() *DisabledLimiter { + m0 := &DisabledLimiter{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type SlidingMemoryLimiter struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_UsePercent float64 `protobuf:"fixed64,1,opt,name=use_percent,json=usePercent,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SlidingMemoryLimiter) Reset() { + *x = SlidingMemoryLimiter{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SlidingMemoryLimiter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SlidingMemoryLimiter) ProtoMessage() {} + +func (x *SlidingMemoryLimiter) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SlidingMemoryLimiter) GetUsePercent() float64 { + if x != nil { + return x.xxx_hidden_UsePercent + } + return 0 +} + +func (x *SlidingMemoryLimiter) SetUsePercent(v float64) { + x.xxx_hidden_UsePercent = v +} + +type SlidingMemoryLimiter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + UsePercent float64 +} + +func (b0 SlidingMemoryLimiter_builder) Build() *SlidingMemoryLimiter { + m0 := &SlidingMemoryLimiter{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_UsePercent = b.UsePercent + return m0 +} + +type FixedMemoryLimiter struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Rate int64 `protobuf:"varint,1,opt,name=rate,proto3"` + xxx_hidden_Period *durationpb.Duration `protobuf:"bytes,2,opt,name=period,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FixedMemoryLimiter) Reset() { + *x = FixedMemoryLimiter{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FixedMemoryLimiter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FixedMemoryLimiter) ProtoMessage() {} + +func (x *FixedMemoryLimiter) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *FixedMemoryLimiter) GetRate() int64 { + if x != nil { + return x.xxx_hidden_Rate + } + return 0 +} + +func (x *FixedMemoryLimiter) GetPeriod() *durationpb.Duration { + if x != nil { + return x.xxx_hidden_Period + } + return nil +} + +func (x *FixedMemoryLimiter) SetRate(v int64) { + x.xxx_hidden_Rate = v +} + +func (x *FixedMemoryLimiter) SetPeriod(v *durationpb.Duration) { + x.xxx_hidden_Period = v +} + +func (x *FixedMemoryLimiter) HasPeriod() bool { + if x == nil { + return false + } + return x.xxx_hidden_Period != nil +} + +func (x *FixedMemoryLimiter) ClearPeriod() { + x.xxx_hidden_Period = nil +} + +type FixedMemoryLimiter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Rate int64 + Period *durationpb.Duration +} + +func (b0 FixedMemoryLimiter_builder) Build() *FixedMemoryLimiter { + m0 := &FixedMemoryLimiter{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Rate = b.Rate + x.xxx_hidden_Period = b.Period + return m0 +} + +type ExternalLimiter struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Address string `protobuf:"bytes,2,opt,name=address,proto3"` + xxx_hidden_Options map[string]string `protobuf:"bytes,1,rep,name=options,proto3" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExternalLimiter) Reset() { + *x = ExternalLimiter{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExternalLimiter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalLimiter) ProtoMessage() {} + +func (x *ExternalLimiter) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ExternalLimiter) GetAddress() string { + if x != nil { + return x.xxx_hidden_Address + } + return "" +} + +func (x *ExternalLimiter) GetOptions() map[string]string { + if x != nil { + return x.xxx_hidden_Options + } + return nil +} + +func (x *ExternalLimiter) SetAddress(v string) { + x.xxx_hidden_Address = v +} + +func (x *ExternalLimiter) SetOptions(v map[string]string) { + x.xxx_hidden_Options = v +} + +type ExternalLimiter_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Address string + Options map[string]string +} + +func (b0 ExternalLimiter_builder) Build() *ExternalLimiter { + m0 := &ExternalLimiter{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Address = b.Address + x.xxx_hidden_Options = b.Options + return m0 +} + +type RateLimiterConfig struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Type isRateLimiterConfig_Type `protobuf_oneof:"type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RateLimiterConfig) Reset() { + *x = RateLimiterConfig{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RateLimiterConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimiterConfig) ProtoMessage() {} + +func (x *RateLimiterConfig) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RateLimiterConfig) GetDisabled() *DisabledLimiter { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*rateLimiterConfig_Disabled); ok { + return x.Disabled + } + } + return nil +} + +func (x *RateLimiterConfig) GetSlidingMem() *SlidingMemoryLimiter { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*rateLimiterConfig_SlidingMem); ok { + return x.SlidingMem + } + } + return nil +} + +func (x *RateLimiterConfig) GetFixedMem() *FixedMemoryLimiter { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*rateLimiterConfig_FixedMem); ok { + return x.FixedMem + } + } + return nil +} + +func (x *RateLimiterConfig) GetExternal() *ExternalLimiter { + if x != nil { + if x, ok := x.xxx_hidden_Type.(*rateLimiterConfig_External); ok { + return x.External + } + } + return nil +} + +func (x *RateLimiterConfig) SetDisabled(v *DisabledLimiter) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &rateLimiterConfig_Disabled{v} +} + +func (x *RateLimiterConfig) SetSlidingMem(v *SlidingMemoryLimiter) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &rateLimiterConfig_SlidingMem{v} +} + +func (x *RateLimiterConfig) SetFixedMem(v *FixedMemoryLimiter) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &rateLimiterConfig_FixedMem{v} +} + +func (x *RateLimiterConfig) SetExternal(v *ExternalLimiter) { + if v == nil { + x.xxx_hidden_Type = nil + return + } + x.xxx_hidden_Type = &rateLimiterConfig_External{v} +} + +func (x *RateLimiterConfig) HasType() bool { + if x == nil { + return false + } + return x.xxx_hidden_Type != nil +} + +func (x *RateLimiterConfig) HasDisabled() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*rateLimiterConfig_Disabled) + return ok +} + +func (x *RateLimiterConfig) HasSlidingMem() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*rateLimiterConfig_SlidingMem) + return ok +} + +func (x *RateLimiterConfig) HasFixedMem() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*rateLimiterConfig_FixedMem) + return ok +} + +func (x *RateLimiterConfig) HasExternal() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Type.(*rateLimiterConfig_External) + return ok +} + +func (x *RateLimiterConfig) ClearType() { + x.xxx_hidden_Type = nil +} + +func (x *RateLimiterConfig) ClearDisabled() { + if _, ok := 
x.xxx_hidden_Type.(*rateLimiterConfig_Disabled); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *RateLimiterConfig) ClearSlidingMem() { + if _, ok := x.xxx_hidden_Type.(*rateLimiterConfig_SlidingMem); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *RateLimiterConfig) ClearFixedMem() { + if _, ok := x.xxx_hidden_Type.(*rateLimiterConfig_FixedMem); ok { + x.xxx_hidden_Type = nil + } +} + +func (x *RateLimiterConfig) ClearExternal() { + if _, ok := x.xxx_hidden_Type.(*rateLimiterConfig_External); ok { + x.xxx_hidden_Type = nil + } +} + +const RateLimiterConfig_Type_not_set_case case_RateLimiterConfig_Type = 0 +const RateLimiterConfig_Disabled_case case_RateLimiterConfig_Type = 100 +const RateLimiterConfig_SlidingMem_case case_RateLimiterConfig_Type = 101 +const RateLimiterConfig_FixedMem_case case_RateLimiterConfig_Type = 102 +const RateLimiterConfig_External_case case_RateLimiterConfig_Type = 103 + +func (x *RateLimiterConfig) WhichType() case_RateLimiterConfig_Type { + if x == nil { + return RateLimiterConfig_Type_not_set_case + } + switch x.xxx_hidden_Type.(type) { + case *rateLimiterConfig_Disabled: + return RateLimiterConfig_Disabled_case + case *rateLimiterConfig_SlidingMem: + return RateLimiterConfig_SlidingMem_case + case *rateLimiterConfig_FixedMem: + return RateLimiterConfig_FixedMem_case + case *rateLimiterConfig_External: + return RateLimiterConfig_External_case + default: + return RateLimiterConfig_Type_not_set_case + } +} + +type RateLimiterConfig_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // Fields of oneof xxx_hidden_Type: + Disabled *DisabledLimiter + SlidingMem *SlidingMemoryLimiter + FixedMem *FixedMemoryLimiter + External *ExternalLimiter + // -- end of xxx_hidden_Type +} + +func (b0 RateLimiterConfig_builder) Build() *RateLimiterConfig { + m0 := &RateLimiterConfig{} + b, x := &b0, m0 + _, _ = b, x + if b.Disabled != nil { + x.xxx_hidden_Type = &rateLimiterConfig_Disabled{b.Disabled} + } + if b.SlidingMem != nil { + x.xxx_hidden_Type = &rateLimiterConfig_SlidingMem{b.SlidingMem} + } + if b.FixedMem != nil { + x.xxx_hidden_Type = &rateLimiterConfig_FixedMem{b.FixedMem} + } + if b.External != nil { + x.xxx_hidden_Type = &rateLimiterConfig_External{b.External} + } + return m0 +} + +type case_RateLimiterConfig_Type protoreflect.FieldNumber + +func (x case_RateLimiterConfig_Type) String() string { + md := file_c1_ratelimit_v1_ratelimit_proto_msgTypes[10].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + +type isRateLimiterConfig_Type interface { + isRateLimiterConfig_Type() +} + +type rateLimiterConfig_Disabled struct { + Disabled *DisabledLimiter `protobuf:"bytes,100,opt,name=disabled,proto3,oneof"` +} + +type rateLimiterConfig_SlidingMem struct { + SlidingMem *SlidingMemoryLimiter `protobuf:"bytes,101,opt,name=sliding_mem,json=slidingMem,proto3,oneof"` +} + +type rateLimiterConfig_FixedMem struct { + FixedMem *FixedMemoryLimiter `protobuf:"bytes,102,opt,name=fixed_mem,json=fixedMem,proto3,oneof"` +} + +type rateLimiterConfig_External struct { + External *ExternalLimiter `protobuf:"bytes,103,opt,name=external,proto3,oneof"` +} + +func (*rateLimiterConfig_Disabled) isRateLimiterConfig_Type() {} + +func (*rateLimiterConfig_SlidingMem) isRateLimiterConfig_Type() {} + +func (*rateLimiterConfig_FixedMem) isRateLimiterConfig_Type() {} + +func (*rateLimiterConfig_External) isRateLimiterConfig_Type() {} + +type RateLimitDescriptors_Entry 
struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Key string `protobuf:"bytes,1,opt,name=key,proto3"` + xxx_hidden_Value string `protobuf:"bytes,2,opt,name=value,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RateLimitDescriptors_Entry) Reset() { + *x = RateLimitDescriptors_Entry{} + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RateLimitDescriptors_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitDescriptors_Entry) ProtoMessage() {} + +func (x *RateLimitDescriptors_Entry) ProtoReflect() protoreflect.Message { + mi := &file_c1_ratelimit_v1_ratelimit_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RateLimitDescriptors_Entry) GetKey() string { + if x != nil { + return x.xxx_hidden_Key + } + return "" +} + +func (x *RateLimitDescriptors_Entry) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *RateLimitDescriptors_Entry) SetKey(v string) { + x.xxx_hidden_Key = v +} + +func (x *RateLimitDescriptors_Entry) SetValue(v string) { + x.xxx_hidden_Value = v +} + +type RateLimitDescriptors_Entry_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Key string + Value string +} + +func (b0 RateLimitDescriptors_Entry_builder) Build() *RateLimitDescriptors_Entry { + m0 := &RateLimitDescriptors_Entry{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Value = b.Value + return m0 +} + +var File_c1_ratelimit_v1_ratelimit_proto protoreflect.FileDescriptor + +const file_c1_ratelimit_v1_ratelimit_proto_rawDesc = "" + + "\n" + + "\x1fc1/ratelimit/v1/ratelimit.proto\x12\x0fc1.ratelimit.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xc8\x02\n" + + "\x14RateLimitDescription\x12D\n" + + "\x06status\x18\x01 \x01(\x0e2,.c1.ratelimit.v1.RateLimitDescription.StatusR\x06status\x12\x14\n" + + "\x05limit\x18\x02 \x01(\x03R\x05limit\x12\x1c\n" + + "\tremaining\x18\x03 \x01(\x03R\tremaining\x12\x14\n" + + "\x05count\x18\x05 \x01(\x03R\x05count\x125\n" + + "\breset_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\aresetAt\"i\n" + + "\x06Status\x12\x16\n" + + "\x12STATUS_UNSPECIFIED\x10\x00\x12\r\n" + + "\tSTATUS_OK\x10\x01\x12\x14\n" + + "\x10STATUS_OVERLIMIT\x10\x02\x12\x10\n" + + "\fSTATUS_ERROR\x10\x03\x12\x10\n" + + "\fSTATUS_EMPTY\x10\x04\"\xe0\x01\n" + + "\rReportRequest\x12#\n" + + "\rrequest_token\x18\x01 \x01(\tR\frequestToken\x12G\n" + + "\vdescription\x18\x02 \x01(\v2%.c1.ratelimit.v1.RateLimitDescriptionR\vdescription\x12\x18\n" + + "\aservice\x18\x03 \x01(\tR\aservice\x12G\n" + + "\vdescriptors\x18\x04 \x01(\v2%.c1.ratelimit.v1.RateLimitDescriptorsR\vdescriptors\"\x10\n" + + "\x0eReportResponse\"\xaa\x01\n" + + "\x14RateLimitDescriptors\x12O\n" + + "\aentries\x18\x01 \x03(\v2+.c1.ratelimit.v1.RateLimitDescriptors.EntryB\b\xfaB\x05\x92\x01\x02\b\x01R\aentries\x1aA\n" + + "\x05Entry\x12\x19\n" + + "\x03key\x18\x01 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\x03key\x12\x1d\n" + + "\x05value\x18\x02 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\x05value\"\x93\x01\n" + + "\tDoRequest\x12#\n" + + "\rrequest_token\x18\x01 \x01(\tR\frequestToken\x12\x18\n" + + 
"\aservice\x18\x02 \x01(\tR\aservice\x12G\n" + + "\vdescriptors\x18\x03 \x01(\v2%.c1.ratelimit.v1.RateLimitDescriptorsR\vdescriptors\"z\n" + + "\n" + + "DoResponse\x12#\n" + + "\rrequest_token\x18\x01 \x01(\tR\frequestToken\x12G\n" + + "\vdescription\x18\x02 \x01(\v2%.c1.ratelimit.v1.RateLimitDescriptionR\vdescription\"\x11\n" + + "\x0fDisabledLimiter\"7\n" + + "\x14SlidingMemoryLimiter\x12\x1f\n" + + "\vuse_percent\x18\x01 \x01(\x01R\n" + + "usePercent\"[\n" + + "\x12FixedMemoryLimiter\x12\x12\n" + + "\x04rate\x18\x01 \x01(\x03R\x04rate\x121\n" + + "\x06period\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x06period\"\xb0\x01\n" + + "\x0fExternalLimiter\x12\x18\n" + + "\aaddress\x18\x02 \x01(\tR\aaddress\x12G\n" + + "\aoptions\x18\x01 \x03(\v2-.c1.ratelimit.v1.ExternalLimiter.OptionsEntryR\aoptions\x1a:\n" + + "\fOptionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xa9\x02\n" + + "\x11RateLimiterConfig\x12>\n" + + "\bdisabled\x18d \x01(\v2 .c1.ratelimit.v1.DisabledLimiterH\x00R\bdisabled\x12H\n" + + "\vsliding_mem\x18e \x01(\v2%.c1.ratelimit.v1.SlidingMemoryLimiterH\x00R\n" + + "slidingMem\x12B\n" + + "\tfixed_mem\x18f \x01(\v2#.c1.ratelimit.v1.FixedMemoryLimiterH\x00R\bfixedMem\x12>\n" + + "\bexternal\x18g \x01(\v2 .c1.ratelimit.v1.ExternalLimiterH\x00R\bexternalB\x06\n" + + "\x04type2\x9e\x01\n" + + "\x12RateLimiterService\x12=\n" + + "\x02Do\x12\x1a.c1.ratelimit.v1.DoRequest\x1a\x1b.c1.ratelimit.v1.DoResponse\x12I\n" + + "\x06Report\x12\x1e.c1.ratelimit.v1.ReportRequest\x1a\x1f.c1.ratelimit.v1.ReportResponseB6Z4github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1b\x06proto3" + +var file_c1_ratelimit_v1_ratelimit_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_c1_ratelimit_v1_ratelimit_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_c1_ratelimit_v1_ratelimit_proto_goTypes = []any{ + (RateLimitDescription_Status)(0), // 0: c1.ratelimit.v1.RateLimitDescription.Status + (*RateLimitDescription)(nil), // 1: c1.ratelimit.v1.RateLimitDescription + (*ReportRequest)(nil), // 2: c1.ratelimit.v1.ReportRequest + (*ReportResponse)(nil), // 3: c1.ratelimit.v1.ReportResponse + (*RateLimitDescriptors)(nil), // 4: c1.ratelimit.v1.RateLimitDescriptors + (*DoRequest)(nil), // 5: c1.ratelimit.v1.DoRequest + (*DoResponse)(nil), // 6: c1.ratelimit.v1.DoResponse + (*DisabledLimiter)(nil), // 7: c1.ratelimit.v1.DisabledLimiter + (*SlidingMemoryLimiter)(nil), // 8: c1.ratelimit.v1.SlidingMemoryLimiter + (*FixedMemoryLimiter)(nil), // 9: c1.ratelimit.v1.FixedMemoryLimiter + (*ExternalLimiter)(nil), // 10: c1.ratelimit.v1.ExternalLimiter + (*RateLimiterConfig)(nil), // 11: c1.ratelimit.v1.RateLimiterConfig + (*RateLimitDescriptors_Entry)(nil), // 12: c1.ratelimit.v1.RateLimitDescriptors.Entry + nil, // 13: c1.ratelimit.v1.ExternalLimiter.OptionsEntry + (*timestamppb.Timestamp)(nil), // 14: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration +} +var file_c1_ratelimit_v1_ratelimit_proto_depIdxs = []int32{ + 0, // 0: c1.ratelimit.v1.RateLimitDescription.status:type_name -> c1.ratelimit.v1.RateLimitDescription.Status + 14, // 1: c1.ratelimit.v1.RateLimitDescription.reset_at:type_name -> google.protobuf.Timestamp + 1, // 2: c1.ratelimit.v1.ReportRequest.description:type_name -> c1.ratelimit.v1.RateLimitDescription + 4, // 3: c1.ratelimit.v1.ReportRequest.descriptors:type_name -> c1.ratelimit.v1.RateLimitDescriptors + 12, // 4: c1.ratelimit.v1.RateLimitDescriptors.entries:type_name -> 
c1.ratelimit.v1.RateLimitDescriptors.Entry + 4, // 5: c1.ratelimit.v1.DoRequest.descriptors:type_name -> c1.ratelimit.v1.RateLimitDescriptors + 1, // 6: c1.ratelimit.v1.DoResponse.description:type_name -> c1.ratelimit.v1.RateLimitDescription + 15, // 7: c1.ratelimit.v1.FixedMemoryLimiter.period:type_name -> google.protobuf.Duration + 13, // 8: c1.ratelimit.v1.ExternalLimiter.options:type_name -> c1.ratelimit.v1.ExternalLimiter.OptionsEntry + 7, // 9: c1.ratelimit.v1.RateLimiterConfig.disabled:type_name -> c1.ratelimit.v1.DisabledLimiter + 8, // 10: c1.ratelimit.v1.RateLimiterConfig.sliding_mem:type_name -> c1.ratelimit.v1.SlidingMemoryLimiter + 9, // 11: c1.ratelimit.v1.RateLimiterConfig.fixed_mem:type_name -> c1.ratelimit.v1.FixedMemoryLimiter + 10, // 12: c1.ratelimit.v1.RateLimiterConfig.external:type_name -> c1.ratelimit.v1.ExternalLimiter + 5, // 13: c1.ratelimit.v1.RateLimiterService.Do:input_type -> c1.ratelimit.v1.DoRequest + 2, // 14: c1.ratelimit.v1.RateLimiterService.Report:input_type -> c1.ratelimit.v1.ReportRequest + 6, // 15: c1.ratelimit.v1.RateLimiterService.Do:output_type -> c1.ratelimit.v1.DoResponse + 3, // 16: c1.ratelimit.v1.RateLimiterService.Report:output_type -> c1.ratelimit.v1.ReportResponse + 15, // [15:17] is the sub-list for method output_type + 13, // [13:15] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_c1_ratelimit_v1_ratelimit_proto_init() } +func file_c1_ratelimit_v1_ratelimit_proto_init() { + if File_c1_ratelimit_v1_ratelimit_proto != nil { + return + } + file_c1_ratelimit_v1_ratelimit_proto_msgTypes[10].OneofWrappers = []any{ + (*rateLimiterConfig_Disabled)(nil), + (*rateLimiterConfig_SlidingMem)(nil), + (*rateLimiterConfig_FixedMem)(nil), + (*rateLimiterConfig_External)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_ratelimit_v1_ratelimit_proto_rawDesc), len(file_c1_ratelimit_v1_ratelimit_proto_rawDesc)), + NumEnums: 1, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_ratelimit_v1_ratelimit_proto_goTypes, + DependencyIndexes: file_c1_ratelimit_v1_ratelimit_proto_depIdxs, + EnumInfos: file_c1_ratelimit_v1_ratelimit_proto_enumTypes, + MessageInfos: file_c1_ratelimit_v1_ratelimit_proto_msgTypes, + }.Build() + File_c1_ratelimit_v1_ratelimit_proto = out.File + file_c1_ratelimit_v1_ratelimit_proto_goTypes = nil + file_c1_ratelimit_v1_ratelimit_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/entitlement.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/entitlement.pb.go index 1cf92ff9..a2a13b1a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/entitlement.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/entitlement.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/reader/v2/entitlement.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -25,7 +26,7 @@ const ( ) type EntitlementsReaderServiceGetEntitlementRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` EntitlementId string `protobuf:"bytes,1,opt,name=entitlement_id,json=entitlementId,proto3" json:"entitlement_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -57,11 +58,6 @@ func (x *EntitlementsReaderServiceGetEntitlementRequest) ProtoReflect() protoref return mi.MessageOf(x) } -// Deprecated: Use EntitlementsReaderServiceGetEntitlementRequest.ProtoReflect.Descriptor instead. -func (*EntitlementsReaderServiceGetEntitlementRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_entitlement_proto_rawDescGZIP(), []int{0} -} - func (x *EntitlementsReaderServiceGetEntitlementRequest) GetEntitlementId() string { if x != nil { return x.EntitlementId @@ -76,8 +72,32 @@ func (x *EntitlementsReaderServiceGetEntitlementRequest) GetAnnotations() []*any return nil } +func (x *EntitlementsReaderServiceGetEntitlementRequest) SetEntitlementId(v string) { + x.EntitlementId = v +} + +func (x *EntitlementsReaderServiceGetEntitlementRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type EntitlementsReaderServiceGetEntitlementRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + EntitlementId string + Annotations []*anypb.Any +} + +func (b0 EntitlementsReaderServiceGetEntitlementRequest_builder) Build() *EntitlementsReaderServiceGetEntitlementRequest { + m0 := &EntitlementsReaderServiceGetEntitlementRequest{} + b, x := &b0, m0 + _, _ = b, x + x.EntitlementId = b.EntitlementId + x.Annotations = b.Annotations + return m0 +} + type EntitlementsReaderServiceGetEntitlementResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -108,11 +128,6 @@ func (x *EntitlementsReaderServiceGetEntitlementResponse) ProtoReflect() protore return mi.MessageOf(x) } -// Deprecated: Use EntitlementsReaderServiceGetEntitlementResponse.ProtoReflect.Descriptor instead. 
-func (*EntitlementsReaderServiceGetEntitlementResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_entitlement_proto_rawDescGZIP(), []int{1} -} - func (x *EntitlementsReaderServiceGetEntitlementResponse) GetEntitlement() *v2.Entitlement { if x != nil { return x.Entitlement @@ -120,64 +135,49 @@ func (x *EntitlementsReaderServiceGetEntitlementResponse) GetEntitlement() *v2.E return nil } -var File_c1_reader_v2_entitlement_proto protoreflect.FileDescriptor +func (x *EntitlementsReaderServiceGetEntitlementResponse) SetEntitlement(v *v2.Entitlement) { + x.Entitlement = v +} -var file_c1_reader_v2_entitlement_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0c, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x21, - 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9b, 0x01, 0x0a, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x0e, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, 0x0d, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x71, 0x0a, 0x2f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, - 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x32, 0xab, 0x01, 0x0a, 0x19, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 
0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, - 0x74, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, - 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, - 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -}) - -var ( - file_c1_reader_v2_entitlement_proto_rawDescOnce sync.Once - file_c1_reader_v2_entitlement_proto_rawDescData []byte -) +func (x *EntitlementsReaderServiceGetEntitlementResponse) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} -func file_c1_reader_v2_entitlement_proto_rawDescGZIP() []byte { - file_c1_reader_v2_entitlement_proto_rawDescOnce.Do(func() { - file_c1_reader_v2_entitlement_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_reader_v2_entitlement_proto_rawDesc), len(file_c1_reader_v2_entitlement_proto_rawDesc))) - }) - return file_c1_reader_v2_entitlement_proto_rawDescData +func (x *EntitlementsReaderServiceGetEntitlementResponse) ClearEntitlement() { + x.Entitlement = nil } +type EntitlementsReaderServiceGetEntitlementResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entitlement *v2.Entitlement +} + +func (b0 EntitlementsReaderServiceGetEntitlementResponse_builder) Build() *EntitlementsReaderServiceGetEntitlementResponse { + m0 := &EntitlementsReaderServiceGetEntitlementResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Entitlement = b.Entitlement + return m0 +} + +var File_c1_reader_v2_entitlement_proto protoreflect.FileDescriptor + +const file_c1_reader_v2_entitlement_proto_rawDesc = "" + + "\n" + + "\x1ec1/reader/v2/entitlement.proto\x12\fc1.reader.v2\x1a!c1/connector/v2/entitlement.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x9b\x01\n" + + ".EntitlementsReaderServiceGetEntitlementRequest\x121\n" + + "\x0eentitlement_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\rentitlementId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"q\n" + + "/EntitlementsReaderServiceGetEntitlementResponse\x12>\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementR\ventitlement2\xab\x01\n" + + "\x19EntitlementsReaderService\x12\x8d\x01\n" + + "\x0eGetEntitlement\x12<.c1.reader.v2.EntitlementsReaderServiceGetEntitlementRequest\x1a=.c1.reader.v2.EntitlementsReaderServiceGetEntitlementResponseB3Z1github.com/conductorone/baton-sdk/pb/c1/reader/v2b\x06proto3" + var file_c1_reader_v2_entitlement_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_c1_reader_v2_entitlement_proto_goTypes = []any{ (*EntitlementsReaderServiceGetEntitlementRequest)(nil), // 0: c1.reader.v2.EntitlementsReaderServiceGetEntitlementRequest diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/entitlement_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/entitlement_protoopaque.pb.go new file mode 100644 index 00000000..5b9fb685 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/entitlement_protoopaque.pb.go @@ -0,0 +1,224 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/reader/v2/entitlement.proto + +//go:build protoopaque + +package v2 + +import ( + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EntitlementsReaderServiceGetEntitlementRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_EntitlementId string `protobuf:"bytes,1,opt,name=entitlement_id,json=entitlementId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementsReaderServiceGetEntitlementRequest) Reset() { + *x = EntitlementsReaderServiceGetEntitlementRequest{} + mi := &file_c1_reader_v2_entitlement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementsReaderServiceGetEntitlementRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementsReaderServiceGetEntitlementRequest) ProtoMessage() {} + +func (x *EntitlementsReaderServiceGetEntitlementRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_entitlement_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementsReaderServiceGetEntitlementRequest) GetEntitlementId() string { + if x != nil { + return x.xxx_hidden_EntitlementId + } + return "" +} + +func (x *EntitlementsReaderServiceGetEntitlementRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *EntitlementsReaderServiceGetEntitlementRequest) SetEntitlementId(v string) { + x.xxx_hidden_EntitlementId = v +} + +func (x *EntitlementsReaderServiceGetEntitlementRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type EntitlementsReaderServiceGetEntitlementRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + EntitlementId string + Annotations []*anypb.Any +} + +func (b0 EntitlementsReaderServiceGetEntitlementRequest_builder) Build() *EntitlementsReaderServiceGetEntitlementRequest { + m0 := &EntitlementsReaderServiceGetEntitlementRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_EntitlementId = b.EntitlementId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type EntitlementsReaderServiceGetEntitlementResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementsReaderServiceGetEntitlementResponse) Reset() { + *x = EntitlementsReaderServiceGetEntitlementResponse{} + mi := &file_c1_reader_v2_entitlement_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementsReaderServiceGetEntitlementResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementsReaderServiceGetEntitlementResponse) ProtoMessage() {} + +func (x *EntitlementsReaderServiceGetEntitlementResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_entitlement_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementsReaderServiceGetEntitlementResponse) GetEntitlement() *v2.Entitlement { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *EntitlementsReaderServiceGetEntitlementResponse) SetEntitlement(v *v2.Entitlement) { + x.xxx_hidden_Entitlement = v +} + +func (x *EntitlementsReaderServiceGetEntitlementResponse) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *EntitlementsReaderServiceGetEntitlementResponse) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +type EntitlementsReaderServiceGetEntitlementResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Entitlement *v2.Entitlement +} + +func (b0 EntitlementsReaderServiceGetEntitlementResponse_builder) Build() *EntitlementsReaderServiceGetEntitlementResponse { + m0 := &EntitlementsReaderServiceGetEntitlementResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entitlement = b.Entitlement + return m0 +} + +var File_c1_reader_v2_entitlement_proto protoreflect.FileDescriptor + +const file_c1_reader_v2_entitlement_proto_rawDesc = "" + + "\n" + + "\x1ec1/reader/v2/entitlement.proto\x12\fc1.reader.v2\x1a!c1/connector/v2/entitlement.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x9b\x01\n" + + ".EntitlementsReaderServiceGetEntitlementRequest\x121\n" + + "\x0eentitlement_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\rentitlementId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"q\n" + + "/EntitlementsReaderServiceGetEntitlementResponse\x12>\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementR\ventitlement2\xab\x01\n" + + "\x19EntitlementsReaderService\x12\x8d\x01\n" + + "\x0eGetEntitlement\x12<.c1.reader.v2.EntitlementsReaderServiceGetEntitlementRequest\x1a=.c1.reader.v2.EntitlementsReaderServiceGetEntitlementResponseB3Z1github.com/conductorone/baton-sdk/pb/c1/reader/v2b\x06proto3" + +var file_c1_reader_v2_entitlement_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_c1_reader_v2_entitlement_proto_goTypes = []any{ + (*EntitlementsReaderServiceGetEntitlementRequest)(nil), // 0: c1.reader.v2.EntitlementsReaderServiceGetEntitlementRequest + (*EntitlementsReaderServiceGetEntitlementResponse)(nil), // 1: c1.reader.v2.EntitlementsReaderServiceGetEntitlementResponse + (*anypb.Any)(nil), // 2: google.protobuf.Any + (*v2.Entitlement)(nil), // 3: c1.connector.v2.Entitlement +} +var file_c1_reader_v2_entitlement_proto_depIdxs = []int32{ + 2, // 0: c1.reader.v2.EntitlementsReaderServiceGetEntitlementRequest.annotations:type_name -> google.protobuf.Any + 3, // 1: c1.reader.v2.EntitlementsReaderServiceGetEntitlementResponse.entitlement:type_name -> c1.connector.v2.Entitlement + 0, // 2: c1.reader.v2.EntitlementsReaderService.GetEntitlement:input_type -> c1.reader.v2.EntitlementsReaderServiceGetEntitlementRequest + 1, // 3: c1.reader.v2.EntitlementsReaderService.GetEntitlement:output_type -> c1.reader.v2.EntitlementsReaderServiceGetEntitlementResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_c1_reader_v2_entitlement_proto_init() } +func file_c1_reader_v2_entitlement_proto_init() { + if File_c1_reader_v2_entitlement_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_reader_v2_entitlement_proto_rawDesc), len(file_c1_reader_v2_entitlement_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_reader_v2_entitlement_proto_goTypes, + DependencyIndexes: file_c1_reader_v2_entitlement_proto_depIdxs, + MessageInfos: file_c1_reader_v2_entitlement_proto_msgTypes, + }.Build() + File_c1_reader_v2_entitlement_proto = out.File + file_c1_reader_v2_entitlement_proto_goTypes = nil + file_c1_reader_v2_entitlement_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/grant.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/grant.pb.go index 13690bcd..9d341d60 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/grant.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/grant.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/reader/v2/grant.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -25,7 +26,7 @@ const ( ) type GrantsReaderServiceGetGrantRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` GrantId string `protobuf:"bytes,1,opt,name=grant_id,json=grantId,proto3" json:"grant_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -57,11 +58,6 @@ func (x *GrantsReaderServiceGetGrantRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use GrantsReaderServiceGetGrantRequest.ProtoReflect.Descriptor instead. -func (*GrantsReaderServiceGetGrantRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_grant_proto_rawDescGZIP(), []int{0} -} - func (x *GrantsReaderServiceGetGrantRequest) GetGrantId() string { if x != nil { return x.GrantId @@ -76,8 +72,32 @@ func (x *GrantsReaderServiceGetGrantRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *GrantsReaderServiceGetGrantRequest) SetGrantId(v string) { + x.GrantId = v +} + +func (x *GrantsReaderServiceGetGrantRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type GrantsReaderServiceGetGrantRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + GrantId string + Annotations []*anypb.Any +} + +func (b0 GrantsReaderServiceGetGrantRequest_builder) Build() *GrantsReaderServiceGetGrantRequest { + m0 := &GrantsReaderServiceGetGrantRequest{} + b, x := &b0, m0 + _, _ = b, x + x.GrantId = b.GrantId + x.Annotations = b.Annotations + return m0 +} + type GrantsReaderServiceGetGrantResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Grant *v2.Grant `protobuf:"bytes,1,opt,name=grant,proto3" json:"grant,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -108,11 +128,6 @@ func (x *GrantsReaderServiceGetGrantResponse) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use GrantsReaderServiceGetGrantResponse.ProtoReflect.Descriptor instead. 
-func (*GrantsReaderServiceGetGrantResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_grant_proto_rawDescGZIP(), []int{1} -} - func (x *GrantsReaderServiceGetGrantResponse) GetGrant() *v2.Grant { if x != nil { return x.Grant @@ -120,15 +135,45 @@ func (x *GrantsReaderServiceGetGrantResponse) GetGrant() *v2.Grant { return nil } +func (x *GrantsReaderServiceGetGrantResponse) SetGrant(v *v2.Grant) { + x.Grant = v +} + +func (x *GrantsReaderServiceGetGrantResponse) HasGrant() bool { + if x == nil { + return false + } + return x.Grant != nil +} + +func (x *GrantsReaderServiceGetGrantResponse) ClearGrant() { + x.Grant = nil +} + +type GrantsReaderServiceGetGrantResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Grant *v2.Grant +} + +func (b0 GrantsReaderServiceGetGrantResponse_builder) Build() *GrantsReaderServiceGetGrantResponse { + m0 := &GrantsReaderServiceGetGrantResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Grant = b.Grant + return m0 +} + type GrantsReaderServiceListGrantsForEntitlementRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` - PrincipalId *v2.ResourceId `protobuf:"bytes,5,opt,name=principal_id,json=principalId,proto3" json:"principal_id,omitempty"` - PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` + PrincipalId *v2.ResourceId `protobuf:"bytes,5,opt,name=principal_id,json=principalId,proto3" json:"principal_id,omitempty"` + PrincipalResourceTypeIds []string `protobuf:"bytes,6,rep,name=principal_resource_type_ids,json=principalResourceTypeIds,proto3" json:"principal_resource_type_ids,omitempty"` + PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GrantsReaderServiceListGrantsForEntitlementRequest) Reset() { @@ -156,11 +201,6 @@ func (x *GrantsReaderServiceListGrantsForEntitlementRequest) ProtoReflect() prot return mi.MessageOf(x) } -// Deprecated: Use GrantsReaderServiceListGrantsForEntitlementRequest.ProtoReflect.Descriptor instead. 
-func (*GrantsReaderServiceListGrantsForEntitlementRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_grant_proto_rawDescGZIP(), []int{2} -} - func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetEntitlement() *v2.Entitlement { if x != nil { return x.Entitlement @@ -175,6 +215,13 @@ func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetPrincipalId() *v return nil } +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetPrincipalResourceTypeIds() []string { + if x != nil { + return x.PrincipalResourceTypeIds + } + return nil +} + func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetPageSize() uint32 { if x != nil { return x.PageSize @@ -196,8 +243,78 @@ func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetAnnotations() [] return nil } +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetEntitlement(v *v2.Entitlement) { + x.Entitlement = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetPrincipalId(v *v2.ResourceId) { + x.PrincipalId = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetPrincipalResourceTypeIds(v []string) { + x.PrincipalResourceTypeIds = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) HasPrincipalId() bool { + if x == nil { + return false + } + return x.PrincipalId != nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) ClearEntitlement() { + x.Entitlement = nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) ClearPrincipalId() { + x.PrincipalId = nil +} + +type GrantsReaderServiceListGrantsForEntitlementRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entitlement *v2.Entitlement + PrincipalId *v2.ResourceId + PrincipalResourceTypeIds []string + PageSize uint32 + PageToken string + Annotations []*anypb.Any +} + +func (b0 GrantsReaderServiceListGrantsForEntitlementRequest_builder) Build() *GrantsReaderServiceListGrantsForEntitlementRequest { + m0 := &GrantsReaderServiceListGrantsForEntitlementRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Entitlement = b.Entitlement + x.PrincipalId = b.PrincipalId + x.PrincipalResourceTypeIds = b.PrincipalResourceTypeIds + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + return m0 +} + type GrantsReaderServiceListGrantsForEntitlementResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` List []*v2.Grant `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields @@ -229,11 +346,6 @@ func (x *GrantsReaderServiceListGrantsForEntitlementResponse) ProtoReflect() pro return mi.MessageOf(x) } -// Deprecated: Use GrantsReaderServiceListGrantsForEntitlementResponse.ProtoReflect.Descriptor instead. 
-func (*GrantsReaderServiceListGrantsForEntitlementResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_grant_proto_rawDescGZIP(), []int{3} -} - func (x *GrantsReaderServiceListGrantsForEntitlementResponse) GetList() []*v2.Grant { if x != nil { return x.List @@ -248,8 +360,32 @@ func (x *GrantsReaderServiceListGrantsForEntitlementResponse) GetNextPageToken() return "" } +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) SetList(v []*v2.Grant) { + x.List = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} + +type GrantsReaderServiceListGrantsForEntitlementResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + List []*v2.Grant + NextPageToken string +} + +func (b0 GrantsReaderServiceListGrantsForEntitlementResponse_builder) Build() *GrantsReaderServiceListGrantsForEntitlementResponse { + m0 := &GrantsReaderServiceListGrantsForEntitlementResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.NextPageToken = b.NextPageToken + return m0 +} + type GrantsReaderServiceListGrantsForResourceTypeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` @@ -283,11 +419,6 @@ func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) ProtoReflect() pro return mi.MessageOf(x) } -// Deprecated: Use GrantsReaderServiceListGrantsForResourceTypeRequest.ProtoReflect.Descriptor instead. -func (*GrantsReaderServiceListGrantsForResourceTypeRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_grant_proto_rawDescGZIP(), []int{4} -} - func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) GetResourceTypeId() string { if x != nil { return x.ResourceTypeId @@ -316,8 +447,44 @@ func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) GetAnnotations() [ return nil } +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type GrantsReaderServiceListGrantsForResourceTypeRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceTypeId string + PageSize uint32 + PageToken string + Annotations []*anypb.Any +} + +func (b0 GrantsReaderServiceListGrantsForResourceTypeRequest_builder) Build() *GrantsReaderServiceListGrantsForResourceTypeRequest { + m0 := &GrantsReaderServiceListGrantsForResourceTypeRequest{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceTypeId = b.ResourceTypeId + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + return m0 +} + type GrantsReaderServiceListGrantsForResourceTypeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` List []*v2.Grant `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields @@ -349,11 +516,6 @@ func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) ProtoReflect() pr return mi.MessageOf(x) } -// Deprecated: Use GrantsReaderServiceListGrantsForResourceTypeResponse.ProtoReflect.Descriptor instead. -func (*GrantsReaderServiceListGrantsForResourceTypeResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_grant_proto_rawDescGZIP(), []int{5} -} - func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) GetList() []*v2.Grant { if x != nil { return x.List @@ -368,139 +530,73 @@ func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) GetNextPageToken( return "" } -var File_c1_reader_v2_grant_proto protoreflect.FileDescriptor +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) SetList(v []*v2.Grant) { + x.List = v +} -var file_c1_reader_v2_grant_proto_rawDesc = string([]byte{ - 0x0a, 0x18, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x67, - 0x72, 0x61, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x63, 0x31, 0x2e, 0x72, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x21, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x63, 0x31, 0x2f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x72, 0x61, - 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x01, 0x0a, - 0x22, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, - 0x08, 0x52, 0x07, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x53, 0x0a, 0x23, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x67, 0x72, 0x61, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, - 0x52, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x22, 0xd7, 0x02, 0x0a, 0x32, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, - 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, - 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x70, 0x72, 0x69, 0x6e, - 0x63, 0x69, 0x70, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x00, 0x52, 0x0b, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, - 0x49, 0x64, 0x12, 0x27, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, 0x40, - 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2c, 0x0a, 0x0a, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x98, 0x01, 0x0a, 0x33, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, - 0x6e, 0x74, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x6c, 0x69, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, - 0x04, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x42, 0x0d, - 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xfd, 0x01, 0x0a, - 0x33, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x46, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, - 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x0e, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x49, 0x64, 0x12, 0x27, 0x0a, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, 0x40, 0x01, 0x52, 0x08, 0x70, 0x61, - 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2c, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, - 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x99, 0x01, 0x0a, - 0x34, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x46, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x04, 0x6c, 0x69, 0x73, - 0x74, 0x12, 0x35, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, - 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xcd, 0x03, 0x0a, 0x13, 0x47, 0x72, 0x61, - 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x6f, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x12, 0x30, 0x2e, 0x63, - 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, - 0x65, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, - 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, - 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x47, 0x65, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 
0x65, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, - 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x40, - 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, - 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, - 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x41, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x46, 0x6f, 0x72, - 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, - 0x74, 0x73, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x41, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x46, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x61, 0x6e, 0x74, - 0x73, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, - 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, - 0x2f, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_reader_v2_grant_proto_rawDescOnce sync.Once - file_c1_reader_v2_grant_proto_rawDescData []byte -) +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} -func file_c1_reader_v2_grant_proto_rawDescGZIP() []byte { - file_c1_reader_v2_grant_proto_rawDescOnce.Do(func() { - file_c1_reader_v2_grant_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_reader_v2_grant_proto_rawDesc), len(file_c1_reader_v2_grant_proto_rawDesc))) - }) - return file_c1_reader_v2_grant_proto_rawDescData +type GrantsReaderServiceListGrantsForResourceTypeResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + List []*v2.Grant + NextPageToken string +} + +func (b0 GrantsReaderServiceListGrantsForResourceTypeResponse_builder) Build() *GrantsReaderServiceListGrantsForResourceTypeResponse { + m0 := &GrantsReaderServiceListGrantsForResourceTypeResponse{} + b, x := &b0, m0 + _, _ = b, x + x.List = b.List + x.NextPageToken = b.NextPageToken + return m0 } +var File_c1_reader_v2_grant_proto protoreflect.FileDescriptor + +const file_c1_reader_v2_grant_proto_rawDesc = "" + + "\n" + + "\x18c1/reader/v2/grant.proto\x12\fc1.reader.v2\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x83\x01\n" + + "\"GrantsReaderServiceGetGrantRequest\x12%\n" + + "\bgrant_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\agrantId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"S\n" + + "#GrantsReaderServiceGetGrantResponse\x12,\n" + + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantR\x05grant\"\x96\x03\n" + + "2GrantsReaderServiceListGrantsForEntitlementRequest\x12H\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12H\n" + + "\fprincipal_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x00R\vprincipalId\x12=\n" + + "\x1bprincipal_resource_type_ids\x18\x06 \x03(\tR\x18principalResourceTypeIds\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12,\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x98\x01\n" + + "3GrantsReaderServiceListGrantsForEntitlementResponse\x12*\n" + + "\x04list\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\x04list\x125\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\rnextPageToken\"\xfd\x01\n" + + "3GrantsReaderServiceListGrantsForResourceTypeRequest\x127\n" + + "\x10resource_type_id\x18\x01 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\x0eresourceTypeId\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12,\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x99\x01\n" + + "4GrantsReaderServiceListGrantsForResourceTypeResponse\x12*\n" + + "\x04list\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\x04list\x125\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\rnextPageToken2\xcd\x03\n" + + "\x13GrantsReaderService\x12o\n" + + "\bGetGrant\x120.c1.reader.v2.GrantsReaderServiceGetGrantRequest\x1a1.c1.reader.v2.GrantsReaderServiceGetGrantResponse\x12\x9f\x01\n" + + "\x18ListGrantsForEntitlement\x12@.c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementRequest\x1aA.c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementResponse\x12\xa2\x01\n" + + "\x19ListGrantsForResourceType\x12A.c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeRequest\x1aB.c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeResponseB3Z1github.com/conductorone/baton-sdk/pb/c1/reader/v2b\x06proto3" + var file_c1_reader_v2_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_c1_reader_v2_grant_proto_goTypes = []any{ (*GrantsReaderServiceGetGrantRequest)(nil), // 0: c1.reader.v2.GrantsReaderServiceGetGrantRequest 
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/grant_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/grant_protoopaque.pb.go new file mode 100644 index 00000000..52a4bec1 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/grant_protoopaque.pb.go @@ -0,0 +1,667 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/reader/v2/grant.proto + +//go:build protoopaque + +package v2 + +import ( + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GrantsReaderServiceGetGrantRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_GrantId string `protobuf:"bytes,1,opt,name=grant_id,json=grantId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantsReaderServiceGetGrantRequest) Reset() { + *x = GrantsReaderServiceGetGrantRequest{} + mi := &file_c1_reader_v2_grant_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantsReaderServiceGetGrantRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantsReaderServiceGetGrantRequest) ProtoMessage() {} + +func (x *GrantsReaderServiceGetGrantRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_grant_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantsReaderServiceGetGrantRequest) GetGrantId() string { + if x != nil { + return x.xxx_hidden_GrantId + } + return "" +} + +func (x *GrantsReaderServiceGetGrantRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantsReaderServiceGetGrantRequest) SetGrantId(v string) { + x.xxx_hidden_GrantId = v +} + +func (x *GrantsReaderServiceGetGrantRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type GrantsReaderServiceGetGrantRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + GrantId string + Annotations []*anypb.Any +} + +func (b0 GrantsReaderServiceGetGrantRequest_builder) Build() *GrantsReaderServiceGetGrantRequest { + m0 := &GrantsReaderServiceGetGrantRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_GrantId = b.GrantId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GrantsReaderServiceGetGrantResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Grant *v2.Grant `protobuf:"bytes,1,opt,name=grant,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantsReaderServiceGetGrantResponse) Reset() { + *x = GrantsReaderServiceGetGrantResponse{} + mi := &file_c1_reader_v2_grant_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantsReaderServiceGetGrantResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantsReaderServiceGetGrantResponse) ProtoMessage() {} + +func (x *GrantsReaderServiceGetGrantResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_grant_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantsReaderServiceGetGrantResponse) GetGrant() *v2.Grant { + if x != nil { + return x.xxx_hidden_Grant + } + return nil +} + +func (x *GrantsReaderServiceGetGrantResponse) SetGrant(v *v2.Grant) { + x.xxx_hidden_Grant = v +} + +func (x *GrantsReaderServiceGetGrantResponse) HasGrant() bool { + if x == nil { + return false + } + return x.xxx_hidden_Grant != nil +} + +func (x *GrantsReaderServiceGetGrantResponse) ClearGrant() { + x.xxx_hidden_Grant = nil +} + +type GrantsReaderServiceGetGrantResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Grant *v2.Grant +} + +func (b0 GrantsReaderServiceGetGrantResponse_builder) Build() *GrantsReaderServiceGetGrantResponse { + m0 := &GrantsReaderServiceGetGrantResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Grant = b.Grant + return m0 +} + +type GrantsReaderServiceListGrantsForEntitlementRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` + xxx_hidden_PrincipalId *v2.ResourceId `protobuf:"bytes,5,opt,name=principal_id,json=principalId,proto3"` + xxx_hidden_PrincipalResourceTypeIds []string `protobuf:"bytes,6,rep,name=principal_resource_type_ids,json=principalResourceTypeIds,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) Reset() { + *x = GrantsReaderServiceListGrantsForEntitlementRequest{} + mi := &file_c1_reader_v2_grant_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantsReaderServiceListGrantsForEntitlementRequest) ProtoMessage() {} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_grant_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetEntitlement() *v2.Entitlement { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetPrincipalId() *v2.ResourceId { + if x != nil { + return x.xxx_hidden_PrincipalId + } + return nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetPrincipalResourceTypeIds() []string { + if x != nil { + return x.xxx_hidden_PrincipalResourceTypeIds + } + return nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetEntitlement(v *v2.Entitlement) { + x.xxx_hidden_Entitlement = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetPrincipalId(v *v2.ResourceId) { + x.xxx_hidden_PrincipalId = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetPrincipalResourceTypeIds(v []string) { + x.xxx_hidden_PrincipalResourceTypeIds = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetPageToken(v string) { + 
x.xxx_hidden_PageToken = v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) HasPrincipalId() bool { + if x == nil { + return false + } + return x.xxx_hidden_PrincipalId != nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementRequest) ClearPrincipalId() { + x.xxx_hidden_PrincipalId = nil +} + +type GrantsReaderServiceListGrantsForEntitlementRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Entitlement *v2.Entitlement + PrincipalId *v2.ResourceId + PrincipalResourceTypeIds []string + PageSize uint32 + PageToken string + Annotations []*anypb.Any +} + +func (b0 GrantsReaderServiceListGrantsForEntitlementRequest_builder) Build() *GrantsReaderServiceListGrantsForEntitlementRequest { + m0 := &GrantsReaderServiceListGrantsForEntitlementRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Entitlement = b.Entitlement + x.xxx_hidden_PrincipalId = b.PrincipalId + x.xxx_hidden_PrincipalResourceTypeIds = b.PrincipalResourceTypeIds + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GrantsReaderServiceListGrantsForEntitlementResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*v2.Grant `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) Reset() { + *x = GrantsReaderServiceListGrantsForEntitlementResponse{} + mi := &file_c1_reader_v2_grant_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantsReaderServiceListGrantsForEntitlementResponse) ProtoMessage() {} + +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_grant_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) GetList() []*v2.Grant { + if x != nil { + if x.xxx_hidden_List != nil { + return *x.xxx_hidden_List + } + } + return nil +} + +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) SetList(v []*v2.Grant) { + x.xxx_hidden_List = &v +} + +func (x *GrantsReaderServiceListGrantsForEntitlementResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +type GrantsReaderServiceListGrantsForEntitlementResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + List []*v2.Grant + NextPageToken string +} + +func (b0 GrantsReaderServiceListGrantsForEntitlementResponse_builder) Build() *GrantsReaderServiceListGrantsForEntitlementResponse { + m0 := &GrantsReaderServiceListGrantsForEntitlementResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_NextPageToken = b.NextPageToken + return m0 +} + +type GrantsReaderServiceListGrantsForResourceTypeRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) Reset() { + *x = GrantsReaderServiceListGrantsForResourceTypeRequest{} + mi := &file_c1_reader_v2_grant_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantsReaderServiceListGrantsForResourceTypeRequest) ProtoMessage() {} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_grant_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type GrantsReaderServiceListGrantsForResourceTypeRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceTypeId string + PageSize uint32 + PageToken string + Annotations []*anypb.Any +} + +func (b0 GrantsReaderServiceListGrantsForResourceTypeRequest_builder) Build() *GrantsReaderServiceListGrantsForResourceTypeRequest { + m0 := &GrantsReaderServiceListGrantsForResourceTypeRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type GrantsReaderServiceListGrantsForResourceTypeResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_List *[]*v2.Grant `protobuf:"bytes,1,rep,name=list,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) Reset() { + *x = GrantsReaderServiceListGrantsForResourceTypeResponse{} + mi := &file_c1_reader_v2_grant_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantsReaderServiceListGrantsForResourceTypeResponse) ProtoMessage() {} + +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_grant_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) GetList() []*v2.Grant { + if x != nil { + if x.xxx_hidden_List != nil { + return *x.xxx_hidden_List + } + } + return nil +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) SetList(v []*v2.Grant) { + x.xxx_hidden_List = &v +} + +func (x *GrantsReaderServiceListGrantsForResourceTypeResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +type GrantsReaderServiceListGrantsForResourceTypeResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + List []*v2.Grant + NextPageToken string +} + +func (b0 GrantsReaderServiceListGrantsForResourceTypeResponse_builder) Build() *GrantsReaderServiceListGrantsForResourceTypeResponse { + m0 := &GrantsReaderServiceListGrantsForResourceTypeResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_List = &b.List + x.xxx_hidden_NextPageToken = b.NextPageToken + return m0 +} + +var File_c1_reader_v2_grant_proto protoreflect.FileDescriptor + +const file_c1_reader_v2_grant_proto_rawDesc = "" + + "\n" + + "\x18c1/reader/v2/grant.proto\x12\fc1.reader.v2\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x83\x01\n" + + "\"GrantsReaderServiceGetGrantRequest\x12%\n" + + "\bgrant_id\x18\x01 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80\bR\agrantId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"S\n" + + "#GrantsReaderServiceGetGrantResponse\x12,\n" + + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantR\x05grant\"\x96\x03\n" + + "2GrantsReaderServiceListGrantsForEntitlementRequest\x12H\n" + + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementB\b\xfaB\x05\x8a\x01\x02\x10\x01R\ventitlement\x12H\n" + + "\fprincipal_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x00R\vprincipalId\x12=\n" + + "\x1bprincipal_resource_type_ids\x18\x06 \x03(\tR\x18principalResourceTypeIds\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12,\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x98\x01\n" + + "3GrantsReaderServiceListGrantsForEntitlementResponse\x12*\n" + + "\x04list\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\x04list\x125\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\rnextPageToken\"\xfd\x01\n" + + "3GrantsReaderServiceListGrantsForResourceTypeRequest\x127\n" + + "\x10resource_type_id\x18\x01 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\x0eresourceTypeId\x12'\n" + + "\tpage_size\x18\x02 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12,\n" + + "\n" + + "page_token\x18\x03 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x99\x01\n" + + "4GrantsReaderServiceListGrantsForResourceTypeResponse\x12*\n" + + "\x04list\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\x04list\x125\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\rnextPageToken2\xcd\x03\n" + + "\x13GrantsReaderService\x12o\n" + + "\bGetGrant\x120.c1.reader.v2.GrantsReaderServiceGetGrantRequest\x1a1.c1.reader.v2.GrantsReaderServiceGetGrantResponse\x12\x9f\x01\n" + + "\x18ListGrantsForEntitlement\x12@.c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementRequest\x1aA.c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementResponse\x12\xa2\x01\n" + + "\x19ListGrantsForResourceType\x12A.c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeRequest\x1aB.c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeResponseB3Z1github.com/conductorone/baton-sdk/pb/c1/reader/v2b\x06proto3" + +var file_c1_reader_v2_grant_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_c1_reader_v2_grant_proto_goTypes = []any{ + (*GrantsReaderServiceGetGrantRequest)(nil), // 0: 
c1.reader.v2.GrantsReaderServiceGetGrantRequest + (*GrantsReaderServiceGetGrantResponse)(nil), // 1: c1.reader.v2.GrantsReaderServiceGetGrantResponse + (*GrantsReaderServiceListGrantsForEntitlementRequest)(nil), // 2: c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementRequest + (*GrantsReaderServiceListGrantsForEntitlementResponse)(nil), // 3: c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementResponse + (*GrantsReaderServiceListGrantsForResourceTypeRequest)(nil), // 4: c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeRequest + (*GrantsReaderServiceListGrantsForResourceTypeResponse)(nil), // 5: c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeResponse + (*anypb.Any)(nil), // 6: google.protobuf.Any + (*v2.Grant)(nil), // 7: c1.connector.v2.Grant + (*v2.Entitlement)(nil), // 8: c1.connector.v2.Entitlement + (*v2.ResourceId)(nil), // 9: c1.connector.v2.ResourceId +} +var file_c1_reader_v2_grant_proto_depIdxs = []int32{ + 6, // 0: c1.reader.v2.GrantsReaderServiceGetGrantRequest.annotations:type_name -> google.protobuf.Any + 7, // 1: c1.reader.v2.GrantsReaderServiceGetGrantResponse.grant:type_name -> c1.connector.v2.Grant + 8, // 2: c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementRequest.entitlement:type_name -> c1.connector.v2.Entitlement + 9, // 3: c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementRequest.principal_id:type_name -> c1.connector.v2.ResourceId + 6, // 4: c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementRequest.annotations:type_name -> google.protobuf.Any + 7, // 5: c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementResponse.list:type_name -> c1.connector.v2.Grant + 6, // 6: c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeRequest.annotations:type_name -> google.protobuf.Any + 7, // 7: c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeResponse.list:type_name -> c1.connector.v2.Grant + 0, // 8: c1.reader.v2.GrantsReaderService.GetGrant:input_type -> c1.reader.v2.GrantsReaderServiceGetGrantRequest + 2, // 9: c1.reader.v2.GrantsReaderService.ListGrantsForEntitlement:input_type -> c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementRequest + 4, // 10: c1.reader.v2.GrantsReaderService.ListGrantsForResourceType:input_type -> c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeRequest + 1, // 11: c1.reader.v2.GrantsReaderService.GetGrant:output_type -> c1.reader.v2.GrantsReaderServiceGetGrantResponse + 3, // 12: c1.reader.v2.GrantsReaderService.ListGrantsForEntitlement:output_type -> c1.reader.v2.GrantsReaderServiceListGrantsForEntitlementResponse + 5, // 13: c1.reader.v2.GrantsReaderService.ListGrantsForResourceType:output_type -> c1.reader.v2.GrantsReaderServiceListGrantsForResourceTypeResponse + 11, // [11:14] is the sub-list for method output_type + 8, // [8:11] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_c1_reader_v2_grant_proto_init() } +func file_c1_reader_v2_grant_proto_init() { + if File_c1_reader_v2_grant_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_reader_v2_grant_proto_rawDesc), len(file_c1_reader_v2_grant_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_reader_v2_grant_proto_goTypes, + 
DependencyIndexes: file_c1_reader_v2_grant_proto_depIdxs, + MessageInfos: file_c1_reader_v2_grant_proto_msgTypes, + }.Build() + File_c1_reader_v2_grant_proto = out.File + file_c1_reader_v2_grant_proto_goTypes = nil + file_c1_reader_v2_grant_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/resource.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/resource.pb.go index 06a4ec90..351e91d8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/resource.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/resource.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/reader/v2/resource.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -25,7 +26,7 @@ const ( ) type ResourceTypesReaderServiceGetResourceTypeRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -57,11 +58,6 @@ func (x *ResourceTypesReaderServiceGetResourceTypeRequest) ProtoReflect() protor return mi.MessageOf(x) } -// Deprecated: Use ResourceTypesReaderServiceGetResourceTypeRequest.ProtoReflect.Descriptor instead. -func (*ResourceTypesReaderServiceGetResourceTypeRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_resource_proto_rawDescGZIP(), []int{0} -} - func (x *ResourceTypesReaderServiceGetResourceTypeRequest) GetResourceTypeId() string { if x != nil { return x.ResourceTypeId @@ -76,8 +72,32 @@ func (x *ResourceTypesReaderServiceGetResourceTypeRequest) GetAnnotations() []*a return nil } +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type ResourceTypesReaderServiceGetResourceTypeRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceTypeId string + Annotations []*anypb.Any +} + +func (b0 ResourceTypesReaderServiceGetResourceTypeRequest_builder) Build() *ResourceTypesReaderServiceGetResourceTypeRequest { + m0 := &ResourceTypesReaderServiceGetResourceTypeRequest{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceTypeId = b.ResourceTypeId + x.Annotations = b.Annotations + return m0 +} + type ResourceTypesReaderServiceGetResourceTypeResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceType *v2.ResourceType `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -108,11 +128,6 @@ func (x *ResourceTypesReaderServiceGetResourceTypeResponse) ProtoReflect() proto return mi.MessageOf(x) } -// Deprecated: Use ResourceTypesReaderServiceGetResourceTypeResponse.ProtoReflect.Descriptor instead. 
-func (*ResourceTypesReaderServiceGetResourceTypeResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_resource_proto_rawDescGZIP(), []int{1} -} - func (x *ResourceTypesReaderServiceGetResourceTypeResponse) GetResourceType() *v2.ResourceType { if x != nil { return x.ResourceType @@ -120,8 +135,37 @@ func (x *ResourceTypesReaderServiceGetResourceTypeResponse) GetResourceType() *v return nil } +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) SetResourceType(v *v2.ResourceType) { + x.ResourceType = v +} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) HasResourceType() bool { + if x == nil { + return false + } + return x.ResourceType != nil +} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) ClearResourceType() { + x.ResourceType = nil +} + +type ResourceTypesReaderServiceGetResourceTypeResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + ResourceType *v2.ResourceType +} + +func (b0 ResourceTypesReaderServiceGetResourceTypeResponse_builder) Build() *ResourceTypesReaderServiceGetResourceTypeResponse { + m0 := &ResourceTypesReaderServiceGetResourceTypeResponse{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceType = b.ResourceType + return m0 +} + type ResourcesReaderServiceGetResourceRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` ResourceId *v2.ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -153,11 +197,6 @@ func (x *ResourcesReaderServiceGetResourceRequest) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use ResourcesReaderServiceGetResourceRequest.ProtoReflect.Descriptor instead. -func (*ResourcesReaderServiceGetResourceRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_resource_proto_rawDescGZIP(), []int{2} -} - func (x *ResourcesReaderServiceGetResourceRequest) GetResourceId() *v2.ResourceId { if x != nil { return x.ResourceId @@ -172,8 +211,43 @@ func (x *ResourcesReaderServiceGetResourceRequest) GetAnnotations() []*anypb.Any return nil } +func (x *ResourcesReaderServiceGetResourceRequest) SetResourceId(v *v2.ResourceId) { + x.ResourceId = v +} + +func (x *ResourcesReaderServiceGetResourceRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *ResourcesReaderServiceGetResourceRequest) HasResourceId() bool { + if x == nil { + return false + } + return x.ResourceId != nil +} + +func (x *ResourcesReaderServiceGetResourceRequest) ClearResourceId() { + x.ResourceId = nil +} + +type ResourcesReaderServiceGetResourceRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceId *v2.ResourceId + Annotations []*anypb.Any +} + +func (b0 ResourcesReaderServiceGetResourceRequest_builder) Build() *ResourcesReaderServiceGetResourceRequest { + m0 := &ResourcesReaderServiceGetResourceRequest{} + b, x := &b0, m0 + _, _ = b, x + x.ResourceId = b.ResourceId + x.Annotations = b.Annotations + return m0 +} + type ResourcesReaderServiceGetResourceResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resource *v2.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -204,11 +278,6 @@ func (x *ResourcesReaderServiceGetResourceResponse) ProtoReflect() protoreflect. return mi.MessageOf(x) } -// Deprecated: Use ResourcesReaderServiceGetResourceResponse.ProtoReflect.Descriptor instead. -func (*ResourcesReaderServiceGetResourceResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_resource_proto_rawDescGZIP(), []int{3} -} - func (x *ResourcesReaderServiceGetResourceResponse) GetResource() *v2.Resource { if x != nil { return x.Resource @@ -216,90 +285,56 @@ func (x *ResourcesReaderServiceGetResourceResponse) GetResource() *v2.Resource { return nil } -var File_c1_reader_v2_resource_proto protoreflect.FileDescriptor +func (x *ResourcesReaderServiceGetResourceResponse) SetResource(v *v2.Resource) { + x.Resource = v +} -var file_c1_reader_v2_resource_proto_rawDesc = string([]byte{ - 0x0a, 0x1b, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x63, - 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x1e, 0x63, 0x31, 0x2f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x94, 0x01, 0x0a, 0x30, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x49, 0x64, 0x12, 0x36, - 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x77, 0x0a, 0x31, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0d, 0x72, 
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, - 0xaa, 0x01, 0x0a, 0x28, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x0b, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x62, 0x0a, 0x29, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, - 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x32, 0xb1, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x92, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x3e, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x3f, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x98, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x7e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x36, - 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, - 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, - 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_reader_v2_resource_proto_rawDescOnce sync.Once - file_c1_reader_v2_resource_proto_rawDescData []byte -) +func (x *ResourcesReaderServiceGetResourceResponse) HasResource() bool { + if x == nil { + return false + } + return x.Resource != nil +} + +func (x *ResourcesReaderServiceGetResourceResponse) ClearResource() { + x.Resource = nil +} + +type ResourcesReaderServiceGetResourceResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resource *v2.Resource +} -func file_c1_reader_v2_resource_proto_rawDescGZIP() []byte { - file_c1_reader_v2_resource_proto_rawDescOnce.Do(func() { - file_c1_reader_v2_resource_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_reader_v2_resource_proto_rawDesc), len(file_c1_reader_v2_resource_proto_rawDesc))) - }) - return file_c1_reader_v2_resource_proto_rawDescData +func (b0 ResourcesReaderServiceGetResourceResponse_builder) Build() *ResourcesReaderServiceGetResourceResponse { + m0 := &ResourcesReaderServiceGetResourceResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Resource = b.Resource + return m0 } +var File_c1_reader_v2_resource_proto protoreflect.FileDescriptor + +const file_c1_reader_v2_resource_proto_rawDesc = "" + + "\n" + + "\x1bc1/reader/v2/resource.proto\x12\fc1.reader.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x94\x01\n" + + "0ResourceTypesReaderServiceGetResourceTypeRequest\x12(\n" + + "\x10resource_type_id\x18\x01 \x01(\tR\x0eresourceTypeId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"w\n" + + "1ResourceTypesReaderServiceGetResourceTypeResponse\x12B\n" + + "\rresource_type\x18\x01 \x01(\v2\x1d.c1.connector.v2.ResourceTypeR\fresourceType\"\xaa\x01\n" + + "(ResourcesReaderServiceGetResourceRequest\x12F\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x01R\n" + + "resourceId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"b\n" + + ")ResourcesReaderServiceGetResourceResponse\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource2\xb1\x01\n" + + "\x1aResourceTypesReaderService\x12\x92\x01\n" + + "\x0fGetResourceType\x12>.c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeRequest\x1a?.c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeResponse2\x98\x01\n" + + "\x16ResourcesReaderService\x12~\n" + + 
"\vGetResource\x126.c1.reader.v2.ResourcesReaderServiceGetResourceRequest\x1a7.c1.reader.v2.ResourcesReaderServiceGetResourceResponseB3Z1github.com/conductorone/baton-sdk/pb/c1/reader/v2b\x06proto3" + var file_c1_reader_v2_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_c1_reader_v2_resource_proto_goTypes = []any{ (*ResourceTypesReaderServiceGetResourceTypeRequest)(nil), // 0: c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeRequest diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/resource_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/resource_protoopaque.pb.go new file mode 100644 index 00000000..47c49e26 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/resource_protoopaque.pb.go @@ -0,0 +1,392 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/reader/v2/resource.proto + +//go:build protoopaque + +package v2 + +import ( + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ResourceTypesReaderServiceGetResourceTypeRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,1,opt,name=resource_type_id,json=resourceTypeId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) Reset() { + *x = ResourceTypesReaderServiceGetResourceTypeRequest{} + mi := &file_c1_reader_v2_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceTypesReaderServiceGetResourceTypeRequest) ProtoMessage() {} + +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_resource_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +func (x *ResourceTypesReaderServiceGetResourceTypeRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type ResourceTypesReaderServiceGetResourceTypeRequest_builder struct { + _ [0]func() // Prevents comparability 
and use of unkeyed literals for the builder. + + ResourceTypeId string + Annotations []*anypb.Any +} + +func (b0 ResourceTypesReaderServiceGetResourceTypeRequest_builder) Build() *ResourceTypesReaderServiceGetResourceTypeRequest { + m0 := &ResourceTypesReaderServiceGetResourceTypeRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ResourceTypesReaderServiceGetResourceTypeResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceType *v2.ResourceType `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) Reset() { + *x = ResourceTypesReaderServiceGetResourceTypeResponse{} + mi := &file_c1_reader_v2_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceTypesReaderServiceGetResourceTypeResponse) ProtoMessage() {} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_resource_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) GetResourceType() *v2.ResourceType { + if x != nil { + return x.xxx_hidden_ResourceType + } + return nil +} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) SetResourceType(v *v2.ResourceType) { + x.xxx_hidden_ResourceType = v +} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) HasResourceType() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceType != nil +} + +func (x *ResourceTypesReaderServiceGetResourceTypeResponse) ClearResourceType() { + x.xxx_hidden_ResourceType = nil +} + +type ResourceTypesReaderServiceGetResourceTypeResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceType *v2.ResourceType +} + +func (b0 ResourceTypesReaderServiceGetResourceTypeResponse_builder) Build() *ResourceTypesReaderServiceGetResourceTypeResponse { + m0 := &ResourceTypesReaderServiceGetResourceTypeResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceType = b.ResourceType + return m0 +} + +type ResourcesReaderServiceGetResourceRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_ResourceId *v2.ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourcesReaderServiceGetResourceRequest) Reset() { + *x = ResourcesReaderServiceGetResourceRequest{} + mi := &file_c1_reader_v2_resource_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourcesReaderServiceGetResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourcesReaderServiceGetResourceRequest) ProtoMessage() {} + +func (x *ResourcesReaderServiceGetResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_resource_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourcesReaderServiceGetResourceRequest) GetResourceId() *v2.ResourceId { + if x != nil { + return x.xxx_hidden_ResourceId + } + return nil +} + +func (x *ResourcesReaderServiceGetResourceRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *ResourcesReaderServiceGetResourceRequest) SetResourceId(v *v2.ResourceId) { + x.xxx_hidden_ResourceId = v +} + +func (x *ResourcesReaderServiceGetResourceRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *ResourcesReaderServiceGetResourceRequest) HasResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ResourceId != nil +} + +func (x *ResourcesReaderServiceGetResourceRequest) ClearResourceId() { + x.xxx_hidden_ResourceId = nil +} + +type ResourcesReaderServiceGetResourceRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + ResourceId *v2.ResourceId + Annotations []*anypb.Any +} + +func (b0 ResourcesReaderServiceGetResourceRequest_builder) Build() *ResourcesReaderServiceGetResourceRequest { + m0 := &ResourcesReaderServiceGetResourceRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type ResourcesReaderServiceGetResourceResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *v2.Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourcesReaderServiceGetResourceResponse) Reset() { + *x = ResourcesReaderServiceGetResourceResponse{} + mi := &file_c1_reader_v2_resource_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourcesReaderServiceGetResourceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourcesReaderServiceGetResourceResponse) ProtoMessage() {} + +func (x *ResourcesReaderServiceGetResourceResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_resource_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ResourcesReaderServiceGetResourceResponse) GetResource() *v2.Resource { + if x != nil { + return x.xxx_hidden_Resource + } + return nil +} + +func (x *ResourcesReaderServiceGetResourceResponse) SetResource(v *v2.Resource) { + x.xxx_hidden_Resource = v +} + +func (x *ResourcesReaderServiceGetResourceResponse) HasResource() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resource != nil +} + +func (x *ResourcesReaderServiceGetResourceResponse) ClearResource() { + x.xxx_hidden_Resource = nil +} + +type ResourcesReaderServiceGetResourceResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resource *v2.Resource +} + +func (b0 ResourcesReaderServiceGetResourceResponse_builder) Build() *ResourcesReaderServiceGetResourceResponse { + m0 := &ResourcesReaderServiceGetResourceResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resource = b.Resource + return m0 +} + +var File_c1_reader_v2_resource_proto protoreflect.FileDescriptor + +const file_c1_reader_v2_resource_proto_rawDesc = "" + + "\n" + + "\x1bc1/reader/v2/resource.proto\x12\fc1.reader.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x94\x01\n" + + "0ResourceTypesReaderServiceGetResourceTypeRequest\x12(\n" + + "\x10resource_type_id\x18\x01 \x01(\tR\x0eresourceTypeId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"w\n" + + "1ResourceTypesReaderServiceGetResourceTypeResponse\x12B\n" + + "\rresource_type\x18\x01 \x01(\v2\x1d.c1.connector.v2.ResourceTypeR\fresourceType\"\xaa\x01\n" + + "(ResourcesReaderServiceGetResourceRequest\x12F\n" + + "\vresource_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x01R\n" + + "resourceId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"b\n" + + ")ResourcesReaderServiceGetResourceResponse\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource2\xb1\x01\n" + + "\x1aResourceTypesReaderService\x12\x92\x01\n" + + "\x0fGetResourceType\x12>.c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeRequest\x1a?.c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeResponse2\x98\x01\n" + + "\x16ResourcesReaderService\x12~\n" + + "\vGetResource\x126.c1.reader.v2.ResourcesReaderServiceGetResourceRequest\x1a7.c1.reader.v2.ResourcesReaderServiceGetResourceResponseB3Z1github.com/conductorone/baton-sdk/pb/c1/reader/v2b\x06proto3" + +var file_c1_reader_v2_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_c1_reader_v2_resource_proto_goTypes = []any{ + (*ResourceTypesReaderServiceGetResourceTypeRequest)(nil), // 0: c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeRequest + (*ResourceTypesReaderServiceGetResourceTypeResponse)(nil), // 1: c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeResponse + (*ResourcesReaderServiceGetResourceRequest)(nil), // 2: c1.reader.v2.ResourcesReaderServiceGetResourceRequest + (*ResourcesReaderServiceGetResourceResponse)(nil), // 3: c1.reader.v2.ResourcesReaderServiceGetResourceResponse + (*anypb.Any)(nil), // 4: google.protobuf.Any + (*v2.ResourceType)(nil), // 5: c1.connector.v2.ResourceType + (*v2.ResourceId)(nil), // 6: c1.connector.v2.ResourceId + (*v2.Resource)(nil), // 7: c1.connector.v2.Resource +} +var file_c1_reader_v2_resource_proto_depIdxs = []int32{ + 4, // 0: c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeRequest.annotations:type_name -> google.protobuf.Any + 5, // 1: c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeResponse.resource_type:type_name -> c1.connector.v2.ResourceType + 6, // 2: c1.reader.v2.ResourcesReaderServiceGetResourceRequest.resource_id:type_name -> c1.connector.v2.ResourceId + 4, // 3: c1.reader.v2.ResourcesReaderServiceGetResourceRequest.annotations:type_name -> google.protobuf.Any + 7, // 4: c1.reader.v2.ResourcesReaderServiceGetResourceResponse.resource:type_name -> c1.connector.v2.Resource + 0, // 5: c1.reader.v2.ResourceTypesReaderService.GetResourceType:input_type -> c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeRequest + 2, // 6: c1.reader.v2.ResourcesReaderService.GetResource:input_type -> 
c1.reader.v2.ResourcesReaderServiceGetResourceRequest + 1, // 7: c1.reader.v2.ResourceTypesReaderService.GetResourceType:output_type -> c1.reader.v2.ResourceTypesReaderServiceGetResourceTypeResponse + 3, // 8: c1.reader.v2.ResourcesReaderService.GetResource:output_type -> c1.reader.v2.ResourcesReaderServiceGetResourceResponse + 7, // [7:9] is the sub-list for method output_type + 5, // [5:7] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_c1_reader_v2_resource_proto_init() } +func file_c1_reader_v2_resource_proto_init() { + if File_c1_reader_v2_resource_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_reader_v2_resource_proto_rawDesc), len(file_c1_reader_v2_resource_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_c1_reader_v2_resource_proto_goTypes, + DependencyIndexes: file_c1_reader_v2_resource_proto_depIdxs, + MessageInfos: file_c1_reader_v2_resource_proto_msgTypes, + }.Build() + File_c1_reader_v2_resource_proto = out.File + file_c1_reader_v2_resource_proto_goTypes = nil + file_c1_reader_v2_resource_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.go index ec106f8e..454f1ee8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/reader/v2/sync.proto +//go:build !protoopaque + package v2 import ( @@ -13,7 +15,6 @@ import ( anypb "google.golang.org/protobuf/types/known/anypb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -25,7 +26,7 @@ const ( ) type SyncRun struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` StartedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` EndedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=ended_at,json=endedAt,proto3" json:"ended_at,omitempty"` @@ -61,11 +62,6 @@ func (x *SyncRun) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SyncRun.ProtoReflect.Descriptor instead. 
-func (*SyncRun) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_sync_proto_rawDescGZIP(), []int{0} -} - func (x *SyncRun) GetId() string { if x != nil { return x.Id @@ -108,8 +104,78 @@ func (x *SyncRun) GetParentSyncId() string { return "" } +func (x *SyncRun) SetId(v string) { + x.Id = v +} + +func (x *SyncRun) SetStartedAt(v *timestamppb.Timestamp) { + x.StartedAt = v +} + +func (x *SyncRun) SetEndedAt(v *timestamppb.Timestamp) { + x.EndedAt = v +} + +func (x *SyncRun) SetSyncToken(v string) { + x.SyncToken = v +} + +func (x *SyncRun) SetSyncType(v string) { + x.SyncType = v +} + +func (x *SyncRun) SetParentSyncId(v string) { + x.ParentSyncId = v +} + +func (x *SyncRun) HasStartedAt() bool { + if x == nil { + return false + } + return x.StartedAt != nil +} + +func (x *SyncRun) HasEndedAt() bool { + if x == nil { + return false + } + return x.EndedAt != nil +} + +func (x *SyncRun) ClearStartedAt() { + x.StartedAt = nil +} + +func (x *SyncRun) ClearEndedAt() { + x.EndedAt = nil +} + +type SyncRun_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + StartedAt *timestamppb.Timestamp + EndedAt *timestamppb.Timestamp + SyncToken string + SyncType string + ParentSyncId string +} + +func (b0 SyncRun_builder) Build() *SyncRun { + m0 := &SyncRun{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.StartedAt = b.StartedAt + x.EndedAt = b.EndedAt + x.SyncToken = b.SyncToken + x.SyncType = b.SyncType + x.ParentSyncId = b.ParentSyncId + return m0 +} + type SyncsReaderServiceGetSyncRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -141,11 +207,6 @@ func (x *SyncsReaderServiceGetSyncRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SyncsReaderServiceGetSyncRequest.ProtoReflect.Descriptor instead. -func (*SyncsReaderServiceGetSyncRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_sync_proto_rawDescGZIP(), []int{1} -} - func (x *SyncsReaderServiceGetSyncRequest) GetSyncId() string { if x != nil { return x.SyncId @@ -160,8 +221,32 @@ func (x *SyncsReaderServiceGetSyncRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *SyncsReaderServiceGetSyncRequest) SetSyncId(v string) { + x.SyncId = v +} + +func (x *SyncsReaderServiceGetSyncRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type SyncsReaderServiceGetSyncRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SyncId string + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceGetSyncRequest_builder) Build() *SyncsReaderServiceGetSyncRequest { + m0 := &SyncsReaderServiceGetSyncRequest{} + b, x := &b0, m0 + _, _ = b, x + x.SyncId = b.SyncId + x.Annotations = b.Annotations + return m0 +} + type SyncsReaderServiceGetSyncResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Sync *SyncRun `protobuf:"bytes,1,opt,name=sync,proto3" json:"sync,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -193,11 +278,6 @@ func (x *SyncsReaderServiceGetSyncResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use SyncsReaderServiceGetSyncResponse.ProtoReflect.Descriptor instead. -func (*SyncsReaderServiceGetSyncResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_sync_proto_rawDescGZIP(), []int{2} -} - func (x *SyncsReaderServiceGetSyncResponse) GetSync() *SyncRun { if x != nil { return x.Sync @@ -212,8 +292,43 @@ func (x *SyncsReaderServiceGetSyncResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *SyncsReaderServiceGetSyncResponse) SetSync(v *SyncRun) { + x.Sync = v +} + +func (x *SyncsReaderServiceGetSyncResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *SyncsReaderServiceGetSyncResponse) HasSync() bool { + if x == nil { + return false + } + return x.Sync != nil +} + +func (x *SyncsReaderServiceGetSyncResponse) ClearSync() { + x.Sync = nil +} + +type SyncsReaderServiceGetSyncResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Sync *SyncRun + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceGetSyncResponse_builder) Build() *SyncsReaderServiceGetSyncResponse { + m0 := &SyncsReaderServiceGetSyncResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Sync = b.Sync + x.Annotations = b.Annotations + return m0 +} + type SyncsReaderServiceListSyncsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` PageSize uint32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -246,11 +361,6 @@ func (x *SyncsReaderServiceListSyncsRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use SyncsReaderServiceListSyncsRequest.ProtoReflect.Descriptor instead. -func (*SyncsReaderServiceListSyncsRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_sync_proto_rawDescGZIP(), []int{3} -} - func (x *SyncsReaderServiceListSyncsRequest) GetPageSize() uint32 { if x != nil { return x.PageSize @@ -272,8 +382,38 @@ func (x *SyncsReaderServiceListSyncsRequest) GetAnnotations() []*anypb.Any { return nil } +func (x *SyncsReaderServiceListSyncsRequest) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *SyncsReaderServiceListSyncsRequest) SetPageToken(v string) { + x.PageToken = v +} + +func (x *SyncsReaderServiceListSyncsRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type SyncsReaderServiceListSyncsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + PageSize uint32 + PageToken string + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceListSyncsRequest_builder) Build() *SyncsReaderServiceListSyncsRequest { + m0 := &SyncsReaderServiceListSyncsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.PageSize = b.PageSize + x.PageToken = b.PageToken + x.Annotations = b.Annotations + return m0 +} + type SyncsReaderServiceListSyncsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Syncs []*SyncRun `protobuf:"bytes,1,rep,name=syncs,proto3" json:"syncs,omitempty"` NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"` @@ -306,11 +446,6 @@ func (x *SyncsReaderServiceListSyncsResponse) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use SyncsReaderServiceListSyncsResponse.ProtoReflect.Descriptor instead. -func (*SyncsReaderServiceListSyncsResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_sync_proto_rawDescGZIP(), []int{4} -} - func (x *SyncsReaderServiceListSyncsResponse) GetSyncs() []*SyncRun { if x != nil { return x.Syncs @@ -332,8 +467,38 @@ func (x *SyncsReaderServiceListSyncsResponse) GetAnnotations() []*anypb.Any { return nil } +func (x *SyncsReaderServiceListSyncsResponse) SetSyncs(v []*SyncRun) { + x.Syncs = v +} + +func (x *SyncsReaderServiceListSyncsResponse) SetNextPageToken(v string) { + x.NextPageToken = v +} + +func (x *SyncsReaderServiceListSyncsResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type SyncsReaderServiceListSyncsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Syncs []*SyncRun + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceListSyncsResponse_builder) Build() *SyncsReaderServiceListSyncsResponse { + m0 := &SyncsReaderServiceListSyncsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Syncs = b.Syncs + x.NextPageToken = b.NextPageToken + x.Annotations = b.Annotations + return m0 +} + type SyncsReaderServiceGetLatestFinishedSyncRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` SyncType string `protobuf:"bytes,2,opt,name=sync_type,json=syncType,proto3" json:"sync_type,omitempty"` unknownFields protoimpl.UnknownFields @@ -365,11 +530,6 @@ func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) ProtoReflect() protoref return mi.MessageOf(x) } -// Deprecated: Use SyncsReaderServiceGetLatestFinishedSyncRequest.ProtoReflect.Descriptor instead. 
-func (*SyncsReaderServiceGetLatestFinishedSyncRequest) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_sync_proto_rawDescGZIP(), []int{5} -} - func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) GetAnnotations() []*anypb.Any { if x != nil { return x.Annotations @@ -384,8 +544,32 @@ func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) GetSyncType() string { return "" } +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) SetSyncType(v string) { + x.SyncType = v +} + +type SyncsReaderServiceGetLatestFinishedSyncRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + SyncType string +} + +func (b0 SyncsReaderServiceGetLatestFinishedSyncRequest_builder) Build() *SyncsReaderServiceGetLatestFinishedSyncRequest { + m0 := &SyncsReaderServiceGetLatestFinishedSyncRequest{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.SyncType = b.SyncType + return m0 +} + type SyncsReaderServiceGetLatestFinishedSyncResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Sync *SyncRun `protobuf:"bytes,1,opt,name=sync,proto3" json:"sync,omitempty"` Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"` unknownFields protoimpl.UnknownFields @@ -417,11 +601,6 @@ func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) ProtoReflect() protore return mi.MessageOf(x) } -// Deprecated: Use SyncsReaderServiceGetLatestFinishedSyncResponse.ProtoReflect.Descriptor instead. -func (*SyncsReaderServiceGetLatestFinishedSyncResponse) Descriptor() ([]byte, []int) { - return file_c1_reader_v2_sync_proto_rawDescGZIP(), []int{6} -} - func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) GetSync() *SyncRun { if x != nil { return x.Sync @@ -436,132 +615,84 @@ func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) GetAnnotations() []*an return nil } -var File_c1_reader_v2_sync_proto protoreflect.FileDescriptor +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) SetSync(v *SyncRun) { + x.Sync = v +} -var file_c1_reader_v2_sync_proto_rawDesc = string([]byte{ - 0x0a, 0x17, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2f, 0x76, 0x32, 0x2f, 0x73, - 0x79, 0x6e, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x63, 0x31, 0x2e, 0x72, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xed, 0x01, 0x0a, - 0x07, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 
0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x41, 0x74, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x79, - 0x6e, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x73, 0x79, 0x6e, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x6e, - 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, - 0x6e, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x73, 0x0a, 0x20, - 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x86, 0x01, 0x0a, 0x21, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x52, 0x04, 0x73, 0x79, - 0x6e, 0x63, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x53, - 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x27, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, 0x40, 0x01, - 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2c, 0x0a, 0x0a, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, - 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0xc1, 0x01, 0x0a, 0x23, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x73, 0x79, 0x6e, 0x63, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x52, 0x05, - 0x73, 0x79, 0x6e, 0x63, 0x73, 0x12, 0x35, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, - 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, - 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x54, 0x79, 0x70, 0x65, 0x22, 0x94, 0x01, 0x0a, - 0x2f, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, - 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x29, 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, - 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x52, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x36, 0x0a, 0x0b, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x32, 0x89, 0x03, 0x0a, 0x12, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6a, 0x0a, 0x07, 0x47, 0x65, - 0x74, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2e, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, - 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, - 0x6e, 0x63, 0x73, 0x12, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x94, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, - 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, - 0x6e, 0x63, 0x12, 0x3c, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, - 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, - 0x69, 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x3d, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, - 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, - 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, - 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, - 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_reader_v2_sync_proto_rawDescOnce sync.Once - file_c1_reader_v2_sync_proto_rawDescData []byte -) +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} -func file_c1_reader_v2_sync_proto_rawDescGZIP() []byte { - file_c1_reader_v2_sync_proto_rawDescOnce.Do(func() { - file_c1_reader_v2_sync_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_reader_v2_sync_proto_rawDesc), len(file_c1_reader_v2_sync_proto_rawDesc))) - }) - return file_c1_reader_v2_sync_proto_rawDescData +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) HasSync() bool { + if x == nil { + return false + } + return x.Sync != nil } +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) ClearSync() { + x.Sync = nil +} + +type SyncsReaderServiceGetLatestFinishedSyncResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Sync *SyncRun + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceGetLatestFinishedSyncResponse_builder) Build() *SyncsReaderServiceGetLatestFinishedSyncResponse { + m0 := &SyncsReaderServiceGetLatestFinishedSyncResponse{} + b, x := &b0, m0 + _, _ = b, x + x.Sync = b.Sync + x.Annotations = b.Annotations + return m0 +} + +var File_c1_reader_v2_sync_proto protoreflect.FileDescriptor + +const file_c1_reader_v2_sync_proto_rawDesc = "" + + "\n" + + "\x17c1/reader/v2/sync.proto\x12\fc1.reader.v2\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xed\x01\n" + + "\aSyncRun\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x129\n" + + "\n" + + "started_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tstartedAt\x125\n" + + "\bended_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\aendedAt\x12\x1d\n" + + "\n" + + "sync_token\x18\x04 \x01(\tR\tsyncToken\x12\x1b\n" + + "\tsync_type\x18\x05 \x01(\tR\bsyncType\x12$\n" + + "\x0eparent_sync_id\x18\x06 \x01(\tR\fparentSyncId\"s\n" + + " SyncsReaderServiceGetSyncRequest\x12\x17\n" + + "\async_id\x18\x01 \x01(\tR\x06syncId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x86\x01\n" + + "!SyncsReaderServiceGetSyncResponse\x12)\n" + + "\x04sync\x18\x01 \x01(\v2\x15.c1.reader.v2.SyncRunR\x04sync\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xb3\x01\n" + + "\"SyncsReaderServiceListSyncsRequest\x12'\n" + + "\tpage_size\x18\x01 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12,\n" + + "\n" + + "page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xc1\x01\n" + + "#SyncsReaderServiceListSyncsResponse\x12+\n" + + "\x05syncs\x18\x01 \x03(\v2\x15.c1.reader.v2.SyncRunR\x05syncs\x125\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x85\x01\n" + + ".SyncsReaderServiceGetLatestFinishedSyncRequest\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12\x1b\n" + + "\tsync_type\x18\x02 \x01(\tR\bsyncType\"\x94\x01\n" + + "/SyncsReaderServiceGetLatestFinishedSyncResponse\x12)\n" + + "\x04sync\x18\x01 \x01(\v2\x15.c1.reader.v2.SyncRunR\x04sync\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations2\x89\x03\n" + + "\x12SyncsReaderService\x12j\n" + + "\aGetSync\x12..c1.reader.v2.SyncsReaderServiceGetSyncRequest\x1a/.c1.reader.v2.SyncsReaderServiceGetSyncResponse\x12p\n" + + "\tListSyncs\x120.c1.reader.v2.SyncsReaderServiceListSyncsRequest\x1a1.c1.reader.v2.SyncsReaderServiceListSyncsResponse\x12\x94\x01\n" + + "\x15GetLatestFinishedSync\x12<.c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncRequest\x1a=.c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncResponseB3Z1github.com/conductorone/baton-sdk/pb/c1/reader/v2b\x06proto3" + var file_c1_reader_v2_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_c1_reader_v2_sync_proto_goTypes = []any{ (*SyncRun)(nil), // 0: c1.reader.v2.SyncRun diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync_protoopaque.pb.go new file mode 100644 index 00000000..3276ac81 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync_protoopaque.pb.go @@ -0,0 
+1,769 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/reader/v2/sync.proto + +//go:build protoopaque + +package v2 + +import ( + _ "github.com/envoyproxy/protoc-gen-validate/validate" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SyncRun struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_StartedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3"` + xxx_hidden_EndedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=ended_at,json=endedAt,proto3"` + xxx_hidden_SyncToken string `protobuf:"bytes,4,opt,name=sync_token,json=syncToken,proto3"` + xxx_hidden_SyncType string `protobuf:"bytes,5,opt,name=sync_type,json=syncType,proto3"` + xxx_hidden_ParentSyncId string `protobuf:"bytes,6,opt,name=parent_sync_id,json=parentSyncId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncRun) Reset() { + *x = SyncRun{} + mi := &file_c1_reader_v2_sync_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncRun) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncRun) ProtoMessage() {} + +func (x *SyncRun) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_sync_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SyncRun) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *SyncRun) GetStartedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_StartedAt + } + return nil +} + +func (x *SyncRun) GetEndedAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_EndedAt + } + return nil +} + +func (x *SyncRun) GetSyncToken() string { + if x != nil { + return x.xxx_hidden_SyncToken + } + return "" +} + +func (x *SyncRun) GetSyncType() string { + if x != nil { + return x.xxx_hidden_SyncType + } + return "" +} + +func (x *SyncRun) GetParentSyncId() string { + if x != nil { + return x.xxx_hidden_ParentSyncId + } + return "" +} + +func (x *SyncRun) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *SyncRun) SetStartedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_StartedAt = v +} + +func (x *SyncRun) SetEndedAt(v *timestamppb.Timestamp) { + x.xxx_hidden_EndedAt = v +} + +func (x *SyncRun) SetSyncToken(v string) { + x.xxx_hidden_SyncToken = v +} + +func (x *SyncRun) SetSyncType(v string) { + x.xxx_hidden_SyncType = v +} + +func (x *SyncRun) SetParentSyncId(v string) { + x.xxx_hidden_ParentSyncId = v +} + +func (x *SyncRun) HasStartedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_StartedAt != nil +} + +func (x *SyncRun) HasEndedAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_EndedAt != nil +} + +func (x 
*SyncRun) ClearStartedAt() { + x.xxx_hidden_StartedAt = nil +} + +func (x *SyncRun) ClearEndedAt() { + x.xxx_hidden_EndedAt = nil +} + +type SyncRun_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Id string + StartedAt *timestamppb.Timestamp + EndedAt *timestamppb.Timestamp + SyncToken string + SyncType string + ParentSyncId string +} + +func (b0 SyncRun_builder) Build() *SyncRun { + m0 := &SyncRun{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_StartedAt = b.StartedAt + x.xxx_hidden_EndedAt = b.EndedAt + x.xxx_hidden_SyncToken = b.SyncToken + x.xxx_hidden_SyncType = b.SyncType + x.xxx_hidden_ParentSyncId = b.ParentSyncId + return m0 +} + +type SyncsReaderServiceGetSyncRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncsReaderServiceGetSyncRequest) Reset() { + *x = SyncsReaderServiceGetSyncRequest{} + mi := &file_c1_reader_v2_sync_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncsReaderServiceGetSyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncsReaderServiceGetSyncRequest) ProtoMessage() {} + +func (x *SyncsReaderServiceGetSyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_sync_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SyncsReaderServiceGetSyncRequest) GetSyncId() string { + if x != nil { + return x.xxx_hidden_SyncId + } + return "" +} + +func (x *SyncsReaderServiceGetSyncRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *SyncsReaderServiceGetSyncRequest) SetSyncId(v string) { + x.xxx_hidden_SyncId = v +} + +func (x *SyncsReaderServiceGetSyncRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type SyncsReaderServiceGetSyncRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + SyncId string + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceGetSyncRequest_builder) Build() *SyncsReaderServiceGetSyncRequest { + m0 := &SyncsReaderServiceGetSyncRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_SyncId = b.SyncId + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type SyncsReaderServiceGetSyncResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Sync *SyncRun `protobuf:"bytes,1,opt,name=sync,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncsReaderServiceGetSyncResponse) Reset() { + *x = SyncsReaderServiceGetSyncResponse{} + mi := &file_c1_reader_v2_sync_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncsReaderServiceGetSyncResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncsReaderServiceGetSyncResponse) ProtoMessage() {} + +func (x *SyncsReaderServiceGetSyncResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_sync_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SyncsReaderServiceGetSyncResponse) GetSync() *SyncRun { + if x != nil { + return x.xxx_hidden_Sync + } + return nil +} + +func (x *SyncsReaderServiceGetSyncResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *SyncsReaderServiceGetSyncResponse) SetSync(v *SyncRun) { + x.xxx_hidden_Sync = v +} + +func (x *SyncsReaderServiceGetSyncResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *SyncsReaderServiceGetSyncResponse) HasSync() bool { + if x == nil { + return false + } + return x.xxx_hidden_Sync != nil +} + +func (x *SyncsReaderServiceGetSyncResponse) ClearSync() { + x.xxx_hidden_Sync = nil +} + +type SyncsReaderServiceGetSyncResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Sync *SyncRun + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceGetSyncResponse_builder) Build() *SyncsReaderServiceGetSyncResponse { + m0 := &SyncsReaderServiceGetSyncResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Sync = b.Sync + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type SyncsReaderServiceListSyncsRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_PageSize uint32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3"` + xxx_hidden_PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncsReaderServiceListSyncsRequest) Reset() { + *x = SyncsReaderServiceListSyncsRequest{} + mi := &file_c1_reader_v2_sync_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncsReaderServiceListSyncsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncsReaderServiceListSyncsRequest) ProtoMessage() {} + +func (x *SyncsReaderServiceListSyncsRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_sync_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SyncsReaderServiceListSyncsRequest) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *SyncsReaderServiceListSyncsRequest) GetPageToken() string { + if x != nil { + return x.xxx_hidden_PageToken + } + return "" +} + +func (x *SyncsReaderServiceListSyncsRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *SyncsReaderServiceListSyncsRequest) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *SyncsReaderServiceListSyncsRequest) SetPageToken(v string) { + x.xxx_hidden_PageToken = v +} + +func (x *SyncsReaderServiceListSyncsRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type SyncsReaderServiceListSyncsRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + PageSize uint32 + PageToken string + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceListSyncsRequest_builder) Build() *SyncsReaderServiceListSyncsRequest { + m0 := &SyncsReaderServiceListSyncsRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_PageSize = b.PageSize + x.xxx_hidden_PageToken = b.PageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type SyncsReaderServiceListSyncsResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Syncs *[]*SyncRun `protobuf:"bytes,1,rep,name=syncs,proto3"` + xxx_hidden_NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncsReaderServiceListSyncsResponse) Reset() { + *x = SyncsReaderServiceListSyncsResponse{} + mi := &file_c1_reader_v2_sync_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncsReaderServiceListSyncsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncsReaderServiceListSyncsResponse) ProtoMessage() {} + +func (x *SyncsReaderServiceListSyncsResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_sync_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SyncsReaderServiceListSyncsResponse) GetSyncs() []*SyncRun { + if x != nil { + if x.xxx_hidden_Syncs != nil { + return *x.xxx_hidden_Syncs + } + } + return nil +} + +func (x *SyncsReaderServiceListSyncsResponse) GetNextPageToken() string { + if x != nil { + return x.xxx_hidden_NextPageToken + } + return "" +} + +func (x *SyncsReaderServiceListSyncsResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *SyncsReaderServiceListSyncsResponse) SetSyncs(v []*SyncRun) { + x.xxx_hidden_Syncs = &v +} + +func (x *SyncsReaderServiceListSyncsResponse) SetNextPageToken(v string) { + x.xxx_hidden_NextPageToken = v +} + +func (x *SyncsReaderServiceListSyncsResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type SyncsReaderServiceListSyncsResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Syncs []*SyncRun + NextPageToken string + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceListSyncsResponse_builder) Build() *SyncsReaderServiceListSyncsResponse { + m0 := &SyncsReaderServiceListSyncsResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Syncs = &b.Syncs + x.xxx_hidden_NextPageToken = b.NextPageToken + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +type SyncsReaderServiceGetLatestFinishedSyncRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_SyncType string `protobuf:"bytes,2,opt,name=sync_type,json=syncType,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) Reset() { + *x = SyncsReaderServiceGetLatestFinishedSyncRequest{} + mi := &file_c1_reader_v2_sync_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncsReaderServiceGetLatestFinishedSyncRequest) ProtoMessage() {} + +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_sync_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) GetSyncType() string { + if x != nil { + return x.xxx_hidden_SyncType + } + return "" +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) SetSyncType(v string) { + x.xxx_hidden_SyncType = v +} + +type SyncsReaderServiceGetLatestFinishedSyncRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any + SyncType string +} + +func (b0 SyncsReaderServiceGetLatestFinishedSyncRequest_builder) Build() *SyncsReaderServiceGetLatestFinishedSyncRequest { + m0 := &SyncsReaderServiceGetLatestFinishedSyncRequest{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_SyncType = b.SyncType + return m0 +} + +type SyncsReaderServiceGetLatestFinishedSyncResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Sync *SyncRun `protobuf:"bytes,1,opt,name=sync,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) Reset() { + *x = SyncsReaderServiceGetLatestFinishedSyncResponse{} + mi := &file_c1_reader_v2_sync_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncsReaderServiceGetLatestFinishedSyncResponse) ProtoMessage() {} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) ProtoReflect() protoreflect.Message { + mi := &file_c1_reader_v2_sync_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) GetSync() *SyncRun { + if x != nil { + return x.xxx_hidden_Sync + } + return nil +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) SetSync(v *SyncRun) { + x.xxx_hidden_Sync = v +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) HasSync() bool { + if x == nil { + return false + } + return x.xxx_hidden_Sync != nil +} + +func (x *SyncsReaderServiceGetLatestFinishedSyncResponse) ClearSync() { + x.xxx_hidden_Sync = nil +} + +type SyncsReaderServiceGetLatestFinishedSyncResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Sync *SyncRun + Annotations []*anypb.Any +} + +func (b0 SyncsReaderServiceGetLatestFinishedSyncResponse_builder) Build() *SyncsReaderServiceGetLatestFinishedSyncResponse { + m0 := &SyncsReaderServiceGetLatestFinishedSyncResponse{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Sync = b.Sync + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + +var File_c1_reader_v2_sync_proto protoreflect.FileDescriptor + +const file_c1_reader_v2_sync_proto_rawDesc = "" + + "\n" + + "\x17c1/reader/v2/sync.proto\x12\fc1.reader.v2\x1a\x19google/protobuf/any.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xed\x01\n" + + "\aSyncRun\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x129\n" + + "\n" + + "started_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tstartedAt\x125\n" + + "\bended_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\aendedAt\x12\x1d\n" + + "\n" + + "sync_token\x18\x04 \x01(\tR\tsyncToken\x12\x1b\n" + + "\tsync_type\x18\x05 \x01(\tR\bsyncType\x12$\n" + + "\x0eparent_sync_id\x18\x06 \x01(\tR\fparentSyncId\"s\n" + + " SyncsReaderServiceGetSyncRequest\x12\x17\n" + + "\async_id\x18\x01 \x01(\tR\x06syncId\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x86\x01\n" + + "!SyncsReaderServiceGetSyncResponse\x12)\n" + + "\x04sync\x18\x01 \x01(\v2\x15.c1.reader.v2.SyncRunR\x04sync\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xb3\x01\n" + + "\"SyncsReaderServiceListSyncsRequest\x12'\n" + + "\tpage_size\x18\x01 \x01(\rB\n" + + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12,\n" + + "\n" + + "page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\tpageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xc1\x01\n" + + "#SyncsReaderServiceListSyncsResponse\x12+\n" + + "\x05syncs\x18\x01 \x03(\v2\x15.c1.reader.v2.SyncRunR\x05syncs\x125\n" + + "\x0fnext_page_token\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\x10\xd0\x01\x01R\rnextPageToken\x126\n" + + "\vannotations\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\x85\x01\n" + + ".SyncsReaderServiceGetLatestFinishedSyncRequest\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12\x1b\n" + + "\tsync_type\x18\x02 \x01(\tR\bsyncType\"\x94\x01\n" + + "/SyncsReaderServiceGetLatestFinishedSyncResponse\x12)\n" + + "\x04sync\x18\x01 \x01(\v2\x15.c1.reader.v2.SyncRunR\x04sync\x126\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations2\x89\x03\n" + + "\x12SyncsReaderService\x12j\n" + + "\aGetSync\x12..c1.reader.v2.SyncsReaderServiceGetSyncRequest\x1a/.c1.reader.v2.SyncsReaderServiceGetSyncResponse\x12p\n" + + "\tListSyncs\x120.c1.reader.v2.SyncsReaderServiceListSyncsRequest\x1a1.c1.reader.v2.SyncsReaderServiceListSyncsResponse\x12\x94\x01\n" + + "\x15GetLatestFinishedSync\x12<.c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncRequest\x1a=.c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncResponseB3Z1github.com/conductorone/baton-sdk/pb/c1/reader/v2b\x06proto3" + +var file_c1_reader_v2_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_c1_reader_v2_sync_proto_goTypes = []any{ + (*SyncRun)(nil), // 0: c1.reader.v2.SyncRun + (*SyncsReaderServiceGetSyncRequest)(nil), // 1: c1.reader.v2.SyncsReaderServiceGetSyncRequest + (*SyncsReaderServiceGetSyncResponse)(nil), // 2: c1.reader.v2.SyncsReaderServiceGetSyncResponse + (*SyncsReaderServiceListSyncsRequest)(nil), // 3: c1.reader.v2.SyncsReaderServiceListSyncsRequest + 
(*SyncsReaderServiceListSyncsResponse)(nil), // 4: c1.reader.v2.SyncsReaderServiceListSyncsResponse + (*SyncsReaderServiceGetLatestFinishedSyncRequest)(nil), // 5: c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncRequest + (*SyncsReaderServiceGetLatestFinishedSyncResponse)(nil), // 6: c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncResponse + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*anypb.Any)(nil), // 8: google.protobuf.Any +} +var file_c1_reader_v2_sync_proto_depIdxs = []int32{ + 7, // 0: c1.reader.v2.SyncRun.started_at:type_name -> google.protobuf.Timestamp + 7, // 1: c1.reader.v2.SyncRun.ended_at:type_name -> google.protobuf.Timestamp + 8, // 2: c1.reader.v2.SyncsReaderServiceGetSyncRequest.annotations:type_name -> google.protobuf.Any + 0, // 3: c1.reader.v2.SyncsReaderServiceGetSyncResponse.sync:type_name -> c1.reader.v2.SyncRun + 8, // 4: c1.reader.v2.SyncsReaderServiceGetSyncResponse.annotations:type_name -> google.protobuf.Any + 8, // 5: c1.reader.v2.SyncsReaderServiceListSyncsRequest.annotations:type_name -> google.protobuf.Any + 0, // 6: c1.reader.v2.SyncsReaderServiceListSyncsResponse.syncs:type_name -> c1.reader.v2.SyncRun + 8, // 7: c1.reader.v2.SyncsReaderServiceListSyncsResponse.annotations:type_name -> google.protobuf.Any + 8, // 8: c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncRequest.annotations:type_name -> google.protobuf.Any + 0, // 9: c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncResponse.sync:type_name -> c1.reader.v2.SyncRun + 8, // 10: c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncResponse.annotations:type_name -> google.protobuf.Any + 1, // 11: c1.reader.v2.SyncsReaderService.GetSync:input_type -> c1.reader.v2.SyncsReaderServiceGetSyncRequest + 3, // 12: c1.reader.v2.SyncsReaderService.ListSyncs:input_type -> c1.reader.v2.SyncsReaderServiceListSyncsRequest + 5, // 13: c1.reader.v2.SyncsReaderService.GetLatestFinishedSync:input_type -> c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncRequest + 2, // 14: c1.reader.v2.SyncsReaderService.GetSync:output_type -> c1.reader.v2.SyncsReaderServiceGetSyncResponse + 4, // 15: c1.reader.v2.SyncsReaderService.ListSyncs:output_type -> c1.reader.v2.SyncsReaderServiceListSyncsResponse + 6, // 16: c1.reader.v2.SyncsReaderService.GetLatestFinishedSync:output_type -> c1.reader.v2.SyncsReaderServiceGetLatestFinishedSyncResponse + 14, // [14:17] is the sub-list for method output_type + 11, // [11:14] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_c1_reader_v2_sync_proto_init() } +func file_c1_reader_v2_sync_proto_init() { + if File_c1_reader_v2_sync_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_reader_v2_sync_proto_rawDesc), len(file_c1_reader_v2_sync_proto_rawDesc)), + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_c1_reader_v2_sync_proto_goTypes, + DependencyIndexes: file_c1_reader_v2_sync_proto_depIdxs, + MessageInfos: file_c1_reader_v2_sync_proto_msgTypes, + }.Build() + File_c1_reader_v2_sync_proto = out.File + file_c1_reader_v2_sync_proto_goTypes = nil + file_c1_reader_v2_sync_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport.pb.go 
b/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport.pb.go index a899c5f6..a2643683 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport.pb.go @@ -1,9 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/transport/v1/transport.proto +//go:build !protoopaque + package v1 import ( @@ -12,7 +14,6 @@ import ( anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -24,7 +25,7 @@ const ( ) type Request struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"` Req *anypb.Any `protobuf:"bytes,2,opt,name=req,proto3" json:"req,omitempty"` Headers *structpb.Struct `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` @@ -57,11 +58,6 @@ func (x *Request) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Request.ProtoReflect.Descriptor instead. -func (*Request) Descriptor() ([]byte, []int) { - return file_c1_transport_v1_transport_proto_rawDescGZIP(), []int{0} -} - func (x *Request) GetMethod() string { if x != nil { return x.Method @@ -83,8 +79,60 @@ func (x *Request) GetHeaders() *structpb.Struct { return nil } +func (x *Request) SetMethod(v string) { + x.Method = v +} + +func (x *Request) SetReq(v *anypb.Any) { + x.Req = v +} + +func (x *Request) SetHeaders(v *structpb.Struct) { + x.Headers = v +} + +func (x *Request) HasReq() bool { + if x == nil { + return false + } + return x.Req != nil +} + +func (x *Request) HasHeaders() bool { + if x == nil { + return false + } + return x.Headers != nil +} + +func (x *Request) ClearReq() { + x.Req = nil +} + +func (x *Request) ClearHeaders() { + x.Headers = nil +} + +type Request_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Method string + Req *anypb.Any + Headers *structpb.Struct +} + +func (b0 Request_builder) Build() *Request { + m0 := &Request{} + b, x := &b0, m0 + _, _ = b, x + x.Method = b.Method + x.Req = b.Req + x.Headers = b.Headers + return m0 +} + type Response struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` Resp *anypb.Any `protobuf:"bytes,1,opt,name=resp,proto3" json:"resp,omitempty"` Status *anypb.Any `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` Headers *structpb.Struct `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` @@ -118,11 +166,6 @@ func (x *Response) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Response.ProtoReflect.Descriptor instead. 
-func (*Response) Descriptor() ([]byte, []int) { - return file_c1_transport_v1_transport_proto_rawDescGZIP(), []int{1} -} - func (x *Response) GetResp() *anypb.Any { if x != nil { return x.Resp @@ -151,55 +194,101 @@ func (x *Response) GetTrailers() *structpb.Struct { return nil } -var File_c1_transport_v1_transport_proto protoreflect.FileDescriptor +func (x *Response) SetResp(v *anypb.Any) { + x.Resp = v +} -var file_c1_transport_v1_transport_proto_rawDesc = string([]byte{ - 0x0a, 0x1f, 0x63, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x76, - 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x2e, - 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x26, - 0x0a, 0x03, 0x72, 0x65, 0x71, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x03, 0x72, 0x65, 0x71, 0x12, 0x31, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xca, 0x01, 0x0a, 0x08, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x04, 0x72, 0x65, 0x73, 0x70, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x72, 0x65, 0x73, 0x70, - 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x31, - 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x12, 0x33, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x74, 0x72, - 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, - 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, - 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x76, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 
-}) - -var ( - file_c1_transport_v1_transport_proto_rawDescOnce sync.Once - file_c1_transport_v1_transport_proto_rawDescData []byte -) +func (x *Response) SetStatus(v *anypb.Any) { + x.Status = v +} -func file_c1_transport_v1_transport_proto_rawDescGZIP() []byte { - file_c1_transport_v1_transport_proto_rawDescOnce.Do(func() { - file_c1_transport_v1_transport_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_transport_v1_transport_proto_rawDesc), len(file_c1_transport_v1_transport_proto_rawDesc))) - }) - return file_c1_transport_v1_transport_proto_rawDescData +func (x *Response) SetHeaders(v *structpb.Struct) { + x.Headers = v } +func (x *Response) SetTrailers(v *structpb.Struct) { + x.Trailers = v +} + +func (x *Response) HasResp() bool { + if x == nil { + return false + } + return x.Resp != nil +} + +func (x *Response) HasStatus() bool { + if x == nil { + return false + } + return x.Status != nil +} + +func (x *Response) HasHeaders() bool { + if x == nil { + return false + } + return x.Headers != nil +} + +func (x *Response) HasTrailers() bool { + if x == nil { + return false + } + return x.Trailers != nil +} + +func (x *Response) ClearResp() { + x.Resp = nil +} + +func (x *Response) ClearStatus() { + x.Status = nil +} + +func (x *Response) ClearHeaders() { + x.Headers = nil +} + +func (x *Response) ClearTrailers() { + x.Trailers = nil +} + +type Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Resp *anypb.Any + Status *anypb.Any + Headers *structpb.Struct + Trailers *structpb.Struct +} + +func (b0 Response_builder) Build() *Response { + m0 := &Response{} + b, x := &b0, m0 + _, _ = b, x + x.Resp = b.Resp + x.Status = b.Status + x.Headers = b.Headers + x.Trailers = b.Trailers + return m0 +} + +var File_c1_transport_v1_transport_proto protoreflect.FileDescriptor + +const file_c1_transport_v1_transport_proto_rawDesc = "" + + "\n" + + "\x1fc1/transport/v1/transport.proto\x12\x0fc1.transport.v1\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\"|\n" + + "\aRequest\x12\x16\n" + + "\x06method\x18\x01 \x01(\tR\x06method\x12&\n" + + "\x03req\x18\x02 \x01(\v2\x14.google.protobuf.AnyR\x03req\x121\n" + + "\aheaders\x18\x03 \x01(\v2\x17.google.protobuf.StructR\aheaders\"\xca\x01\n" + + "\bResponse\x12(\n" + + "\x04resp\x18\x01 \x01(\v2\x14.google.protobuf.AnyR\x04resp\x12,\n" + + "\x06status\x18\x02 \x01(\v2\x14.google.protobuf.AnyR\x06status\x121\n" + + "\aheaders\x18\x03 \x01(\v2\x17.google.protobuf.StructR\aheaders\x123\n" + + "\btrailers\x18\x04 \x01(\v2\x17.google.protobuf.StructR\btrailersB6Z4github.com/conductorone/baton-sdk/pb/c1/transport/v1b\x06proto3" + var file_c1_transport_v1_transport_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_c1_transport_v1_transport_proto_goTypes = []any{ (*Request)(nil), // 0: c1.transport.v1.Request diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport.proto b/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport.proto deleted file mode 100644 index 94efa33c..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package c1.svc.transport.v1; - -import "google/protobuf/struct.proto"; -import "google/protobuf/any.proto"; - - -option go_package = "github.com/conductorone/baton-sdk/pb/c1/transport/v1"; - -message Request { - string method = 1; - google.protobuf.Any req = 2; - google.protobuf.Struct 
headers = 3; -} - -message Response { - google.protobuf.Any resp = 1; - google.protobuf.Any status = 2; - google.protobuf.Struct headers = 3; - google.protobuf.Struct trailers = 4; -} \ No newline at end of file diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport_protoopaque.pb.go new file mode 100644 index 00000000..07b8ca04 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/transport/v1/transport_protoopaque.pb.go @@ -0,0 +1,335 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/transport/v1/transport.proto + +//go:build protoopaque + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Request struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Method string `protobuf:"bytes,1,opt,name=method,proto3"` + xxx_hidden_Req *anypb.Any `protobuf:"bytes,2,opt,name=req,proto3"` + xxx_hidden_Headers *structpb.Struct `protobuf:"bytes,3,opt,name=headers,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Request) Reset() { + *x = Request{} + mi := &file_c1_transport_v1_transport_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_c1_transport_v1_transport_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Request) GetMethod() string { + if x != nil { + return x.xxx_hidden_Method + } + return "" +} + +func (x *Request) GetReq() *anypb.Any { + if x != nil { + return x.xxx_hidden_Req + } + return nil +} + +func (x *Request) GetHeaders() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Headers + } + return nil +} + +func (x *Request) SetMethod(v string) { + x.xxx_hidden_Method = v +} + +func (x *Request) SetReq(v *anypb.Any) { + x.xxx_hidden_Req = v +} + +func (x *Request) SetHeaders(v *structpb.Struct) { + x.xxx_hidden_Headers = v +} + +func (x *Request) HasReq() bool { + if x == nil { + return false + } + return x.xxx_hidden_Req != nil +} + +func (x *Request) HasHeaders() bool { + if x == nil { + return false + } + return x.xxx_hidden_Headers != nil +} + +func (x *Request) ClearReq() { + x.xxx_hidden_Req = nil +} + +func (x *Request) ClearHeaders() { + x.xxx_hidden_Headers = nil +} + +type Request_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Method string + Req *anypb.Any + Headers *structpb.Struct +} + +func (b0 Request_builder) Build() *Request { + m0 := &Request{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Method = b.Method + x.xxx_hidden_Req = b.Req + x.xxx_hidden_Headers = b.Headers + return m0 +} + +type Response struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resp *anypb.Any `protobuf:"bytes,1,opt,name=resp,proto3"` + xxx_hidden_Status *anypb.Any `protobuf:"bytes,2,opt,name=status,proto3"` + xxx_hidden_Headers *structpb.Struct `protobuf:"bytes,3,opt,name=headers,proto3"` + xxx_hidden_Trailers *structpb.Struct `protobuf:"bytes,4,opt,name=trailers,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Response) Reset() { + *x = Response{} + mi := &file_c1_transport_v1_transport_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_c1_transport_v1_transport_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Response) GetResp() *anypb.Any { + if x != nil { + return x.xxx_hidden_Resp + } + return nil +} + +func (x *Response) GetStatus() *anypb.Any { + if x != nil { + return x.xxx_hidden_Status + } + return nil +} + +func (x *Response) GetHeaders() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Headers + } + return nil +} + +func (x *Response) GetTrailers() *structpb.Struct { + if x != nil { + return x.xxx_hidden_Trailers + } + return nil +} + +func (x *Response) SetResp(v *anypb.Any) { + x.xxx_hidden_Resp = v +} + +func (x *Response) SetStatus(v *anypb.Any) { + x.xxx_hidden_Status = v +} + +func (x *Response) SetHeaders(v *structpb.Struct) { + x.xxx_hidden_Headers = v +} + +func (x *Response) SetTrailers(v *structpb.Struct) { + x.xxx_hidden_Trailers = v +} + +func (x *Response) HasResp() bool { + if x == nil { + return false + } + return x.xxx_hidden_Resp != nil +} + +func (x *Response) HasStatus() bool { + if x == nil { + return false + } + return x.xxx_hidden_Status != nil +} + +func (x *Response) HasHeaders() bool { + if x == nil { + return false + } + return x.xxx_hidden_Headers != nil +} + +func (x *Response) HasTrailers() bool { + if x == nil { + return false + } + return x.xxx_hidden_Trailers != nil +} + +func (x *Response) ClearResp() { + x.xxx_hidden_Resp = nil +} + +func (x *Response) ClearStatus() { + x.xxx_hidden_Status = nil +} + +func (x *Response) ClearHeaders() { + x.xxx_hidden_Headers = nil +} + +func (x *Response) ClearTrailers() { + x.xxx_hidden_Trailers = nil +} + +type Response_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Resp *anypb.Any + Status *anypb.Any + Headers *structpb.Struct + Trailers *structpb.Struct +} + +func (b0 Response_builder) Build() *Response { + m0 := &Response{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Resp = b.Resp + x.xxx_hidden_Status = b.Status + x.xxx_hidden_Headers = b.Headers + x.xxx_hidden_Trailers = b.Trailers + return m0 +} + +var File_c1_transport_v1_transport_proto protoreflect.FileDescriptor + +const file_c1_transport_v1_transport_proto_rawDesc = "" + + "\n" + + "\x1fc1/transport/v1/transport.proto\x12\x0fc1.transport.v1\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\"|\n" + + "\aRequest\x12\x16\n" + + "\x06method\x18\x01 \x01(\tR\x06method\x12&\n" + + "\x03req\x18\x02 \x01(\v2\x14.google.protobuf.AnyR\x03req\x121\n" + + "\aheaders\x18\x03 \x01(\v2\x17.google.protobuf.StructR\aheaders\"\xca\x01\n" + + "\bResponse\x12(\n" + + "\x04resp\x18\x01 \x01(\v2\x14.google.protobuf.AnyR\x04resp\x12,\n" + + "\x06status\x18\x02 \x01(\v2\x14.google.protobuf.AnyR\x06status\x121\n" + + "\aheaders\x18\x03 \x01(\v2\x17.google.protobuf.StructR\aheaders\x123\n" + + "\btrailers\x18\x04 \x01(\v2\x17.google.protobuf.StructR\btrailersB6Z4github.com/conductorone/baton-sdk/pb/c1/transport/v1b\x06proto3" + +var file_c1_transport_v1_transport_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_c1_transport_v1_transport_proto_goTypes = []any{ + (*Request)(nil), // 0: c1.transport.v1.Request + (*Response)(nil), // 1: c1.transport.v1.Response + (*anypb.Any)(nil), // 2: google.protobuf.Any + (*structpb.Struct)(nil), // 3: google.protobuf.Struct +} +var file_c1_transport_v1_transport_proto_depIdxs = []int32{ + 2, // 0: c1.transport.v1.Request.req:type_name -> google.protobuf.Any + 3, // 1: c1.transport.v1.Request.headers:type_name -> google.protobuf.Struct + 2, // 2: c1.transport.v1.Response.resp:type_name -> google.protobuf.Any + 2, // 3: c1.transport.v1.Response.status:type_name -> google.protobuf.Any + 3, // 4: c1.transport.v1.Response.headers:type_name -> google.protobuf.Struct + 3, // 5: c1.transport.v1.Response.trailers:type_name -> google.protobuf.Struct + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_c1_transport_v1_transport_proto_init() } +func file_c1_transport_v1_transport_proto_init() { + if File_c1_transport_v1_transport_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_transport_v1_transport_proto_rawDesc), len(file_c1_transport_v1_transport_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_transport_v1_transport_proto_goTypes, + DependencyIndexes: file_c1_transport_v1_transport_proto_depIdxs, + MessageInfos: file_c1_transport_v1_transport_proto_msgTypes, + }.Build() + File_c1_transport_v1_transport_proto = out.File + file_c1_transport_v1_transport_proto_goTypes = nil + file_c1_transport_v1_transport_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/utls/v1/tls.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/utls/v1/tls.pb.go index fcfe5b8c..b9c28da5 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/utls/v1/tls.pb.go +++ 
b/vendor/github.com/conductorone/baton-sdk/pb/c1/utls/v1/tls.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc (unknown) // source: c1/utls/v1/tls.proto +//go:build !protoopaque + package v1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - sync "sync" unsafe "unsafe" ) @@ -22,7 +23,7 @@ const ( ) type Credential struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState `protogen:"hybrid.v1"` CaCert []byte `protobuf:"bytes,1,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Cert []byte `protobuf:"bytes,3,opt,name=cert,proto3" json:"cert,omitempty"` @@ -55,11 +56,6 @@ func (x *Credential) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Credential.ProtoReflect.Descriptor instead. -func (*Credential) Descriptor() ([]byte, []int) { - return file_c1_utls_v1_tls_proto_rawDescGZIP(), []int{0} -} - func (x *Credential) GetCaCert() []byte { if x != nil { return x.CaCert @@ -81,34 +77,57 @@ func (x *Credential) GetCert() []byte { return nil } -var File_c1_utls_v1_tls_proto protoreflect.FileDescriptor +func (x *Credential) SetCaCert(v []byte) { + if v == nil { + v = []byte{} + } + x.CaCert = v +} -var file_c1_utls_v1_tls_proto_rawDesc = string([]byte{ - 0x0a, 0x14, 0x63, 0x31, 0x2f, 0x75, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x6c, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x63, 0x31, 0x2e, 0x75, 0x74, 0x6c, 0x73, 0x2e, - 0x76, 0x31, 0x22, 0x4b, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x12, 0x17, 0x0a, 0x07, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x65, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, 0x72, 0x74, 0x42, - 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, - 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, - 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x75, 0x74, 0x6c, 0x73, 0x2f, - 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_c1_utls_v1_tls_proto_rawDescOnce sync.Once - file_c1_utls_v1_tls_proto_rawDescData []byte -) +func (x *Credential) SetKey(v []byte) { + if v == nil { + v = []byte{} + } + x.Key = v +} -func file_c1_utls_v1_tls_proto_rawDescGZIP() []byte { - file_c1_utls_v1_tls_proto_rawDescOnce.Do(func() { - file_c1_utls_v1_tls_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_utls_v1_tls_proto_rawDesc), len(file_c1_utls_v1_tls_proto_rawDesc))) - }) - return file_c1_utls_v1_tls_proto_rawDescData +func (x *Credential) SetCert(v []byte) { + if v == nil { + v = []byte{} + } + x.Cert = v } +type Credential_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + CaCert []byte + Key []byte + Cert []byte +} + +func (b0 Credential_builder) Build() *Credential { + m0 := &Credential{} + b, x := &b0, m0 + _, _ = b, x + x.CaCert = b.CaCert + x.Key = b.Key + x.Cert = b.Cert + return m0 +} + +var File_c1_utls_v1_tls_proto protoreflect.FileDescriptor + +const file_c1_utls_v1_tls_proto_rawDesc = "" + + "\n" + + "\x14c1/utls/v1/tls.proto\x12\n" + + "c1.utls.v1\"K\n" + + "\n" + + "Credential\x12\x17\n" + + "\aca_cert\x18\x01 \x01(\fR\x06caCert\x12\x10\n" + + "\x03key\x18\x02 \x01(\fR\x03key\x12\x12\n" + + "\x04cert\x18\x03 \x01(\fR\x04certB1Z/github.com/conductorone/baton-sdk/pb/c1/utls/v1b\x06proto3" + var file_c1_utls_v1_tls_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_c1_utls_v1_tls_proto_goTypes = []any{ (*Credential)(nil), // 0: c1.utls.v1.Credential diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/utls/v1/tls_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/utls/v1/tls_protoopaque.pb.go new file mode 100644 index 00000000..aace2723 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/utls/v1/tls_protoopaque.pb.go @@ -0,0 +1,165 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/utls/v1/tls.proto + +//go:build protoopaque + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Credential struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_CaCert []byte `protobuf:"bytes,1,opt,name=ca_cert,json=caCert,proto3"` + xxx_hidden_Key []byte `protobuf:"bytes,2,opt,name=key,proto3"` + xxx_hidden_Cert []byte `protobuf:"bytes,3,opt,name=cert,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Credential) Reset() { + *x = Credential{} + mi := &file_c1_utls_v1_tls_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Credential) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Credential) ProtoMessage() {} + +func (x *Credential) ProtoReflect() protoreflect.Message { + mi := &file_c1_utls_v1_tls_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Credential) GetCaCert() []byte { + if x != nil { + return x.xxx_hidden_CaCert + } + return nil +} + +func (x *Credential) GetKey() []byte { + if x != nil { + return x.xxx_hidden_Key + } + return nil +} + +func (x *Credential) GetCert() []byte { + if x != nil { + return x.xxx_hidden_Cert + } + return nil +} + +func (x *Credential) SetCaCert(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_CaCert = v +} + +func (x *Credential) SetKey(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Key = v +} + +func (x *Credential) SetCert(v []byte) { + if v == nil { + v = []byte{} + } + x.xxx_hidden_Cert = v +} + +type Credential_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + CaCert []byte + Key []byte + Cert []byte +} + +func (b0 Credential_builder) Build() *Credential { + m0 := &Credential{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_CaCert = b.CaCert + x.xxx_hidden_Key = b.Key + x.xxx_hidden_Cert = b.Cert + return m0 +} + +var File_c1_utls_v1_tls_proto protoreflect.FileDescriptor + +const file_c1_utls_v1_tls_proto_rawDesc = "" + + "\n" + + "\x14c1/utls/v1/tls.proto\x12\n" + + "c1.utls.v1\"K\n" + + "\n" + + "Credential\x12\x17\n" + + "\aca_cert\x18\x01 \x01(\fR\x06caCert\x12\x10\n" + + "\x03key\x18\x02 \x01(\fR\x03key\x12\x12\n" + + "\x04cert\x18\x03 \x01(\fR\x04certB1Z/github.com/conductorone/baton-sdk/pb/c1/utls/v1b\x06proto3" + +var file_c1_utls_v1_tls_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_utls_v1_tls_proto_goTypes = []any{ + (*Credential)(nil), // 0: c1.utls.v1.Credential +} +var file_c1_utls_v1_tls_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_utls_v1_tls_proto_init() } +func file_c1_utls_v1_tls_proto_init() { + if File_c1_utls_v1_tls_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_utls_v1_tls_proto_rawDesc), len(file_c1_utls_v1_tls_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_utls_v1_tls_proto_goTypes, + DependencyIndexes: file_c1_utls_v1_tls_proto_depIdxs, + MessageInfos: file_c1_utls_v1_tls_proto_msgTypes, + }.Build() + File_c1_utls_v1_tls_proto = out.File + file_c1_utls_v1_tls_proto_goTypes = nil + file_c1_utls_v1_tls_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go b/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go new file mode 100644 index 00000000..ca26d7e4 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go @@ -0,0 +1,519 @@ +package actions + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "time" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "github.com/segmentio/ksuid" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/structpb" +) + +type ActionHandler func(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) + +type OutstandingAction struct { + Id string + Name string + Status v2.BatonActionStatus + Rv *structpb.Struct + Annos annotations.Annotations + Err error + StartedAt time.Time + sync.Mutex +} + +func NewOutstandingAction(id, name string) *OutstandingAction { + return &OutstandingAction{ + Id: id, + Name: name, + Status: v2.BatonActionStatus_BATON_ACTION_STATUS_PENDING, + StartedAt: time.Now(), + } +} + +func (oa *OutstandingAction) SetStatus(ctx context.Context, status v2.BatonActionStatus) { + oa.Lock() + defer oa.Unlock() + l := ctxzap.Extract(ctx).With( + zap.String("action_id", oa.Id), + zap.String("action_name", oa.Name), + zap.String("status", status.String()), + ) + if oa.Status == v2.BatonActionStatus_BATON_ACTION_STATUS_COMPLETE || 
oa.Status == v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED { + l.Error("cannot set status on completed action") + } + if status == v2.BatonActionStatus_BATON_ACTION_STATUS_RUNNING && oa.Status != v2.BatonActionStatus_BATON_ACTION_STATUS_PENDING { + l.Error("cannot set status to running unless action is pending") + } + + oa.Status = status +} + +func (oa *OutstandingAction) setError(_ context.Context, err error) { + oa.Lock() + defer oa.Unlock() + if oa.Rv == nil { + oa.Rv = &structpb.Struct{} + } + if oa.Rv.Fields == nil { + oa.Rv.Fields = make(map[string]*structpb.Value) + } + oa.Rv.Fields["error"] = &structpb.Value{ + Kind: &structpb.Value_StringValue{ + StringValue: err.Error(), + }, + } + oa.Err = err +} + +func (oa *OutstandingAction) SetError(ctx context.Context, err error) { + oa.setError(ctx, err) + oa.SetStatus(ctx, v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED) +} + +const maxOldActions = 1000 + +// ActionRegistry provides methods for registering actions. +// Used by both GlobalActionProvider (global actions) and ResourceActionProvider (resource-scoped actions). +type ActionRegistry interface { + // Register registers an action using the name from the schema. + Register(ctx context.Context, schema *v2.BatonActionSchema, handler ActionHandler) error + + // Deprecated: Use Register instead. + // RegisterAction registers an action. + RegisterAction(ctx context.Context, name string, schema *v2.BatonActionSchema, handler ActionHandler) error +} + +// Deprecated: Use ActionRegistry instead. +// ResourceTypeActionRegistry is an alias for ActionRegistry for backwards compatibility. +type ResourceTypeActionRegistry = ActionRegistry + +// ActionManager manages both global actions and resource-scoped actions. +type ActionManager struct { + // Global actions (no resource type) + schemas map[string]*v2.BatonActionSchema // actionName -> schema + handlers map[string]ActionHandler // actionName -> handler + + // Resource-scoped actions (keyed by resource type) + resourceSchemas map[string]map[string]*v2.BatonActionSchema // resourceTypeID -> actionName -> schema + resourceHandlers map[string]map[string]ActionHandler // resourceTypeID -> actionName -> handler + + // Outstanding actions (shared across global and resource-scoped) + actions map[string]*OutstandingAction // actionID -> outstanding action + + mu sync.RWMutex +} + +func NewActionManager(_ context.Context) *ActionManager { + return &ActionManager{ + schemas: make(map[string]*v2.BatonActionSchema), + handlers: make(map[string]ActionHandler), + resourceSchemas: make(map[string]map[string]*v2.BatonActionSchema), + resourceHandlers: make(map[string]map[string]ActionHandler), + actions: make(map[string]*OutstandingAction), + } +} + +func (a *ActionManager) GetNewActionId() string { + uid := ksuid.New() + return uid.String() +} + +func (a *ActionManager) GetNewAction(name string) *OutstandingAction { + a.mu.Lock() + defer a.mu.Unlock() + actionId := a.GetNewActionId() + oa := NewOutstandingAction(actionId, name) + a.actions[actionId] = oa + return oa +} + +func (a *ActionManager) CleanupOldActions(ctx context.Context) { + a.mu.Lock() + defer a.mu.Unlock() + + if len(a.actions) < maxOldActions { + return + } + + l := ctxzap.Extract(ctx) + l.Debug("cleaning up old actions") + // Create a slice to hold the actions + actionList := make([]*OutstandingAction, 0, len(a.actions)) + for _, action := range a.actions { + actionList = append(actionList, action) + } + + // Sort the actions by StartedAt time + sort.Slice(actionList, func(i, j int) 
bool { + return actionList[i].StartedAt.Before(actionList[j].StartedAt) + }) + + count := 0 + // Delete the oldest actions + for i := 0; i < len(actionList)-maxOldActions; i++ { + action := actionList[i] + if action.Status == v2.BatonActionStatus_BATON_ACTION_STATUS_COMPLETE || action.Status == v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED { + count++ + delete(a.actions, actionList[i].Id) + } + } + l.Debug("cleaned up old actions", zap.Int("count", count)) +} + +func (a *ActionManager) registerActionSchema(_ context.Context, name string, schema *v2.BatonActionSchema) error { + if name == "" { + return errors.New("action name cannot be empty") + } + if schema == nil { + return errors.New("action schema cannot be nil") + } + if _, ok := a.schemas[name]; ok { + return fmt.Errorf("action schema %s already registered", name) + } + a.schemas[name] = schema + return nil +} + +// Register registers a global action using the name from the schema. +func (a *ActionManager) Register(ctx context.Context, schema *v2.BatonActionSchema, handler ActionHandler) error { + if schema == nil { + return errors.New("action schema cannot be nil") + } + return a.RegisterAction(ctx, schema.GetName(), schema, handler) +} + +// Deprecated: Use Register instead. +// RegisterAction registers a global action (not scoped to a resource type). +func (a *ActionManager) RegisterAction(ctx context.Context, name string, schema *v2.BatonActionSchema, handler ActionHandler) error { + a.mu.Lock() + defer a.mu.Unlock() + + if handler == nil { + return errors.New("action handler cannot be nil") + } + err := a.registerActionSchema(ctx, name, schema) + if err != nil { + return err + } + + if _, ok := a.handlers[name]; ok { + return fmt.Errorf("action handler %s already registered", name) + } + a.handlers[name] = handler + + l := ctxzap.Extract(ctx) + l.Debug("registered action", zap.String("name", name)) + + return nil +} + +func (a *ActionManager) HasActions() bool { + a.mu.RLock() + defer a.mu.RUnlock() + return len(a.schemas) > 0 || len(a.resourceSchemas) > 0 +} + +// RegisterResourceAction registers a resource-scoped action. 
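+//
+// Illustrative sketch only, not part of the vendored API docs: "team" and the
+// schema/handler variables are hypothetical, and the schema is assumed to be an
+// already-constructed *v2.BatonActionSchema whose GetName() is non-empty.
+//
+//	manager := NewActionManager(ctx)
+//	// Registration keys the action by schema.GetName() under the resource type ID;
+//	// duplicate names (or duplicate non-dynamic action types) are rejected.
+//	if err := manager.RegisterResourceAction(ctx, "team", schema, handler); err != nil {
+//		return err
+//	}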
+func (a *ActionManager) RegisterResourceAction( + ctx context.Context, + resourceTypeID string, + schema *v2.BatonActionSchema, + handler ActionHandler, +) error { + if resourceTypeID == "" { + return errors.New("resource type ID cannot be empty") + } + if schema == nil { + return errors.New("action schema cannot be nil") + } + if schema.GetName() == "" { + return errors.New("action schema name cannot be empty") + } + if handler == nil { + return fmt.Errorf("handler cannot be nil for action %s", schema.GetName()) + } + + a.mu.Lock() + defer a.mu.Unlock() + + // Set the resource type ID on the schema + schema.SetResourceTypeId(resourceTypeID) + + if a.resourceSchemas[resourceTypeID] == nil { + a.resourceSchemas[resourceTypeID] = make(map[string]*v2.BatonActionSchema) + } + if a.resourceHandlers[resourceTypeID] == nil { + a.resourceHandlers[resourceTypeID] = make(map[string]ActionHandler) + } + + actionName := schema.GetName() + + // Check for duplicate action names + if _, ok := a.resourceSchemas[resourceTypeID][actionName]; ok { + return fmt.Errorf("action schema %s already registered for resource type %s", actionName, resourceTypeID) + } + + // Check for duplicate action types + if len(schema.GetActionType()) > 0 { + for existingName, existingSchema := range a.resourceSchemas[resourceTypeID] { + if existingSchema == nil || len(existingSchema.GetActionType()) == 0 { + continue + } + // Check if any ActionType in the new schema matches any in existing schemas + for _, newActionType := range schema.GetActionType() { + if newActionType == v2.ActionType_ACTION_TYPE_UNSPECIFIED || newActionType == v2.ActionType_ACTION_TYPE_DYNAMIC { + continue // Skip unspecified and dynamic types as they can overlap + } + for _, existingActionType := range existingSchema.GetActionType() { + if newActionType == existingActionType { + return fmt.Errorf("action type %s already registered for resource type %s (existing action: %s)", newActionType.String(), resourceTypeID, existingName) + } + } + } + } + } + + a.resourceSchemas[resourceTypeID][actionName] = schema + a.resourceHandlers[resourceTypeID][actionName] = handler + + ctxzap.Extract(ctx).Debug("registered resource action", zap.String("resource_type", resourceTypeID), zap.String("action_name", actionName)) + + return nil +} + +// resourceTypeActionRegistry implements ResourceTypeActionRegistry for a specific resource type. +type resourceTypeActionRegistry struct { + resourceTypeID string + actionManager *ActionManager +} + +func (r *resourceTypeActionRegistry) Register(ctx context.Context, schema *v2.BatonActionSchema, handler ActionHandler) error { + return r.actionManager.RegisterResourceAction(ctx, r.resourceTypeID, schema, handler) +} + +// Deprecated: Use Register instead. +// RegisterAction registers a resource-scoped action. The name parameter is ignored; the name from schema is used. +func (r *resourceTypeActionRegistry) RegisterAction(ctx context.Context, name string, schema *v2.BatonActionSchema, handler ActionHandler) error { + return r.Register(ctx, schema, handler) +} + +// GetTypeRegistry returns an ActionRegistry for registering actions scoped to a specific resource type. 
+func (a *ActionManager) GetTypeRegistry(_ context.Context, resourceTypeID string) (ActionRegistry, error) { + if resourceTypeID == "" { + return nil, errors.New("resource type ID cannot be empty") + } + return &resourceTypeActionRegistry{resourceTypeID: resourceTypeID, actionManager: a}, nil +} + +func (a *ActionManager) UnregisterAction(ctx context.Context, name string) error { + a.mu.Lock() + defer a.mu.Unlock() + + if _, ok := a.schemas[name]; !ok { + return fmt.Errorf("action %s not registered", name) + } + delete(a.schemas, name) + if _, ok := a.handlers[name]; !ok { + return fmt.Errorf("action handler %s not registered", name) + } + delete(a.handlers, name) + + l := ctxzap.Extract(ctx) + l.Debug("unregistered action", zap.String("name", name)) + + // TODO: cancel & clean up outstanding actions? + + return nil +} + +// ListActionSchemas returns all action schemas, optionally filtered by resource type. +// If resourceTypeID is empty, returns all global actions plus all resource-scoped actions. +// If resourceTypeID is set, returns only actions for that resource type. +func (a *ActionManager) ListActionSchemas(_ context.Context, resourceTypeID string) ([]*v2.BatonActionSchema, annotations.Annotations, error) { + a.mu.RLock() + defer a.mu.RUnlock() + + var rv []*v2.BatonActionSchema + + if resourceTypeID == "" { + // Return all global actions + rv = make([]*v2.BatonActionSchema, 0, len(a.schemas)) + for _, schema := range a.schemas { + rv = append(rv, schema) + } + + // Also return all resource-scoped actions + for _, schemas := range a.resourceSchemas { + for _, schema := range schemas { + rv = append(rv, schema) + } + } + } else { + // Return only actions for the specified resource type + schemas, ok := a.resourceSchemas[resourceTypeID] + if !ok { + return []*v2.BatonActionSchema{}, nil, nil + } + + rv = make([]*v2.BatonActionSchema, 0, len(schemas)) + for _, schema := range schemas { + rv = append(rv, schema) + } + } + + return rv, nil, nil +} + +func (a *ActionManager) GetActionSchema(_ context.Context, name string) (*v2.BatonActionSchema, annotations.Annotations, error) { + a.mu.RLock() + defer a.mu.RUnlock() + + schema, ok := a.schemas[name] + if !ok { + return nil, nil, status.Error(codes.NotFound, fmt.Sprintf("action %s not found", name)) + } + return schema, nil, nil +} + +func (a *ActionManager) GetActionStatus(_ context.Context, actionId string) (v2.BatonActionStatus, string, *structpb.Struct, annotations.Annotations, error) { + a.mu.RLock() + defer a.mu.RUnlock() + + oa := a.actions[actionId] + if oa == nil { + return v2.BatonActionStatus_BATON_ACTION_STATUS_UNKNOWN, "", nil, nil, status.Error(codes.NotFound, fmt.Sprintf("action id %s not found", actionId)) + } + + // Don't return oa.Err here because error is for GetActionStatus, not the action itself. + // oa.Rv contains any error. + return oa.Status, oa.Name, oa.Rv, oa.Annos, nil +} + +// InvokeAction invokes an action. If resourceTypeID is set, it invokes a resource-scoped action. +// Otherwise, it invokes a global action. +func (a *ActionManager) InvokeAction( + ctx context.Context, + name string, + resourceTypeID string, + args *structpb.Struct, +) (string, v2.BatonActionStatus, *structpb.Struct, annotations.Annotations, error) { + if resourceTypeID != "" { + return a.invokeResourceAction(ctx, resourceTypeID, name, args) + } + + return a.invokeGlobalAction(ctx, name, args) +} + +// invokeGlobalAction invokes a global (non-resource-scoped) action. 
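+//
+// The handler runs in its own goroutine with a one-hour timeout; callers that do not
+// see a terminal status within one second are expected to poll GetActionStatus with
+// the returned action ID. A minimal conforming handler (illustrative only, using the
+// arg helpers from args.go; the "name" key is hypothetical) might look like:
+//
+//	func exampleHandler(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) {
+//		name, err := RequireStringArg(args, "name")
+//		if err != nil {
+//			return nil, nil, err
+//		}
+//		rv := &structpb.Struct{Fields: map[string]*structpb.Value{
+//			"name": structpb.NewStringValue(name),
+//			"done": structpb.NewBoolValue(true),
+//		}}
+//		return rv, nil, nil
+//	}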
+func (a *ActionManager) invokeGlobalAction(ctx context.Context, name string, args *structpb.Struct) (string, v2.BatonActionStatus, *structpb.Struct, annotations.Annotations, error) { + a.mu.RLock() + handler, ok := a.handlers[name] + a.mu.RUnlock() + + if !ok { + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.NotFound, fmt.Sprintf("handler for action %s not found", name)) + } + + oa := a.GetNewAction(name) + + done := make(chan struct{}) + + // If handler exits within a second, return result. + // If handler takes longer than 1 second, return status pending. + // If handler takes longer than an hour, return status failed. + go func() { + oa.SetStatus(ctx, v2.BatonActionStatus_BATON_ACTION_STATUS_RUNNING) + handlerCtx, cancel := context.WithTimeoutCause(context.Background(), 1*time.Hour, errors.New("action handler timed out")) + defer cancel() + var oaErr error + oa.Rv, oa.Annos, oaErr = handler(handlerCtx, args) + if oaErr == nil { + oa.SetStatus(ctx, v2.BatonActionStatus_BATON_ACTION_STATUS_COMPLETE) + } else { + oa.SetError(ctx, oaErr) + } + done <- struct{}{} + }() + + select { + case <-done: + return oa.Id, oa.Status, oa.Rv, oa.Annos, nil + case <-time.After(1 * time.Second): + return oa.Id, oa.Status, oa.Rv, oa.Annos, nil + case <-ctx.Done(): + oa.SetError(ctx, ctx.Err()) + return oa.Id, oa.Status, oa.Rv, oa.Annos, ctx.Err() + } +} + +// invokeResourceAction invokes a resource-scoped action. +func (a *ActionManager) invokeResourceAction( + ctx context.Context, + resourceTypeID string, + actionName string, + args *structpb.Struct, +) (string, v2.BatonActionStatus, *structpb.Struct, annotations.Annotations, error) { + if resourceTypeID == "" { + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.InvalidArgument, "resource type ID is required") + } + if actionName == "" { + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.InvalidArgument, "action name is required") + } + + a.mu.RLock() + handlers, ok := a.resourceHandlers[resourceTypeID] + if !ok { + a.mu.RUnlock() + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.NotFound, fmt.Sprintf("no actions found for resource type %s", resourceTypeID)) + } + + handler, ok := handlers[actionName] + if !ok { + a.mu.RUnlock() + return "", + v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, + nil, + nil, + status.Error(codes.NotFound, fmt.Sprintf("handler for action %s not found for resource type %s", actionName, resourceTypeID)) + } + a.mu.RUnlock() + + oa := a.GetNewAction(actionName) + done := make(chan struct{}) + + // Invoke handler in goroutine + go func() { + oa.SetStatus(ctx, v2.BatonActionStatus_BATON_ACTION_STATUS_RUNNING) + handlerCtx, cancel := context.WithTimeoutCause(context.Background(), 1*time.Hour, errors.New("action handler timed out")) + defer cancel() + var oaErr error + oa.Rv, oa.Annos, oaErr = handler(handlerCtx, args) + if oaErr == nil { + oa.SetStatus(ctx, v2.BatonActionStatus_BATON_ACTION_STATUS_COMPLETE) + } else { + oa.SetError(ctx, oaErr) + } + done <- struct{}{} + }() + + // Wait for completion or timeout + select { + case <-done: + return oa.Id, oa.Status, oa.Rv, oa.Annos, nil + case <-time.After(1 * time.Second): + return oa.Id, oa.Status, oa.Rv, oa.Annos, nil + case <-ctx.Done(): + oa.SetError(ctx, ctx.Err()) + return oa.Id, oa.Status, oa.Rv, oa.Annos, ctx.Err() + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/actions/args.go 
b/vendor/github.com/conductorone/baton-sdk/pkg/actions/args.go new file mode 100644 index 00000000..367b3287 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/actions/args.go @@ -0,0 +1,543 @@ +package actions + +import ( + "fmt" + + config "github.com/conductorone/baton-sdk/pb/c1/config/v1" + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/structpb" +) + +// GetStringArg extracts a string value from the args struct by key. +// Returns the value and true if found, empty string and false otherwise. +func GetStringArg(args *structpb.Struct, key string) (string, bool) { + if args == nil || args.Fields == nil { + return "", false + } + + value, ok := args.Fields[key] + if !ok { + return "", false + } + + stringValue, ok := value.GetKind().(*structpb.Value_StringValue) + if !ok { + return "", false + } + + return stringValue.StringValue, true +} + +// GetIntArg extracts an int64 value from the args struct by key. +// Returns the value and true if found, 0 and false otherwise. +func GetIntArg(args *structpb.Struct, key string) (int64, bool) { + if args == nil || args.Fields == nil { + return 0, false + } + + value, ok := args.Fields[key] + if !ok { + return 0, false + } + + numberValue, ok := value.GetKind().(*structpb.Value_NumberValue) + if !ok { + return 0, false + } + + return int64(numberValue.NumberValue), true +} + +// GetBoolArg extracts a bool value from the args struct by key. +// Returns the value and true if found, false and false otherwise. +func GetBoolArg(args *structpb.Struct, key string) (bool, bool) { + if args == nil || args.Fields == nil { + return false, false + } + + value, ok := args.Fields[key] + if !ok { + return false, false + } + + boolValue, ok := value.GetKind().(*structpb.Value_BoolValue) + if !ok { + return false, false + } + + return boolValue.BoolValue, true +} + +// GetResourceIDArg extracts a ResourceId from the args struct by key. +// The value is expected to be a struct with "resource_type_id" and "resource_id" fields +// (as stored by ResourceField). Returns the ResourceId and true if found, nil and false otherwise. +func GetResourceIDArg(args *structpb.Struct, key string) (*v2.ResourceId, bool) { + if args == nil || args.Fields == nil { + return nil, false + } + + value, ok := args.Fields[key] + if !ok { + return nil, false + } + + structValue, ok := value.GetKind().(*structpb.Value_StructValue) + if !ok { + return nil, false + } + + // Try to get resource_type_id and resource_id fields + resourceTypeID, ok := GetStringArg(structValue.StructValue, "resource_type_id") + if !ok { + // Also try resource_type as an alternative + resourceTypeID, ok = GetStringArg(structValue.StructValue, "resource_type") + if !ok { + return nil, false + } + } + + resourceID, ok := GetStringArg(structValue.StructValue, "resource_id") + if !ok { + // Also try resource as an alternative + resourceID, ok = GetStringArg(structValue.StructValue, "resource") + if !ok { + return nil, false + } + } + + return &v2.ResourceId{ + ResourceType: resourceTypeID, + Resource: resourceID, + }, true +} + +// GetStringSliceArg extracts a string slice from the args struct by key. +// Returns the slice and true if found, nil and false otherwise. 
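+//
+// Illustrative sketch of the expected shape (the "tags" key is hypothetical):
+//
+//	args, _ := structpb.NewStruct(map[string]interface{}{
+//		"tags": []interface{}{"admin", "billing"},
+//	})
+//	tags, ok := GetStringSliceArg(args, "tags") // ok == true, tags == []string{"admin", "billing"}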
+func GetStringSliceArg(args *structpb.Struct, key string) ([]string, bool) { + if args == nil || args.Fields == nil { + return nil, false + } + + value, ok := args.Fields[key] + if !ok { + return nil, false + } + + listValue, ok := value.GetKind().(*structpb.Value_ListValue) + if !ok { + return nil, false + } + + result := make([]string, 0, len(listValue.ListValue.Values)) + for _, v := range listValue.ListValue.Values { + stringValue, ok := v.GetKind().(*structpb.Value_StringValue) + if !ok { + return nil, false + } + result = append(result, stringValue.StringValue) + } + + return result, true +} + +// GetStructArg extracts a nested struct from the args struct by key. +// Returns the struct and true if found, nil and false otherwise. +func GetStructArg(args *structpb.Struct, key string) (*structpb.Struct, bool) { + if args == nil || args.Fields == nil { + return nil, false + } + + value, ok := args.Fields[key] + if !ok { + return nil, false + } + + structValue, ok := value.GetKind().(*structpb.Value_StructValue) + if !ok { + return nil, false + } + + return structValue.StructValue, true +} + +// RequireStringArg extracts a string value from the args struct by key. +// Returns the value or an error if not found or invalid. +func RequireStringArg(args *structpb.Struct, key string) (string, error) { + value, ok := GetStringArg(args, key) + if !ok { + return "", fmt.Errorf("required argument %s is missing or invalid", key) + } + return value, nil +} + +// RequireResourceIDArg extracts a ResourceId from the args struct by key. +// Returns the ResourceId or an error if not found or invalid. +func RequireResourceIDArg(args *structpb.Struct, key string) (*v2.ResourceId, error) { + value, ok := GetResourceIDArg(args, key) + if !ok { + return nil, fmt.Errorf("required argument %s is missing or invalid", key) + } + return value, nil +} + +// RequireResourceIdListArg extracts a list of ResourceId from the args struct by key. +// Returns the list of ResourceId or an error if not found or invalid. +func RequireResourceIdListArg(args *structpb.Struct, key string) ([]*v2.ResourceId, error) { + list, ok := GetResourceIdListArg(args, key) + if !ok { + return nil, fmt.Errorf("required argument %s is missing or invalid", key) + } + return list, nil +} + +// GetResourceIdListArg extracts a list of ResourceId from the args struct by key. +// Returns the list and true if found and valid, or nil and false otherwise. 
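+//
+// Each element must be an object carrying "resource_type_id"/"resource_id" (or the
+// "resource_type"/"resource" aliases). Illustrative sketch, with a hypothetical
+// "members" key and "user" resource type:
+//
+//	args, _ := structpb.NewStruct(map[string]interface{}{
+//		"members": []interface{}{
+//			map[string]interface{}{"resource_type_id": "user", "resource_id": "1234"},
+//		},
+//	})
+//	ids, ok := GetResourceIdListArg(args, "members") // ok == true, ids[0].Resource == "1234"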
+func GetResourceIdListArg(args *structpb.Struct, key string) ([]*v2.ResourceId, bool) { + if args == nil || args.Fields == nil { + return nil, false + } + + value, ok := args.Fields[key] + if !ok { + return nil, false + } + + listValue, ok := value.GetKind().(*structpb.Value_ListValue) + if !ok { + return nil, false + } + + var resourceIDs []*v2.ResourceId + for _, v := range listValue.ListValue.Values { + structValue, ok := v.GetKind().(*structpb.Value_StructValue) + if !ok { + return nil, false + } + // Try to get resource_type_id and resource_id fields + resourceTypeID, ok := GetStringArg(structValue.StructValue, "resource_type_id") + if !ok { + // Also try resource_type as an alternative + resourceTypeID, ok = GetStringArg(structValue.StructValue, "resource_type") + if !ok { + return nil, false + } + } + + resourceID, ok := GetStringArg(structValue.StructValue, "resource_id") + if !ok { + // Also try resource as an alternative + resourceID, ok = GetStringArg(structValue.StructValue, "resource") + if !ok { + return nil, false + } + } + resourceIDs = append(resourceIDs, &v2.ResourceId{ + ResourceType: resourceTypeID, + Resource: resourceID, + }) + } + + return resourceIDs, true +} + +// GetResourceFieldArg extracts a Resource proto message from the args struct by key. +// The Resource is expected to be stored as a JSON-serialized struct value. +// Returns the Resource and true if found and valid, or nil and false otherwise. +func GetResourceFieldArg(args *structpb.Struct, key string) (*v2.Resource, bool) { + if args == nil || args.Fields == nil { + return nil, false + } + value, ok := args.Fields[key] + if !ok { + return nil, false + } + structValue, ok := value.GetKind().(*structpb.Value_StructValue) + if !ok { + return nil, false + } + + // Marshal the struct value back to JSON, then unmarshal into the proto message + jsonBytes, err := protojson.Marshal(structValue.StructValue) + if err != nil { + return nil, false + } + + basicResource := &config.Resource{} + if err := protojson.Unmarshal(jsonBytes, basicResource); err != nil { + return nil, false + } + + return basicResourceToResource(basicResource), true +} + +func resourceToBasicResource(resource *v2.Resource) *config.Resource { + var resourceId *config.ResourceId + if resource.Id != nil { + resourceId = config.ResourceId_builder{ + ResourceTypeId: resource.Id.ResourceType, + ResourceId: resource.Id.Resource, + }.Build() + } + var parentResourceId *config.ResourceId + if resource.ParentResourceId != nil { + parentResourceId = config.ResourceId_builder{ + ResourceTypeId: resource.ParentResourceId.ResourceType, + ResourceId: resource.ParentResourceId.Resource, + }.Build() + } + return config.Resource_builder{ + ResourceId: resourceId, + ParentResourceId: parentResourceId, + DisplayName: resource.DisplayName, + Description: resource.Description, + Annotations: resource.Annotations, + }.Build() +} + +func basicResourceToResource(basicResource *config.Resource) *v2.Resource { + var resourceId *v2.ResourceId + if basicResource.GetResourceId() != nil { + resourceId = &v2.ResourceId{ + ResourceType: basicResource.GetResourceId().GetResourceTypeId(), + Resource: basicResource.GetResourceId().GetResourceId(), + } + } + var parentResourceId *v2.ResourceId + if basicResource.GetParentResourceId() != nil { + parentResourceId = &v2.ResourceId{ + ResourceType: basicResource.GetParentResourceId().GetResourceTypeId(), + Resource: basicResource.GetParentResourceId().GetResourceId(), + } + } + return &v2.Resource{ + Id: resourceId, + 
ParentResourceId: parentResourceId, + DisplayName: basicResource.GetDisplayName(), + Description: basicResource.GetDescription(), + Annotations: basicResource.GetAnnotations(), + } +} + +// GetResourceListFieldArg extracts a list of Resource proto messages from the args struct by key. +// Each Resource is expected to be stored as a JSON-serialized struct value. +// Returns the list of Resource and true if found and valid, or nil and false otherwise. +func GetResourceListFieldArg(args *structpb.Struct, key string) ([]*v2.Resource, bool) { + if args == nil || args.Fields == nil { + return nil, false + } + value, ok := args.Fields[key] + if !ok { + return nil, false + } + listValue, ok := value.GetKind().(*structpb.Value_ListValue) + if !ok { + return nil, false + } + var resources []*v2.Resource + for _, v := range listValue.ListValue.Values { + structValue, ok := v.GetKind().(*structpb.Value_StructValue) + if !ok { + return nil, false + } + + // Marshal the struct value back to JSON, then unmarshal into the proto message + jsonBytes, err := protojson.Marshal(structValue.StructValue) + if err != nil { + return nil, false + } + + basicResource := &config.Resource{} + if err := protojson.Unmarshal(jsonBytes, basicResource); err != nil { + return nil, false + } + + resources = append(resources, basicResourceToResource(basicResource)) + } + return resources, true +} + +// SetResourceFieldArg stores a Resource proto message in the args struct by key. +// The Resource is serialized as a JSON struct value. +func SetResourceFieldArg(args *structpb.Struct, key string, resource *v2.Resource) error { + if args == nil { + return fmt.Errorf("args cannot be nil") + } + if resource == nil { + return fmt.Errorf("resource cannot be nil") + } + + basicResource := resourceToBasicResource(resource) + + // Marshal the proto message to JSON, then unmarshal into a struct value + jsonBytes, err := protojson.Marshal(basicResource) + if err != nil { + return fmt.Errorf("failed to marshal resource: %w", err) + } + + structValue := &structpb.Struct{} + if err := protojson.Unmarshal(jsonBytes, structValue); err != nil { + return fmt.Errorf("failed to unmarshal resource to struct: %w", err) + } + + if args.Fields == nil { + args.Fields = make(map[string]*structpb.Value) + } + args.Fields[key] = structpb.NewStructValue(structValue) + return nil +} + +// ReturnField represents a key-value pair for action return values. +type ReturnField struct { + Key string + Value *structpb.Value +} + +// NewReturnField creates a new return field with the given key and value. +func NewReturnField(key string, value *structpb.Value) ReturnField { + return ReturnField{Key: key, Value: value} +} + +// NewStringReturnField creates a return field with a string value. +func NewStringReturnField(key string, value string) ReturnField { + return ReturnField{Key: key, Value: structpb.NewStringValue(value)} +} + +// NewBoolReturnField creates a return field with a bool value. +func NewBoolReturnField(key string, value bool) ReturnField { + return ReturnField{Key: key, Value: structpb.NewBoolValue(value)} +} + +// NewNumberReturnField creates a return field with a number value. +func NewNumberReturnField(key string, value float64) ReturnField { + return ReturnField{Key: key, Value: structpb.NewNumberValue(value)} +} + +// NewResourceReturnField creates a return field with a Resource proto value. 
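+//
+// A typical use is attaching a created resource to an action's return values;
+// a rough sketch (createdResource is an illustrative variable):
+//
+//	field, err := NewResourceReturnField("resource", createdResource)
+//	if err != nil {
+//		return nil, err
+//	}
+//	rv := NewReturnValues(true, field)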
+func NewResourceReturnField(key string, resource *v2.Resource) (ReturnField, error) { + if resource == nil { + return ReturnField{}, fmt.Errorf("resource cannot be nil") + } + basicResource := resourceToBasicResource(resource) + jsonBytes, err := protojson.Marshal(basicResource) + if err != nil { + return ReturnField{}, fmt.Errorf("failed to marshal resource: %w", err) + } + + structValue := &structpb.Struct{} + if err := protojson.Unmarshal(jsonBytes, structValue); err != nil { + return ReturnField{}, fmt.Errorf("failed to unmarshal resource to struct: %w", err) + } + + return ReturnField{Key: key, Value: structpb.NewStructValue(structValue)}, nil +} + +// NewResourceIdReturnField creates a return field with a ResourceId proto value. +func NewResourceIdReturnField(key string, resourceId *v2.ResourceId) (ReturnField, error) { + if resourceId == nil { + return ReturnField{}, fmt.Errorf("resource ID cannot be nil") + } + basicResourceId := config.ResourceId_builder{ + ResourceTypeId: resourceId.ResourceType, + ResourceId: resourceId.Resource, + }.Build() + jsonBytes, err := protojson.Marshal(basicResourceId) + if err != nil { + return ReturnField{}, fmt.Errorf("failed to marshal resource id: %w", err) + } + + structValue := &structpb.Struct{} + if err := protojson.Unmarshal(jsonBytes, structValue); err != nil { + return ReturnField{}, fmt.Errorf("failed to unmarshal resource id to struct: %w", err) + } + + return ReturnField{Key: key, Value: structpb.NewStructValue(structValue)}, nil +} + +// NewStringListReturnField creates a return field with a list of string values. +func NewStringListReturnField(key string, values []string) ReturnField { + listValues := make([]*structpb.Value, len(values)) + for i, v := range values { + listValues[i] = structpb.NewStringValue(v) + } + return ReturnField{Key: key, Value: structpb.NewListValue(&structpb.ListValue{Values: listValues})} +} + +// NewNumberListReturnField creates a return field with a list of number values. +func NewNumberListReturnField(key string, values []float64) ReturnField { + listValues := make([]*structpb.Value, len(values)) + for i, v := range values { + listValues[i] = structpb.NewNumberValue(v) + } + return ReturnField{Key: key, Value: structpb.NewListValue(&structpb.ListValue{Values: listValues})} +} + +// NewResourceListReturnField creates a return field with a list of Resource proto values. +func NewResourceListReturnField(key string, resources []*v2.Resource) (ReturnField, error) { + listValues := make([]*structpb.Value, len(resources)) + for i, resource := range resources { + if resource == nil { + return ReturnField{}, fmt.Errorf("resource at index %d cannot be nil", i) + } + basicResource := resourceToBasicResource(resource) + jsonBytes, err := protojson.Marshal(basicResource) + if err != nil { + return ReturnField{}, fmt.Errorf("failed to marshal resource: %w", err) + } + + structValue := &structpb.Struct{} + if err := protojson.Unmarshal(jsonBytes, structValue); err != nil { + return ReturnField{}, fmt.Errorf("failed to unmarshal resource to struct: %w", err) + } + + listValues[i] = structpb.NewStructValue(structValue) + } + return ReturnField{Key: key, Value: structpb.NewListValue(&structpb.ListValue{Values: listValues})}, nil +} + +// NewResourceIdListReturnField creates a return field with a list of ResourceId proto values. 
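+//
+// For example (the IDs and key are illustrative):
+//
+//	ids := []*v2.ResourceId{{ResourceType: "team", Resource: "123"}}
+//	field, err := NewResourceIdListReturnField("team_ids", ids)
+//	if err != nil {
+//		return nil, err
+//	}
+//	rv := NewReturnValues(true, field)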
+func NewResourceIdListReturnField(key string, resourceIDs []*v2.ResourceId) (ReturnField, error) { + listValues := make([]*structpb.Value, len(resourceIDs)) + for i, resourceId := range resourceIDs { + if resourceId == nil { + return ReturnField{}, fmt.Errorf("resource id at index %d cannot be nil", i) + } + basicResourceId := config.ResourceId_builder{ + ResourceTypeId: resourceId.ResourceType, + ResourceId: resourceId.Resource, + }.Build() + jsonBytes, err := protojson.Marshal(basicResourceId) + if err != nil { + return ReturnField{}, fmt.Errorf("failed to marshal resource id: %w", err) + } + + structValue := &structpb.Struct{} + if err := protojson.Unmarshal(jsonBytes, structValue); err != nil { + return ReturnField{}, fmt.Errorf("failed to unmarshal resource id to struct: %w", err) + } + + listValues[i] = structpb.NewStructValue(structValue) + } + return ReturnField{Key: key, Value: structpb.NewListValue(&structpb.ListValue{Values: listValues})}, nil +} + +// NewListReturnField creates a return field with a list of arbitrary values. +func NewListReturnField(key string, values []*structpb.Value) ReturnField { + return ReturnField{Key: key, Value: structpb.NewListValue(&structpb.ListValue{Values: values})} +} + +// NewReturnValues creates a return struct with the specified success status and fields. +// This helps users avoid having to remember the correct structure for return values. +func NewReturnValues(success bool, fields ...ReturnField) *structpb.Struct { + rv := &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "success": structpb.NewBoolValue(success), + }, + } + + for _, field := range fields { + rv.Fields[field.Key] = field.Value + } + + return rv +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/annotations/annotations.go b/vendor/github.com/conductorone/baton-sdk/pkg/annotations/annotations.go index d0566e71..64a3004b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/annotations/annotations.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/annotations/annotations.go @@ -137,13 +137,3 @@ func GetSyncIdFromAnnotations(annos Annotations) (string, error) { return "", nil } - -// NOTE: this is used to communicate the active sync to the connector proper, for session storage. 
-func GetActiveSyncIdFromAnnotations(annos Annotations) (string, error) { - v2SyncId := &v2.ActiveSync{} - _, err := annos.Pick(v2SyncId) - if err != nil { - return "", err - } - return v2SyncId.GetId(), nil -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/bid/bid.go b/vendor/github.com/conductorone/baton-sdk/pkg/bid/bid.go index ff1b7890..cd24e2cd 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/bid/bid.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/bid/bid.go @@ -153,15 +153,15 @@ func resourcePartToStr(r *v2.Resource) (string, error) { } func entitlementPartToStr(e *v2.Entitlement) (string, error) { - resourcePart, err := resourcePartToStr(e.Resource) + resourcePart, err := resourcePartToStr(e.GetResource()) if err != nil { return "", err } - if e.Slug == "" { + if e.GetSlug() == "" { return "", NewBidStringError(e, "entitlement slug is empty") } - return strings.Join([]string{resourcePart, escapeParts(e.Slug)}, ":"), nil + return strings.Join([]string{resourcePart, escapeParts(e.GetSlug())}, ":"), nil } func makeResourceBid(r *v2.Resource) (string, error) { @@ -183,11 +183,11 @@ func makeEntitlementBid(e *v2.Entitlement) (string, error) { } func makeGrantBid(g *v2.Grant) (string, error) { - principalPart, err := resourcePartToStr(g.Principal) + principalPart, err := resourcePartToStr(g.GetPrincipal()) if err != nil { return "", err } - entitlementPart, err := entitlementPartToStr(g.Entitlement) + entitlementPart, err := entitlementPartToStr(g.GetEntitlement()) if err != nil { return "", err } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/bid/parser.go b/vendor/github.com/conductorone/baton-sdk/pkg/bid/parser.go index c857bcda..cbe7604e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/bid/parser.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/bid/parser.go @@ -136,19 +136,19 @@ func parseResourcePart(rs *bidScanner) (*v2.Resource, error) { } if len(tokens) == 4 { - parentResourceId.ResourceType = tokens[0] - parentResourceId.Resource = tokens[1] - resourceId.ResourceType = tokens[2] - resourceId.Resource = tokens[3] - return &v2.Resource{ + parentResourceId.SetResourceType(tokens[0]) + parentResourceId.SetResource(tokens[1]) + resourceId.SetResourceType(tokens[2]) + resourceId.SetResource(tokens[3]) + return v2.Resource_builder{ Id: resourceId, ParentResourceId: parentResourceId, - }, nil + }.Build(), nil } if len(tokens) == 2 { - resourceId.ResourceType = tokens[0] - resourceId.Resource = tokens[1] - return &v2.Resource{Id: resourceId}, nil + resourceId.SetResourceType(tokens[0]) + resourceId.SetResource(tokens[1]) + return v2.Resource_builder{Id: resourceId}.Build(), nil } return nil, NewBidParseError(rs, "invalid resource part") @@ -189,10 +189,10 @@ func parseEntitlementPart(rs *bidScanner) (*v2.Entitlement, error) { return nil, NewBidParseError(rs, "invalid baton id entitlement part: %s", val) } - return &v2.Entitlement{ + return v2.Entitlement_builder{ Slug: val, Resource: resource, - }, nil + }.Build(), nil } func ParseEntitlementBid(bidStr string) (*v2.Entitlement, error) { @@ -225,10 +225,10 @@ func parseGrantPart(rs *bidScanner) (*v2.Grant, error) { return nil, err } - return &v2.Grant{ + return v2.Grant_builder{ Entitlement: entitlement, Principal: principal, - }, nil + }.Build(), nil } func ParseGrantBid(bidStr string) (*v2.Grant, error) { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go index 32b0541f..93a6e3ba 100644 --- 
a/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go @@ -5,27 +5,41 @@ import ( "fmt" "reflect" + "github.com/conductorone/baton-sdk/pkg/connectorbuilder" "github.com/conductorone/baton-sdk/pkg/field" "github.com/conductorone/baton-sdk/pkg/types" + "github.com/conductorone/baton-sdk/pkg/types/sessions" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" + "golang.org/x/oauth2" ) +type RunTimeOpts struct { + SessionStore sessions.SessionStore + TokenSource oauth2.TokenSource +} + // GetConnectorFunc is a function type that creates a connector instance. // It takes a context and configuration. The session cache constructor is retrieved from the context. type GetConnectorFunc[T field.Configurable] func(ctx context.Context, cfg T) (types.ConnectorServer, error) +type GetConnectorFunc2[T field.Configurable] func(ctx context.Context, cfg T, runTimeOpts RunTimeOpts) (types.ConnectorServer, error) // WithSessionCache creates a session cache using the provided constructor and adds it to the context. -func WithSessionCache(ctx context.Context, constructor types.SessionCacheConstructor) (context.Context, error) { +func WithSessionCache(ctx context.Context, constructor sessions.SessionStoreConstructor) (context.Context, error) { sessionCache, err := constructor(ctx) if err != nil { return ctx, fmt.Errorf("failed to create session cache: %w", err) } - return context.WithValue(ctx, types.SessionCacheKey{}, sessionCache), nil + return context.WithValue(ctx, sessions.SessionStoreKey{}, sessionCache), nil +} + +type ConnectorOpts struct { + TokenSource oauth2.TokenSource } +type NewConnector[T field.Configurable] func(ctx context.Context, cfg T, opts *ConnectorOpts) (connectorbuilder.ConnectorBuilderV2, []connectorbuilder.Opt, error) -func MakeGenericConfiguration[T field.Configurable](v *viper.Viper) (T, error) { +func MakeGenericConfiguration[T field.Configurable](v *viper.Viper, opts ...field.DecodeHookOption) (T, error) { // Create an instance of the struct type T using reflection var config T // Create a zero-value instance of T @@ -37,8 +51,8 @@ func MakeGenericConfiguration[T field.Configurable](v *viper.Viper) (T, error) { return config, fmt.Errorf("cannot convert *viper.Viper to %T", config) } - // Unmarshal into the config struct - err := v.Unmarshal(&config) + // Unmarshal into the config struct with any decode hook options provided + err := v.Unmarshal(&config, viper.DecodeHook(field.ComposeDecodeHookFunc(opts...))) if err != nil { return config, fmt.Errorf("failed to unmarshal config: %w", err) } @@ -207,7 +221,7 @@ func SetFlagsAndConstraints(command *cobra.Command, schema field.Configuration) } // mark required - if f.Required { + if f.Required && len(schema.FieldGroups) == 0 { if f.Variant == field.BoolVariant { return fmt.Errorf("requiring %s of type %s does not make sense", f.FieldName, f.Variant) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go index 3f728655..93c0e5b8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go @@ -7,13 +7,15 @@ import ( "encoding/json" "fmt" "os" - "sort" "time" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "github.com/maypok86/otter/v2" "github.com/spf13/cobra" "github.com/spf13/viper" "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" 
"google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" @@ -22,13 +24,15 @@ import ( "github.com/conductorone/baton-sdk/internal/connector" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" v1 "github.com/conductorone/baton-sdk/pb/c1/connector_wrapper/v1" + baton_v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" "github.com/conductorone/baton-sdk/pkg/connectorrunner" "github.com/conductorone/baton-sdk/pkg/crypto" "github.com/conductorone/baton-sdk/pkg/field" "github.com/conductorone/baton-sdk/pkg/logging" "github.com/conductorone/baton-sdk/pkg/session" - "github.com/conductorone/baton-sdk/pkg/types" + "github.com/conductorone/baton-sdk/pkg/types/sessions" "github.com/conductorone/baton-sdk/pkg/uotel" + utls2 "github.com/conductorone/baton-sdk/pkg/utls" ) const ( @@ -37,9 +41,55 @@ const ( type ContrainstSetter func(*cobra.Command, field.Configuration) error -// defaultSessionCacheConstructor creates a default in-memory session cache. -func defaultSessionCacheConstructor(ctx context.Context, opt ...types.SessionCacheConstructorOption) (types.SessionCache, error) { - return session.NewMemorySessionCache(ctx, opt...) +// In one shot & service mode, the child process uses this client to connect to the session store server... +// +// which uses the C1Z for storage. Unfortunately the C1Z is instantiated well after we fork the child process, +// so there is quite a bit of pass through. +func getGRPCSessionStoreClient(ctx context.Context, serverCfg *v1.ServerConfig) func(ctx context.Context, opt ...sessions.SessionStoreConstructorOption) (sessions.SessionStore, error) { + return func(_ context.Context, opt ...sessions.SessionStoreConstructorOption) (sessions.SessionStore, error) { + l := ctxzap.Extract(ctx) + clientTLSConfig, err := utls2.ClientConfig(ctx, serverCfg.GetCredential()) + if err != nil { + return nil, err + } + if serverCfg.GetSessionStoreListenPort() == 0 { + return &session.NoOpSessionStore{}, nil + } + // connected, grpc will handle retries for us. + dialCtx, canc := context.WithTimeout(ctx, 5*time.Second) + defer canc() + var dialErr error + var conn *grpc.ClientConn + for { + conn, err = grpc.DialContext( //nolint:staticcheck // grpc.DialContext is deprecated but we are using it still. + ctx, + fmt.Sprintf("127.0.0.1:%d", serverCfg.GetSessionStoreListenPort()), + grpc.WithTransportCredentials(credentials.NewTLS(clientTLSConfig)), + grpc.WithBlock(), //nolint:staticcheck // grpc.WithBlock is deprecated but we are using it still. + ) + if err != nil { + dialErr = err + select { + case <-time.After(time.Millisecond * 500): + case <-dialCtx.Done(): + return nil, dialErr + } + continue + } + break + } + + client := baton_v1.NewBatonSessionServiceClient(conn) + ss, err := session.NewGRPCSessionStore(ctx, client, opt...) 
+ if err != nil { + err2 := conn.Close() + if err2 != nil { + l.Error("error closing connection", zap.Error(err2)) + } + return nil, err + } + return ss, nil + } } func MakeMainCommand[T field.Configurable]( @@ -47,7 +97,7 @@ func MakeMainCommand[T field.Configurable]( name string, v *viper.Viper, confschema field.Configuration, - getconnector GetConnectorFunc[T], + getconnector GetConnectorFunc2[T], opts ...connectorrunner.Option, ) func(*cobra.Command, []string) error { return func(cmd *cobra.Command, args []string) error { @@ -98,8 +148,14 @@ func MakeMainCommand[T field.Configurable]( } } + readFromPath := true + decodeOpts := field.WithAdditionalDecodeHooks(field.FileUploadDecodeHook(readFromPath)) + t, err := MakeGenericConfiguration[T](v, decodeOpts) + if err != nil { + return fmt.Errorf("failed to make configuration: %w", err) + } // validate required fields and relationship constraints - if err := field.Validate(confschema, v); err != nil { + if err := field.Validate(confschema, t, field.WithAuthMethod(v.GetString("auth-method"))); err != nil { return err } @@ -140,7 +196,7 @@ func MakeMainCommand[T field.Configurable]( v.GetString("revoke-grant"), )) case v.GetBool("event-feed"): - opts = append(opts, connectorrunner.WithOnDemandEventStream(v.GetString("event-feed-id"), v.GetTime("event-feed-start-at"))) + opts = append(opts, connectorrunner.WithOnDemandEventStream(v.GetString("event-feed-id"), v.GetTime("event-feed-start-at"), v.GetString("event-feed-cursor"))) case v.GetString("create-account-profile") != "": profileMap := v.GetStringMap("create-account-profile") if profileMap == nil { @@ -214,8 +270,16 @@ func MakeMainCommand[T field.Configurable]( connectorrunner.WithOnDemandInvokeAction( v.GetString("file"), v.GetString("invoke-action"), + v.GetString("invoke-action-resource-type"), // Optional resource type for resource-scoped actions invokeActionArgsStruct, )) + case v.GetBool("list-action-schemas"): + opts = append(opts, + connectorrunner.WithActionsEnabled(), + connectorrunner.WithOnDemandListActionSchemas( + v.GetString("file"), + v.GetString("list-action-schemas-resource-type"), // Optional resource type filter + )) case v.GetString("delete-resource") != "": opts = append(opts, connectorrunner.WithProvisioningEnabled(), @@ -248,11 +312,6 @@ func MakeMainCommand[T field.Configurable]( opts = append(opts, connectorrunner.WithTicketingEnabled(), connectorrunner.WithGetTicket(v.GetString("ticket-id"))) - case len(v.GetStringSlice("sync-resources")) > 0: - opts = append(opts, - connectorrunner.WithTargetedSyncResourceIDs(v.GetStringSlice("sync-resources")), - connectorrunner.WithOnDemandSync(v.GetString("file")), - ) case v.GetBool("diff-syncs"): opts = append(opts, connectorrunner.WithDiffSyncs( @@ -269,8 +328,15 @@ func MakeMainCommand[T field.Configurable]( v.GetStringSlice("compact-sync-ids"), ), ) - default: + if len(v.GetStringSlice("sync-resources")) > 0 { + opts = append(opts, + connectorrunner.WithTargetedSyncResources(v.GetStringSlice("sync-resources"))) + } + if len(v.GetStringSlice("sync-resource-types")) > 0 { + opts = append(opts, + connectorrunner.WithSyncResourceTypeIDs(v.GetStringSlice("sync-resource-types"))) + } opts = append(opts, connectorrunner.WithOnDemandSync(v.GetString("file"))) } } @@ -299,18 +365,11 @@ func MakeMainCommand[T field.Configurable]( opts = append(opts, connectorrunner.WithSkipEntitlementsAndGrants(v.GetBool("skip-entitlements-and-grants"))) - t, err := MakeGenericConfiguration[T](v) - if err != nil { - return fmt.Errorf("failed to 
make configuration: %w", err) + if v.GetBool("skip-grants") { + opts = append(opts, connectorrunner.WithSkipGrants(v.GetBool("skip-grants"))) } - // Create session cache and add to context - runCtx, err = WithSessionCache(runCtx, defaultSessionCacheConstructor) - if err != nil { - return fmt.Errorf("failed to create session cache: %w", err) - } - - c, err := getconnector(runCtx, t) + c, err := getconnector(runCtx, t, RunTimeOpts{}) if err != nil { return err } @@ -371,7 +430,7 @@ func MakeGRPCServerCommand[T field.Configurable]( name string, v *viper.Viper, confschema field.Configuration, - getconnector GetConnectorFunc[T], + getconnector GetConnectorFunc2[T], ) func(*cobra.Command, []string) error { return func(cmd *cobra.Command, args []string) error { // NOTE(shackra): bind all the flags (persistent and @@ -412,21 +471,54 @@ func MakeGRPCServerCommand[T field.Configurable]( l := ctxzap.Extract(runCtx) l.Debug("starting grpc server") + readFromPath := true + decodeOpts := field.WithAdditionalDecodeHooks(field.FileUploadDecodeHook(readFromPath)) + t, err := MakeGenericConfiguration[T](v, decodeOpts) + if err != nil { + return fmt.Errorf("failed to make configuration: %w", err) + } // validate required fields and relationship constraints - if err := field.Validate(confschema, v); err != nil { + if err := field.Validate(confschema, t, field.WithAuthMethod(v.GetString("auth-method"))); err != nil { return err } - t, err := MakeGenericConfiguration[T](v) + + var cfgStr string + scn := bufio.NewScanner(os.Stdin) + for scn.Scan() { + cfgStr = scn.Text() + break + } + + cfgBytes, err := base64.StdEncoding.DecodeString(cfgStr) if err != nil { - return fmt.Errorf("failed to make configuration: %w", err) + return err } - // Create session cache and add to context - runCtx, err = WithSessionCache(runCtx, defaultSessionCacheConstructor) + // Avoid zombie processes. If the parent dies, this + // will cause Stdin on the child to close, and then + // the child will exit itself. 
+ go func() { + in := make([]byte, 1) + _, err := os.Stdin.Read(in) + if err != nil { + os.Exit(0) + } + }() + + if len(cfgBytes) == 0 { + return fmt.Errorf("unexpected empty input") + } + + serverCfg := &v1.ServerConfig{} + err = proto.Unmarshal(cfgBytes, serverCfg) if err != nil { - return fmt.Errorf("failed to create session cache: %w", err) + return err } + err = serverCfg.ValidateAll() + if err != nil { + return err + } clientSecret := v.GetString("client-secret") if clientSecret != "" { secretJwk, err := crypto.ParseClientSecret([]byte(clientSecret), true) @@ -436,7 +528,17 @@ func MakeGRPCServerCommand[T field.Configurable]( runCtx = context.WithValue(runCtx, crypto.ContextClientSecretKey, secretJwk) } - c, err := getconnector(runCtx, t) + sessionStoreMaximumSize := v.GetInt(field.ServerSessionStoreMaximumSizeField.GetName()) + sessionConstructor := getGRPCSessionStoreClient(runCtx, serverCfg) + c, err := getconnector(runCtx, t, RunTimeOpts{ + SessionStore: NewLazyCachingSessionStore(sessionConstructor, func(otterOptions *otter.Options[string, []byte]) { + if sessionStoreMaximumSize <= 0 { + otterOptions.MaximumWeight = 0 + } else { + otterOptions.MaximumWeight = uint64(sessionStoreMaximumSize) + } + }), + }) if err != nil { return err } @@ -455,6 +557,14 @@ func MakeGRPCServerCommand[T field.Configurable]( copts = append(copts, connector.WithFullSyncDisabled()) } + if len(v.GetStringSlice("sync-resources")) > 0 { + copts = append(copts, connector.WithTargetedSyncResources(v.GetStringSlice("sync-resources"))) + } + + if len(v.GetStringSlice("sync-resource-types")) > 0 { + copts = append(copts, connector.WithSyncResourceTypeIDs(v.GetStringSlice("sync-resource-types"))) + } + switch { case v.GetString("grant-entitlement") != "": copts = append(copts, connector.WithProvisioningEnabled()) @@ -476,8 +586,6 @@ func MakeGRPCServerCommand[T field.Configurable]( copts = append(copts, connector.WithTicketingEnabled()) case v.GetBool("get-ticket"): copts = append(copts, connector.WithTicketingEnabled()) - case len(v.GetStringSlice("sync-resources")) > 0: - copts = append(copts, connector.WithTargetedSyncResourceIDs(v.GetStringSlice("sync-resources"))) } cw, err := connector.NewWrapper(runCtx, c, copts...) @@ -485,43 +593,6 @@ func MakeGRPCServerCommand[T field.Configurable]( return err } - var cfgStr string - scn := bufio.NewScanner(os.Stdin) - for scn.Scan() { - cfgStr = scn.Text() - break - } - cfgBytes, err := base64.StdEncoding.DecodeString(cfgStr) - if err != nil { - return err - } - - // Avoid zombie processes. If the parent dies, this - // will cause Stdin on the child to close, and then - // the child will exit itself. 
- go func() { - in := make([]byte, 1) - _, err := os.Stdin.Read(in) - if err != nil { - os.Exit(0) - } - }() - - if len(cfgBytes) == 0 { - return fmt.Errorf("unexpected empty input") - } - - serverCfg := &v1.ServerConfig{} - err = proto.Unmarshal(cfgBytes, serverCfg) - if err != nil { - return err - } - - err = serverCfg.ValidateAll() - if err != nil { - return err - } - return cw.Run(runCtx, serverCfg) } } @@ -531,7 +602,7 @@ func MakeCapabilitiesCommand[T field.Configurable]( name string, v *viper.Viper, confschema field.Configuration, - getconnector GetConnectorFunc[T], + getconnector GetConnectorFunc2[T], ) func(*cobra.Command, []string) error { return func(cmd *cobra.Command, args []string) error { // NOTE(shackra): bind all the flags (persistent and @@ -553,22 +624,18 @@ func MakeCapabilitiesCommand[T field.Configurable]( return err } - // validate required fields and relationship constraints - if err := field.Validate(confschema, v); err != nil { - return err - } - t, err := MakeGenericConfiguration[T](v) + readFromPath := true + decodeOpts := field.WithAdditionalDecodeHooks(field.FileUploadDecodeHook(readFromPath)) + t, err := MakeGenericConfiguration[T](v, decodeOpts) if err != nil { return fmt.Errorf("failed to make configuration: %w", err) } - - // Create session cache and add to context - runCtx, err = WithSessionCache(runCtx, defaultSessionCacheConstructor) - if err != nil { - return fmt.Errorf("failed to create session cache: %w", err) + // validate required fields and relationship constraints + if err := field.Validate(confschema, t, field.WithAuthMethod(v.GetString("auth-method"))); err != nil { + return err } - c, err := getconnector(runCtx, t) + c, err := getconnector(runCtx, t, RunTimeOpts{}) if err != nil { return err } @@ -578,7 +645,7 @@ func MakeCapabilitiesCommand[T field.Configurable]( return err } - if md.Metadata.Capabilities == nil { + if !md.GetMetadata().HasCapabilities() { return fmt.Errorf("connector does not support capabilities") } @@ -588,7 +655,7 @@ func MakeCapabilitiesCommand[T field.Configurable]( } a := &anypb.Any{} - err = anypb.MarshalFrom(a, md.Metadata.Capabilities, proto.MarshalOptions{Deterministic: true}) + err = anypb.MarshalFrom(a, md.GetMetadata().GetCapabilities(), proto.MarshalOptions{Deterministic: true}) if err != nil { return err } @@ -612,14 +679,9 @@ func MakeConfigSchemaCommand[T field.Configurable]( name string, v *viper.Viper, confschema field.Configuration, - getconnector GetConnectorFunc[T], + getconnector GetConnectorFunc2[T], ) func(*cobra.Command, []string) error { return func(cmd *cobra.Command, args []string) error { - // Sort fields by FieldName - sort.Slice(confschema.Fields, func(i, j int) bool { - return confschema.Fields[i].FieldName < confschema.Fields[j].FieldName - }) - // Use MarshalIndent for pretty printing pb, err := json.MarshalIndent(&confschema, "", " ") if err != nil { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go index 28ca3713..c2fd1848 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go @@ -14,22 +14,25 @@ import ( "github.com/conductorone/baton-sdk/pkg/crypto" "github.com/conductorone/baton-sdk/pkg/crypto/providers/jwk" "github.com/conductorone/baton-sdk/pkg/logging" + "github.com/conductorone/baton-sdk/pkg/session" "github.com/conductorone/baton-sdk/pkg/ugrpc" + 
"github.com/go-jose/go-jose/v4" + "github.com/maypok86/otter/v2" "github.com/mitchellh/mapstructure" "github.com/spf13/cobra" "github.com/spf13/viper" "go.uber.org/zap" + "golang.org/x/oauth2" "google.golang.org/protobuf/types/known/structpb" "github.com/conductorone/baton-sdk/internal/connector" - pb_connector_api "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" + v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" "github.com/conductorone/baton-sdk/pkg/auth" "github.com/conductorone/baton-sdk/pkg/field" c1_lambda_grpc "github.com/conductorone/baton-sdk/pkg/lambda/grpc" c1_lambda_config "github.com/conductorone/baton-sdk/pkg/lambda/grpc/config" "github.com/conductorone/baton-sdk/pkg/lambda/grpc/middleware" - "github.com/conductorone/baton-sdk/pkg/session" - "github.com/conductorone/baton-sdk/pkg/types" + "github.com/conductorone/baton-sdk/pkg/types/sessions" "google.golang.org/grpc" ) @@ -37,9 +40,10 @@ func OptionallyAddLambdaCommand[T field.Configurable]( ctx context.Context, name string, v *viper.Viper, - getconnector GetConnectorFunc[T], + getconnector GetConnectorFunc2[T], connectorSchema field.Configuration, mainCmd *cobra.Command, + sessionStoreEnabled bool, ) error { lambdaSchema := field.NewConfiguration(field.LambdaServerFields(), field.WithConstraints(field.LambdaServerRelationships...)) @@ -67,12 +71,16 @@ func OptionallyAddLambdaCommand[T field.Configurable]( logLevel = "info" } - initalLogFields := map[string]interface{}{ - "tenant": os.Getenv("tenant"), - "connector": os.Getenv("connector"), - "installation": os.Getenv("installation"), - "app": os.Getenv("app"), - "version": os.Getenv("version"), + initialLogFields := map[string]interface{}{ + "tenant_id": os.Getenv("tenant"), + "connector_id": os.Getenv("connector"), + "app_id": os.Getenv("app"), + "release_version": os.Getenv("version"), + "installation": os.Getenv("installation"), + "catalog_id": os.Getenv("catalog_id"), + "catalog_name": os.Getenv("catalog_name"), + "tenant_name": os.Getenv("tenant_name"), + "tenant_is_internal": os.Getenv("tenant_is_internal"), } runCtx, err := initLogger( @@ -80,13 +88,13 @@ func OptionallyAddLambdaCommand[T field.Configurable]( name, logging.WithLogFormat(v.GetString("log-format")), logging.WithLogLevel(logLevel), - logging.WithInitialFields(initalLogFields), + logging.WithInitialFields(initialLogFields), ) if err != nil { return err } - runCtx, otelShutdown, err := initOtel(runCtx, name, v, initalLogFields) + runCtx, otelShutdown, err := initOtel(runCtx, name, v, initialLogFields) if err != nil { return err } @@ -117,10 +125,10 @@ func OptionallyAddLambdaCommand[T field.Configurable]( } // Create connector config service client using the DPoP client - configClient := pb_connector_api.NewConnectorConfigServiceClient(grpcClient) + configClient := v1.NewConnectorConfigServiceClient(grpcClient) // Get configuration, convert it to viper flag values, then proceed. 
- config, err := configClient.GetConnectorConfig(runCtx, &pb_connector_api.GetConnectorConfigRequest{}) + config, err := configClient.GetConnectorConfig(runCtx, &v1.GetConnectorConfigRequest{}) if err != nil { return fmt.Errorf("lambda-run: failed to get connector config: %w", err) } @@ -141,7 +149,10 @@ func OptionallyAddLambdaCommand[T field.Configurable]( return fmt.Errorf("lambda-run: failed to unmarshal decrypted config: %w", err) } - t, err := MakeGenericConfiguration[T](v) + // parse content directly for lambdas, don't read from file + readFromPath := false + decodeOpts := field.WithAdditionalDecodeHooks(field.FileUploadDecodeHook(readFromPath)) + t, err := MakeGenericConfiguration[T](v, decodeOpts) if err != nil { return fmt.Errorf("lambda-run: failed to make generic configuration: %w", err) } @@ -151,22 +162,31 @@ func OptionallyAddLambdaCommand[T field.Configurable]( cfg.Set(k, v) } default: - err = mapstructure.Decode(configStruct.AsMap(), cfg) + // Use mapstructure with decode hook for file upload fields + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: field.ComposeDecodeHookFunc(decodeOpts), + Result: cfg, + }) + if err != nil { + return fmt.Errorf("lambda-run: failed to create decoder: %w", err) + } + err = decoder.Decode(configStruct.AsMap()) if err != nil { return fmt.Errorf("lambda-run: failed to decode config: %w", err) } } - if err := field.Validate(connectorSchema, t); err != nil { - return fmt.Errorf("lambda-run: failed to validate config: %w", err) + configStructMap := configStruct.AsMap() + + var fieldOptions []field.Option + if authMethod, ok := configStructMap["auth-method"]; ok { + if authMethodStr, ok := authMethod.(string); ok { + fieldOptions = append(fieldOptions, field.WithAuthMethod(authMethodStr)) + } } - // Create session cache and add to context - // Use the same DPoP credentials for the session cache - sessionCacheConstructor := createSessionCacheConstructor(grpcClient) - runCtx, err = WithSessionCache(runCtx, sessionCacheConstructor) - if err != nil { - return fmt.Errorf("lambda-run: failed to create session cache: %w", err) + if err := field.Validate(connectorSchema, t, fieldOptions...); err != nil { + return fmt.Errorf("lambda-run: failed to validate config: %w", err) } clientSecret := v.GetString("client-secret") @@ -177,8 +197,33 @@ func OptionallyAddLambdaCommand[T field.Configurable]( } runCtx = context.WithValue(runCtx, crypto.ContextClientSecretKey, secretJwk) } + sessionStoreMaximumSize := v.GetInt(field.ServerSessionStoreMaximumSizeField.GetName()) + var sessionStoreConstructor sessions.SessionStoreConstructor + if sessionStoreEnabled { + sessionStoreConstructor = createSessionCacheConstructor(grpcClient) + } else { + sessionStoreConstructor = func(ctx context.Context, opt ...sessions.SessionStoreConstructorOption) (sessions.SessionStore, error) { + return &session.NoOpSessionStore{}, nil + } + } + ops := RunTimeOpts{ + SessionStore: NewLazyCachingSessionStore(sessionStoreConstructor, func(otterOptions *otter.Options[string, []byte]) { + if sessionStoreMaximumSize <= 0 { + otterOptions.MaximumWeight = 0 + } else { + otterOptions.MaximumWeight = uint64(sessionStoreMaximumSize) + } + }), + } - c, err := getconnector(runCtx, t) + if hasOauthField(connectorSchema.Fields) { + ops.TokenSource = &lambdaTokenSource{ + ctx: runCtx, + webKey: webKey, + client: configClient, + } + } + c, err := getconnector(runCtx, t, ops) if err != nil { return fmt.Errorf("lambda-run: failed to get connector: %w", err) } @@ -210,7 
+255,7 @@ func OptionallyAddLambdaCommand[T field.Configurable]( TicketingEnabled: true, } - chain := ugrpc.ChainUnaryInterceptors(authOpt, ugrpc.SessionCacheUnaryInterceptor(runCtx)) + chain := ugrpc.ChainUnaryInterceptors(authOpt) s := c1_lambda_grpc.NewServer(chain) connector.Register(runCtx, s, c, opts) @@ -222,12 +267,51 @@ func OptionallyAddLambdaCommand[T field.Configurable]( return nil } -// createSessionCacheConstructor creates a session cache constructor function that uses the provided gRPC client -func createSessionCacheConstructor(grpcClient grpc.ClientConnInterface) types.SessionCacheConstructor { - return func(ctx context.Context, opt ...types.SessionCacheConstructorOption) (types.SessionCache, error) { +// createSessionCacheConstructor creates a session cache constructor function that uses the provided gRPC client. +func createSessionCacheConstructor(grpcClient grpc.ClientConnInterface) sessions.SessionStoreConstructor { + return func(ctx context.Context, opt ...sessions.SessionStoreConstructorOption) (sessions.SessionStore, error) { // Create the gRPC session client using the same gRPC connection - client := pb_connector_api.NewBatonSessionServiceClient(grpcClient) + client := v1.NewBatonSessionServiceClient(grpcClient) // Create and return the session cache - return session.NewGRPCSessionCache(ctx, client, opt...) + return session.NewGRPCSessionStore(ctx, client, opt...) + } +} + +type lambdaTokenSource struct { + ctx context.Context + webKey *jose.JSONWebKey + client v1.ConnectorConfigServiceClient +} + +func (s *lambdaTokenSource) Token() (*oauth2.Token, error) { + resp, err := s.client.GetConnectorOauthToken(s.ctx, &v1.GetConnectorOauthTokenRequest{}) + if err != nil { + return nil, err + } + + ed25519PrivateKey, ok := s.webKey.Key.(ed25519.PrivateKey) + if !ok { + return nil, fmt.Errorf("lambda-run: failed to cast webkey to ed25519.PrivateKey") + } + + decrypted, err := jwk.DecryptED25519(ed25519PrivateKey, resp.Token) + if err != nil { + return nil, fmt.Errorf("lambda-run: failed to decrypt config: %w", err) + } + + t := oauth2.Token{} + err = json.Unmarshal(decrypted, &t) + if err != nil { + return nil, fmt.Errorf("lambda-run: failed to unmarshal decrypted config: %w", err) + } + return &t, nil +} + +func hasOauthField(fields []field.SchemaField) bool { + for _, f := range fields { + if f.ConnectorConfig.FieldType == field.OAuth2 { + return true + } } + return false } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server_omitted.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server_omitted.go index 902887d0..eaaa608b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server_omitted.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server_omitted.go @@ -15,9 +15,10 @@ func OptionallyAddLambdaCommand[T field.Configurable]( ctx context.Context, name string, v *viper.Viper, - getconnector GetConnectorFunc[T], + getconnector GetConnectorFunc2[T], connectorSchema field.Configuration, mainCmd *cobra.Command, + sessionStoreEnabled bool, ) error { return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lazy_session.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lazy_session.go new file mode 100644 index 00000000..224ff248 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lazy_session.go @@ -0,0 +1,129 @@ +package cli + +import ( + "context" + "math" + "sync" + "time" + + "github.com/conductorone/baton-sdk/pkg/session" + 
"github.com/conductorone/baton-sdk/pkg/types/sessions" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "github.com/maypok86/otter/v2" + "github.com/maypok86/otter/v2/stats" +) + +var _ sessions.SessionStore = (*lazyCachingSessionStore)(nil) + +type OtterAdjuster func(otterOptions *otter.Options[string, []byte]) + +func NewLazyCachingSessionStore(constructor sessions.SessionStoreConstructor, otterAdjuster OtterAdjuster) *lazyCachingSessionStore { + otterOptions := &otter.Options[string, []byte]{ + // 15MB Note(kans): not much rigor went into this number. An arbirary sampling of lambda invocations suggests they use around 50MB out of 128MB. + MaximumWeight: 1024 * 1024 * 15, + ExpiryCalculator: otter.ExpiryWriting[string, []byte](10 * time.Minute), + StatsRecorder: stats.NewCounter(), + Weigher: func(key string, value []byte) uint32 { + totalLen := 32 + len(key) + len(value) + if totalLen < 0 { + return math.MaxUint32 + } + if totalLen > math.MaxInt32 { + return math.MaxUint32 + } + return uint32(totalLen) + }, + } + if otterAdjuster != nil { + otterAdjuster(otterOptions) + } + + if otterOptions.MaximumWeight == 0 { + otterOptions = nil + } + return &lazyCachingSessionStore{constructor: constructor, otterOptions: otterOptions} +} + +// lazyCachingSessionStore implements types.SessionStore interface but only creates the actual session +// when a method is called for the first time. +type lazyCachingSessionStore struct { + constructor sessions.SessionStoreConstructor + once sync.Once + session sessions.SessionStore + err error + otterOptions *otter.Options[string, []byte] +} + +// ensureSession creates the actual session store if it hasn't been created yet. +func (l *lazyCachingSessionStore) ensureSession(ctx context.Context) error { + l.once.Do(func() { + var ss sessions.SessionStore + ss, l.err = l.constructor(ctx) + if l.err != nil { + return + } + if l.otterOptions == nil { + ctxzap.Extract(ctx).Info("Session store cache is disabled") + l.session = ss + return + } + l.session, l.err = session.NewMemorySessionCache(l.otterOptions, ss) + }) + return l.err +} + +// Get implements types.SessionStore. +func (l *lazyCachingSessionStore) Get(ctx context.Context, key string, opt ...sessions.SessionStoreOption) ([]byte, bool, error) { + if err := l.ensureSession(ctx); err != nil { + return nil, false, err + } + return l.session.Get(ctx, key, opt...) +} + +// GetMany implements types.SessionStore. +func (l *lazyCachingSessionStore) GetMany(ctx context.Context, keys []string, opt ...sessions.SessionStoreOption) (map[string][]byte, []string, error) { + if err := l.ensureSession(ctx); err != nil { + return nil, nil, err + } + return l.session.GetMany(ctx, keys, opt...) +} + +// Set implements types.SessionStore. +func (l *lazyCachingSessionStore) Set(ctx context.Context, key string, value []byte, opt ...sessions.SessionStoreOption) error { + if err := l.ensureSession(ctx); err != nil { + return err + } + return l.session.Set(ctx, key, value, opt...) +} + +// SetMany implements types.SessionStore. +func (l *lazyCachingSessionStore) SetMany(ctx context.Context, values map[string][]byte, opt ...sessions.SessionStoreOption) error { + if err := l.ensureSession(ctx); err != nil { + return err + } + return l.session.SetMany(ctx, values, opt...) +} + +// Delete implements types.SessionStore. 
+func (l *lazyCachingSessionStore) Delete(ctx context.Context, key string, opt ...sessions.SessionStoreOption) error { + if err := l.ensureSession(ctx); err != nil { + return err + } + return l.session.Delete(ctx, key, opt...) +} + +// Clear implements types.SessionStore. +func (l *lazyCachingSessionStore) Clear(ctx context.Context, opt ...sessions.SessionStoreOption) error { + if err := l.ensureSession(ctx); err != nil { + return err + } + return l.session.Clear(ctx, opt...) +} + +// GetAll implements types.SessionStore. +func (l *lazyCachingSessionStore) GetAll(ctx context.Context, pageToken string, opt ...sessions.SessionStoreOption) (map[string][]byte, string, error) { + if err := l.ensureSession(ctx); err != nil { + return nil, "", err + } + return l.session.GetAll(ctx, pageToken, opt...) +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go index cf2fab9b..d9304524 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go @@ -10,23 +10,90 @@ import ( "strings" "github.com/conductorone/baton-sdk/pkg/cli" + "github.com/conductorone/baton-sdk/pkg/connectorbuilder" "github.com/conductorone/baton-sdk/pkg/connectorrunner" "github.com/conductorone/baton-sdk/pkg/field" + "github.com/conductorone/baton-sdk/pkg/types" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "github.com/spf13/cobra" "github.com/spf13/viper" + "go.uber.org/zap" ) +func RunConnector[T field.Configurable]( + ctx context.Context, + connectorName string, + version string, + schema field.Configuration, + cf cli.NewConnector[T], + options ...connectorrunner.Option, +) { + f := func(ctx context.Context, cfg T, runTimeOpts cli.RunTimeOpts) (types.ConnectorServer, error) { + l := ctxzap.Extract(ctx) + connector, builderOpts, err := cf(ctx, cfg, &cli.ConnectorOpts{TokenSource: runTimeOpts.TokenSource}) + if err != nil { + return nil, err + } + + builderOpts = append(builderOpts, connectorbuilder.WithSessionStore(runTimeOpts.SessionStore)) + + c, err := connectorbuilder.NewConnector(ctx, connector, builderOpts...) + if err != nil { + l.Error("error creating connector", zap.Error(err)) + return nil, err + } + return c, nil + } + + _, cmd, err := DefineConfigurationV2(ctx, connectorName, f, schema, options...) + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + return + } + + cmd.Version = version + + err = cmd.Execute() + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } +} + +var ErrDuplicateField = errors.New("multiple fields with the same name") + +// GetConnectorFunc is a function type that creates a connector instance. +// It takes a context and configuration. The session cache constructor is retrieved from the context. +// deprecated - prefer RunConnector. func DefineConfiguration[T field.Configurable]( ctx context.Context, connectorName string, connector cli.GetConnectorFunc[T], schema field.Configuration, options ...connectorrunner.Option, +) (*viper.Viper, *cobra.Command, error) { + f := func(ctx context.Context, cfg T, runTimeOpts cli.RunTimeOpts) (types.ConnectorServer, error) { + connector, err := connector(ctx, cfg) + if err != nil { + return nil, err + } + return connector, nil + } + return DefineConfigurationV2(ctx, connectorName, f, schema, options...) +} + +// deprecated - prefer RunConnector. 
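+//
+// A rough migration sketch for new connectors (newExampleConnector, Config, and
+// version are illustrative names, not part of this SDK):
+//
+//	RunConnector(ctx, "baton-example", version, schema,
+//		func(ctx context.Context, cfg *Config, opts *cli.ConnectorOpts) (connectorbuilder.ConnectorBuilderV2, []connectorbuilder.Opt, error) {
+//			return newExampleConnector(ctx, cfg, opts)
+//		})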
+func DefineConfigurationV2[T field.Configurable]( + ctx context.Context, + connectorName string, + connector cli.GetConnectorFunc2[T], + schema field.Configuration, + options ...connectorrunner.Option, ) (*viper.Viper, *cobra.Command, error) { if err := verifyStructFields[T](schema); err != nil { return nil, nil, fmt.Errorf("VerifyStructFields failed: %w", err) } - v := viper.New() v.SetConfigType("yaml") @@ -46,23 +113,48 @@ func DefineConfiguration[T field.Configurable]( v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) v.AutomaticEnv() + defaultFieldsByName := make(map[string]field.SchemaField) + for _, f := range field.DefaultFields { + if _, ok := defaultFieldsByName[f.FieldName]; ok { + return nil, nil, fmt.Errorf("multiple default fields with the same name: %s", f.FieldName) + } + defaultFieldsByName[f.FieldName] = f + } + confschema := schema confschema.Fields = append(field.DefaultFields, confschema.Fields...) // Ensure unique fields uniqueFields := make(map[string]field.SchemaField) + fieldsToDelete := make(map[string]bool) for _, f := range confschema.Fields { - if s, ok := uniqueFields[f.FieldName]; ok { - if !f.WasReExported && !s.WasReExported { - return nil, nil, fmt.Errorf("multiple fields with the same name: %s.If you want to use a default field in the SDK, use ExportAs on the connector schema field", f.FieldName) + if existingField, ok := uniqueFields[f.FieldName]; ok { + // If the duplicate field is not a default field, error. + if _, ok := defaultFieldsByName[f.FieldName]; !ok { + return nil, nil, fmt.Errorf("%w: %s", ErrDuplicateField, f.FieldName) } + // If redeclaring a default field and not reexporting it, error. + if !f.WasReExported { + return nil, nil, fmt.Errorf("%w: %s. If you want to use a default field in the SDK, use ExportAs on the connector schema field", ErrDuplicateField, f.FieldName) + } + if existingField.WasReExported { + return nil, nil, fmt.Errorf("%w: %s. If you want to use a default field in the SDK, use ExportAs on the connector schema field", ErrDuplicateField, f.FieldName) + } + + fieldsToDelete[existingField.FieldName] = true } uniqueFields[f.FieldName] = f } - confschema.Fields = make([]field.SchemaField, 0, len(uniqueFields)) - for _, f := range uniqueFields { - confschema.Fields = append(confschema.Fields, f) + + // Filter out fields that were not reexported and were in the fieldsToDelete list. + fields := make([]field.SchemaField, 0, len(confschema.Fields)) + for _, f := range confschema.Fields { + if !f.WasReExported && fieldsToDelete[f.FieldName] { + continue + } + fields = append(fields, f) } + confschema.Fields = fields // setup CLI with cobra mainCMD := &cobra.Command{ @@ -78,7 +170,14 @@ func DefineConfiguration[T field.Configurable]( relationships = append(relationships, field.DefaultRelationships...) relationships = append(relationships, confschema.Constraints...) - err = cli.SetFlagsAndConstraints(mainCMD, field.NewConfiguration(confschema.Fields, field.WithConstraints(relationships...))) + err = cli.SetFlagsAndConstraints( + mainCMD, + field.NewConfiguration( + confschema.Fields, + field.WithConstraints(relationships...), + field.WithFieldGroups(confschema.FieldGroups), + ), + ) if err != nil { return nil, nil, err } @@ -86,7 +185,12 @@ func DefineConfiguration[T field.Configurable]( mainCMD.AddCommand(cli.AdditionalCommands(connectorName, confschema.Fields)...) 
cli.VisitFlags(mainCMD, v) - err = cli.OptionallyAddLambdaCommand(ctx, connectorName, v, connector, confschema, mainCMD) + sessionStoreEnabled, err := connectorrunner.IsSessionStoreEnabled(ctx, options...) + if err != nil { + return nil, nil, err + } + + err = cli.OptionallyAddLambdaCommand(ctx, connectorName, v, connector, confschema, mainCMD, sessionStoreEnabled) if err != nil { return nil, nil, err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/config/generate.go b/vendor/github.com/conductorone/baton-sdk/pkg/config/generate.go index 25498beb..8d9b0709 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/config/generate.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/config/generate.go @@ -27,6 +27,18 @@ func Generate(name string, schema field.Configuration) { if len(schema.Fields) == 0 { panic("schema must contain at least one field") } + + defaultGroupCount := 0 + for _, group := range schema.FieldGroups { + if group.Default { + defaultGroupCount++ + } + } + + if defaultGroupCount > 1 { + panic("schema must not contain more than one default field group") + } + confschema := schema confschema.Fields = append(confschema.Fields, field.DefaultFields...) // Ensure unique fields @@ -68,7 +80,11 @@ func Generate(name string, schema field.Configuration) { } switch f.Variant { case field.StringVariant: - nf.FieldType = "string" + if f.ConnectorConfig.FieldType == field.FileUpload { + nf.FieldType = "[]byte" + } else { + nf.FieldType = "string" + } case field.BoolVariant: nf.FieldType = "bool" case field.IntVariant: @@ -108,7 +124,7 @@ type {{ .StructName }} struct { {{- end }} } -func (c* {{ .StructName }}) findFieldByTag(tagValue string) (any, bool) { +func (c *{{ .StructName }}) findFieldByTag(tagValue string) (any, bool) { v := reflect.ValueOf(c).Elem() // Dereference pointer to struct t := v.Type() @@ -140,11 +156,13 @@ func (c *{{ .StructName }}) GetString(fieldName string) string { if !ok { return "" } - t, ok := v.(string) - if !ok { - panic("wrong type") + if t, ok := v.(string); ok { + return t } - return t + if t, ok := v.([]byte); ok { + return string(t) + } + panic("wrong type") } func (c *{{ .StructName }}) GetInt(fieldName string) int { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go new file mode 100644 index 00000000..e9c59e63 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go @@ -0,0 +1,136 @@ +package connectorbuilder + +import ( + "context" + "fmt" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/crypto" + "github.com/conductorone/baton-sdk/pkg/types/tasks" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// CreateAccountResponse is a semi-opaque type returned from CreateAccount operations. +// +// This is used to communicate the result of account creation back to Baton. +type CreateAccountResponse interface { + proto.Message + GetIsCreateAccountResult() bool +} + +// AccountManager extends ResourceSyncer to add capabilities for managing user accounts. +// +// Implementing this interface indicates the connector supports creating accounts +// in the external system. 
A resource type should implement this interface if it +// represents users or accounts that can be provisioned. +type AccountManager interface { + ResourceSyncer + AccountManagerLimited +} + +type AccountManagerV2 interface { + ResourceSyncerV2 + AccountManagerLimited +} + +type AccountManagerLimited interface { + CreateAccount(ctx context.Context, + accountInfo *v2.AccountInfo, + credentialOptions *v2.LocalCredentialOptions) (CreateAccountResponse, []*v2.PlaintextData, annotations.Annotations, error) + CreateAccountCapabilityDetails(ctx context.Context) (*v2.CredentialDetailsAccountProvisioning, annotations.Annotations, error) +} + +type OldAccountManager interface { + ResourceSyncer + CreateAccount(ctx context.Context, + accountInfo *v2.AccountInfo, + credentialOptions *v2.CredentialOptions) (CreateAccountResponse, []*v2.PlaintextData, annotations.Annotations, error) +} + +func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRequest) (*v2.CreateAccountResponse, error) { + ctx, span := tracer.Start(ctx, "builder.CreateAccount") + defer span.End() + + start := b.nowFunc() + tt := tasks.CreateAccountType + l := ctxzap.Extract(ctx) + if b.accountManager == nil { + l.Error("error: connector does not have account manager configured") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Error(codes.Unimplemented, "connector does not have account manager configured") + } + + opts, err := crypto.ConvertCredentialOptions(ctx, b.clientSecret, request.GetCredentialOptions(), request.GetEncryptionConfigs()) + if err != nil { + l.Error("error: converting credential options failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: converting credential options failed: %w", err) + } + + result, plaintexts, annos, err := b.accountManager.CreateAccount(ctx, request.GetAccountInfo(), opts) + if err != nil { + l.Error("error: create account failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: create account failed: %w", err) + } + + pkem, err := crypto.NewEncryptionManager(request.GetCredentialOptions(), request.GetEncryptionConfigs()) + if err != nil { + l.Error("error: creating encryption manager failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: creating encryption manager failed: %w", err) + } + + var encryptedDatas []*v2.EncryptedData + for _, plaintextCredential := range plaintexts { + encryptedData, err := pkem.Encrypt(ctx, plaintextCredential) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, err + } + encryptedDatas = append(encryptedDatas, encryptedData...) 
+ } + + rv := v2.CreateAccountResponse_builder{ + EncryptedData: encryptedDatas, + Annotations: annos, + }.Build() + + switch r := result.(type) { + case *v2.CreateAccountResponse_SuccessResult: + rv.SetSuccess(proto.ValueOrDefault(r)) + case *v2.CreateAccountResponse_ActionRequiredResult: + rv.SetActionRequired(proto.ValueOrDefault(r)) + case *v2.CreateAccountResponse_AlreadyExistsResult: + rv.SetAlreadyExists(proto.ValueOrDefault(r)) + case *v2.CreateAccountResponse_InProgressResult: + rv.SetInProgress(proto.ValueOrDefault(r)) + default: + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Error(codes.Unimplemented, fmt.Sprintf("unknown result type: %T", result)) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return rv, nil +} + +func (b *builder) addAccountManager(_ context.Context, typeId string, in interface{}) error { + if _, ok := in.(OldAccountManager); ok { + return fmt.Errorf("error: old account manager interface implemented for %s", typeId) + } + + if accountManager, ok := in.(AccountManagerLimited); ok { + // NOTE(kans): currently unused - but these should probably be (resource) typed + b.accountManagers[typeId] = accountManager + if b.accountManager != nil { + return fmt.Errorf("error: duplicate resource type found for account manager %s", typeId) + } + b.accountManager = accountManager + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/actions.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/actions.go new file mode 100644 index 00000000..0edab281 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/actions.go @@ -0,0 +1,241 @@ +package connectorbuilder + +import ( + "context" + "fmt" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/actions" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/types/tasks" + "google.golang.org/protobuf/types/known/structpb" +) + +// ActionManager defines the interface for managing actions in the connector builder. +// This is the internal interface used by the builder for dispatch. +// The *actions.ActionManager type implements this interface. +type ActionManager interface { + // ListActionSchemas returns all action schemas, optionally filtered by resource type. + // If resourceTypeID is empty, returns all actions (both global and resource-scoped). + // If resourceTypeID is set, returns only actions for that resource type. + ListActionSchemas(ctx context.Context, resourceTypeID string) ([]*v2.BatonActionSchema, annotations.Annotations, error) + + // GetActionSchema returns the schema for a specific action by name. + GetActionSchema(ctx context.Context, name string) (*v2.BatonActionSchema, annotations.Annotations, error) + + // InvokeAction invokes an action. If resourceTypeID is set, invokes a resource-scoped action. + InvokeAction( + ctx context.Context, + name string, + resourceTypeID string, + args *structpb.Struct, + ) (string, v2.BatonActionStatus, *structpb.Struct, annotations.Annotations, error) + + // GetActionStatus returns the status of an outstanding action. + GetActionStatus(ctx context.Context, id string) (v2.BatonActionStatus, string, *structpb.Struct, annotations.Annotations, error) + + // GetTypeRegistry returns a registry for registering resource-scoped actions. 
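+	//
+	// A resource builder that also implements ResourceActionProvider (defined below)
+	// typically registers its handlers into the registry returned here. A minimal,
+	// illustrative sketch (hypothetical connector code, not part of this SDK):
+	//
+	//	func (u *userBuilder) ResourceActions(ctx context.Context, registry actions.ActionRegistry) error {
+	//		schema := newResetMFASchema() // hypothetical helper building a *v2.BatonActionSchema
+	//		handler := func(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) {
+	//			// call the upstream API here and return the action result
+	//			return nil, nil, nil
+	//		}
+	//		return registry.Register(ctx, schema, handler)
+	//	}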
+ GetTypeRegistry(ctx context.Context, resourceTypeID string) (actions.ActionRegistry, error) + + // HasActions returns true if there are any registered actions. + HasActions() bool +} + +// GlobalActionProvider allows connectors to register global (non-resource-scoped) actions. +// This is the preferred method for registering global actions in new connectors. +// Implement this interface instead of the deprecated CustomActionManager or RegisterActionManagerLimited. +type GlobalActionProvider interface { + GlobalActions(ctx context.Context, registry actions.ActionRegistry) error +} + +// ResourceActionProvider is an interface that resource builders can implement +// to provide resource-scoped actions for their resource type. +type ResourceActionProvider interface { + // ResourceActions returns the schemas and handlers for all resource actions + // supported by this resource type. + ResourceActions(ctx context.Context, registry actions.ActionRegistry) error +} + +// Deprecated: CustomActionManager is deprecated. Implement GlobalActionProvider instead, +// which registers actions directly into the SDK's ActionManager. +// +// This interface allows connectors to define and execute custom actions +// that can be triggered from Baton. It supports both global actions and +// resource-scoped actions through the resourceTypeID parameter. +type CustomActionManager interface { + // ListActionSchemas returns all action schemas, optionally filtered by resource type. + // If resourceTypeID is empty, returns all actions (both global and resource-scoped). + // If resourceTypeID is set, returns only actions for that resource type. + ListActionSchemas(ctx context.Context, resourceTypeID string) ([]*v2.BatonActionSchema, annotations.Annotations, error) + + // GetActionSchema returns the schema for a specific action by name. + GetActionSchema(ctx context.Context, name string) (*v2.BatonActionSchema, annotations.Annotations, error) + + // InvokeAction invokes an action. If resourceTypeID is set, invokes a resource-scoped action. + InvokeAction( + ctx context.Context, + name string, + resourceTypeID string, + args *structpb.Struct, + ) (string, v2.BatonActionStatus, *structpb.Struct, annotations.Annotations, error) + + // GetActionStatus returns the status of an outstanding action. + GetActionStatus(ctx context.Context, id string) (v2.BatonActionStatus, string, *structpb.Struct, annotations.Annotations, error) +} + +// Deprecated: RegisterActionManager is deprecated. Implement GlobalActionProvider instead. +// +// RegisterActionManager extends ConnectorBuilder to add capabilities for registering custom actions. +// It provides a mechanism to register a CustomActionManager with the connector. +type RegisterActionManager interface { + ConnectorBuilder + RegisterActionManagerLimited +} + +// Deprecated: RegisterActionManagerLimited is deprecated. Implement GlobalActionProvider instead. 
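+//
+// Migration sketch (hypothetical connector code, not part of this SDK): rather than
+// returning a CustomActionManager from RegisterActionManager, register the same
+// actions directly via GlobalActionProvider:
+//
+//	func (c *Connector) GlobalActions(ctx context.Context, registry actions.ActionRegistry) error {
+//		for _, a := range c.legacyActions() { // hypothetical helper returning schema/handler pairs
+//			if err := registry.Register(ctx, a.schema, a.handler); err != nil {
+//				return err
+//			}
+//		}
+//		return nil
+//	}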
+type RegisterActionManagerLimited interface { + RegisterActionManager(ctx context.Context) (CustomActionManager, error) +} + +func (b *builder) ListActionSchemas(ctx context.Context, request *v2.ListActionSchemasRequest) (*v2.ListActionSchemasResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListActionSchemas") + defer span.End() + + start := b.nowFunc() + tt := tasks.ActionListSchemasType + + resourceTypeID := request.GetResourceTypeId() + + actionSchemas, _, err := b.actionManager.ListActionSchemas(ctx, resourceTypeID) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: listing action schemas failed: %w", err) + } + + rv := v2.ListActionSchemasResponse_builder{ + Schemas: actionSchemas, + }.Build() + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return rv, nil +} + +func (b *builder) GetActionSchema(ctx context.Context, request *v2.GetActionSchemaRequest) (*v2.GetActionSchemaResponse, error) { + ctx, span := tracer.Start(ctx, "builder.GetActionSchema") + defer span.End() + + start := b.nowFunc() + tt := tasks.ActionGetSchemaType + + actionSchema, annos, err := b.actionManager.GetActionSchema(ctx, request.GetName()) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: action schema %s not found: %w", request.GetName(), err) + } + + rv := v2.GetActionSchemaResponse_builder{ + Schema: actionSchema, + Annotations: annos, + }.Build() + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return rv, nil +} + +func (b *builder) InvokeAction(ctx context.Context, request *v2.InvokeActionRequest) (*v2.InvokeActionResponse, error) { + ctx, span := tracer.Start(ctx, "builder.InvokeAction") + defer span.End() + + start := b.nowFunc() + tt := tasks.ActionInvokeType + + resourceTypeID := request.GetResourceTypeId() + + id, actionStatus, resp, annos, err := b.actionManager.InvokeAction(ctx, request.GetName(), resourceTypeID, request.GetArgs()) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: invoking action failed: %w", err) + } + + rv := v2.InvokeActionResponse_builder{ + Id: id, + Name: request.GetName(), + Status: actionStatus, + Annotations: annos, + Response: resp, + }.Build() + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return rv, nil +} + +func (b *builder) GetActionStatus(ctx context.Context, request *v2.GetActionStatusRequest) (*v2.GetActionStatusResponse, error) { + ctx, span := tracer.Start(ctx, "builder.GetActionStatus") + defer span.End() + + start := b.nowFunc() + tt := tasks.ActionStatusType + + actionStatus, name, rv, annos, err := b.actionManager.GetActionStatus(ctx, request.GetId()) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: action status for id %s not found: %w", request.GetId(), err) + } + + resp := v2.GetActionStatusResponse_builder{ + Id: request.GetId(), + Name: name, + Status: actionStatus, + Annotations: annos, + Response: rv, + }.Build() + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return resp, nil +} + +// registerLegacyAction wraps a legacy CustomActionManager action as an ActionHandler and registers it. 
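+// The wrapped handler calls the legacy manager with an empty resource type ID and discards
+// the returned action ID and status, surfacing only the response, annotations, and error.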
+func registerLegacyAction(ctx context.Context, registry actions.ActionRegistry, schema *v2.BatonActionSchema, legacyManager CustomActionManager) error { + handler := func(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { + _, _, resp, annos, err := legacyManager.InvokeAction(ctx, schema.GetName(), "", args) + return resp, annos, err + } + return registry.Register(ctx, schema, handler) +} + +// addActionManager handles deprecated CustomActionManager and RegisterActionManagerLimited interfaces +// by extracting their actions and registering them into the unified ActionManager. +func (b *builder) addActionManager(ctx context.Context, in interface{}, registry actions.ActionRegistry) error { + // Handle deprecated CustomActionManager - extract and re-register actions + if customManager, ok := in.(CustomActionManager); ok { + schemas, _, err := customManager.ListActionSchemas(ctx, "") + if err != nil { + return fmt.Errorf("error listing schemas from custom action manager: %w", err) + } + for _, schema := range schemas { + if err := registerLegacyAction(ctx, registry, schema, customManager); err != nil { + return fmt.Errorf("error registering legacy action %s: %w", schema.GetName(), err) + } + } + return nil + } + + // Handle deprecated RegisterActionManagerLimited + if registerManager, ok := in.(RegisterActionManagerLimited); ok { + customManager, err := registerManager.RegisterActionManager(ctx) + if err != nil { + return fmt.Errorf("error registering action manager: %w", err) + } + if customManager == nil { + return nil // No action manager provided + } + schemas, _, err := customManager.ListActionSchemas(ctx, "") + if err != nil { + return fmt.Errorf("error listing schemas from custom action manager: %w", err) + } + for _, schema := range schemas { + if err := registerLegacyAction(ctx, registry, schema, customManager); err != nil { + return fmt.Errorf("error registering legacy action %s: %w", schema.GetName(), err) + } + } + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/assets.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/assets.go index f6431512..d0e551c6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/assets.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/assets.go @@ -1,5 +1,7 @@ package connectorbuilder +import v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + // FIXME(jirwin): Come back to streaming assets soon. // // const ( @@ -40,3 +42,12 @@ package connectorbuilder // } // return nil // } + +// GetAsset streams the asset to the client. +// FIXME(jirwin): Asset streaming is disabled. 
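+// Until streaming is re-enabled, this handler only opens a trace span and returns nil
+// without sending any asset data to the client.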
+func (b *builder) GetAsset(request *v2.AssetServiceGetAssetRequest, server v2.AssetService_GetAssetServer) error { + _, span := tracer.Start(server.Context(), "builderImpl.GetAsset") + defer span.End() + + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go index 231b40cd..40df7dfd 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go @@ -14,1449 +14,319 @@ import ( "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/structpb" - "google.golang.org/protobuf/types/known/timestamppb" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/actions" "github.com/conductorone/baton-sdk/pkg/annotations" "github.com/conductorone/baton-sdk/pkg/crypto" "github.com/conductorone/baton-sdk/pkg/metrics" - "github.com/conductorone/baton-sdk/pkg/pagination" "github.com/conductorone/baton-sdk/pkg/retry" - "github.com/conductorone/baton-sdk/pkg/session" + "github.com/conductorone/baton-sdk/pkg/sdk" "github.com/conductorone/baton-sdk/pkg/types" + "github.com/conductorone/baton-sdk/pkg/types/sessions" "github.com/conductorone/baton-sdk/pkg/types/tasks" "github.com/conductorone/baton-sdk/pkg/uhttp" ) var tracer = otel.Tracer("baton-sdk/pkg.connectorbuilder") -// ResourceSyncer is the primary interface for connector developers to implement. -// -// It defines the core functionality for synchronizing resources, entitlements, and grants -// from external systems into Baton. Every connector must implement at least this interface -// for each resource type it supports. -// -// Extensions to this interface include: -// - ResourceProvisioner/ResourceProvisionerV2: For adding/removing access -// - ResourceManager: For creating and managing resources -// - ResourceDeleter: For deleting resources -// - AccountManager: For account provisioning operations -// - CredentialManager: For credential rotation operations. -// - ResourceTargetedSyncer: For directly getting a resource supporting targeted sync. -type ResourceSyncer interface { - ResourceType(ctx context.Context) *v2.ResourceType - List(ctx context.Context, parentResourceID *v2.ResourceId, pToken *pagination.Token) ([]*v2.Resource, string, annotations.Annotations, error) - Entitlements(ctx context.Context, resource *v2.Resource, pToken *pagination.Token) ([]*v2.Entitlement, string, annotations.Annotations, error) - Grants(ctx context.Context, resource *v2.Resource, pToken *pagination.Token) ([]*v2.Grant, string, annotations.Annotations, error) -} - -// ResourceProvisioner extends ResourceSyncer to add capabilities for granting and revoking access. -// -// Note: ResourceProvisionerV2 is preferred for new connectors as it provides -// enhanced grant capabilities. -// -// Implementing this interface indicates the connector supports provisioning operations -// for the associated resource type. 
-type ResourceProvisioner interface { - ResourceSyncer - ResourceType(ctx context.Context) *v2.ResourceType - Grant(ctx context.Context, resource *v2.Resource, entitlement *v2.Entitlement) (annotations.Annotations, error) - Revoke(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) -} - -// ResourceProvisionerV2 extends ResourceSyncer to add capabilities for granting and revoking access -// with enhanced functionality compared to ResourceProvisioner. -// -// This is the recommended interface for implementing provisioning operations in new connectors. -// It differs from ResourceProvisioner by returning a list of grants from the Grant method. -type ResourceProvisionerV2 interface { - ResourceSyncer - ResourceType(ctx context.Context) *v2.ResourceType - Grant(ctx context.Context, resource *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) - Revoke(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) -} - -// ResourceManager extends ResourceSyncer to add capabilities for creating resources. -// -// Implementing this interface indicates the connector supports creating and deleting resources -// of the associated resource type. A ResourceManager automatically provides ResourceDeleter -// functionality. -type ResourceManager interface { - ResourceSyncer - Create(ctx context.Context, resource *v2.Resource) (*v2.Resource, annotations.Annotations, error) - ResourceDeleter -} - -// ResourceManagerV2 extends ResourceSyncer to add capabilities for creating resources. -// -// This is the recommended interface for implementing resource creation operations in new connectors. -type ResourceManagerV2 interface { - ResourceSyncer - Create(ctx context.Context, resource *v2.Resource) (*v2.Resource, annotations.Annotations, error) - ResourceDeleterV2 -} - -// ResourceDeleter extends ResourceSyncer to add capabilities for deleting resources. -// -// Implementing this interface indicates the connector supports deleting resources -// of the associated resource type. -type ResourceDeleter interface { - ResourceSyncer - Delete(ctx context.Context, resourceId *v2.ResourceId) (annotations.Annotations, error) -} - -// ResourceDeleterV2 extends ResourceSyncer to add capabilities for deleting resources. -// -// This is the recommended interface for implementing resource deletion operations in new connectors. -// It differs from ResourceDeleter by having the resource, not just the id. -type ResourceDeleterV2 interface { - ResourceSyncer - Delete(ctx context.Context, resourceId *v2.ResourceId, parentResourceID *v2.ResourceId) (annotations.Annotations, error) -} - -// ResourceTargetedSyncer extends ResourceSyncer to add capabilities for directly syncing an individual resource -// -// Implementing this interface indicates the connector supports calling "get" on a resource -// of the associated resource type. -type ResourceTargetedSyncer interface { - ResourceSyncer - Get(ctx context.Context, resourceId *v2.ResourceId, parentResourceId *v2.ResourceId) (*v2.Resource, annotations.Annotations, error) -} - -// CreateAccountResponse is a semi-opaque type returned from CreateAccount operations. -// -// This is used to communicate the result of account creation back to Baton. -type CreateAccountResponse interface { - proto.Message - GetIsCreateAccountResult() bool -} - -// AccountManager extends ResourceSyncer to add capabilities for managing user accounts. 
-// -// Implementing this interface indicates the connector supports creating accounts -// in the external system. A resource type should implement this interface if it -// represents users or accounts that can be provisioned. -type AccountManager interface { - ResourceSyncer - CreateAccount(ctx context.Context, - accountInfo *v2.AccountInfo, - credentialOptions *v2.LocalCredentialOptions) (CreateAccountResponse, []*v2.PlaintextData, annotations.Annotations, error) - CreateAccountCapabilityDetails(ctx context.Context) (*v2.CredentialDetailsAccountProvisioning, annotations.Annotations, error) -} - -type OldAccountManager interface { - CreateAccount(ctx context.Context, - accountInfo *v2.AccountInfo, - credentialOptions *v2.CredentialOptions) (CreateAccountResponse, []*v2.PlaintextData, annotations.Annotations, error) -} - -// CredentialManager extends ResourceSyncer to add capabilities for managing credentials. -// -// Implementing this interface indicates the connector supports rotating credentials -// for resources of the associated type. This is commonly used for user accounts -// or service accounts that have rotatable credentials. -type CredentialManager interface { - ResourceSyncer - Rotate(ctx context.Context, - resourceId *v2.ResourceId, - credentialOptions *v2.LocalCredentialOptions) ([]*v2.PlaintextData, annotations.Annotations, error) - RotateCapabilityDetails(ctx context.Context) (*v2.CredentialDetailsCredentialRotation, annotations.Annotations, error) -} - -type OldCredentialManager interface { - Rotate(ctx context.Context, - resourceId *v2.ResourceId, - credentialOptions *v2.CredentialOptions) ([]*v2.PlaintextData, annotations.Annotations, error) -} - -// Compatibility interface lets us handle both EventFeed and EventProvider the same. -type EventLister interface { - ListEvents(ctx context.Context, earliestEvent *timestamppb.Timestamp, pToken *pagination.StreamToken) ([]*v2.Event, *pagination.StreamState, annotations.Annotations, error) -} - -// Deprecated: This interface is deprecated in favor of EventProviderV2 which supports -// multiple event feeds. Implementing this interface indicates the connector can provide -// a single stream of events from the external system, enabling near real-time updates -// in Baton. New connectors should implement EventProviderV2 instead. -type EventProvider interface { - ConnectorBuilder - EventLister -} - -// NewEventProviderV2 is a new interface that allows connectors to provide multiple event feeds. -// -// This is the recommended interface for implementing event feed support in new connectors. -type EventProviderV2 interface { - ConnectorBuilder - EventFeeds(ctx context.Context) []EventFeed -} - -// EventFeed is a single stream of events from the external system. -// -// EventFeedMetadata describes this feed, and a connector can have multiple feeds. 
-type EventFeed interface { - EventLister - EventFeedMetadata(ctx context.Context) *v2.EventFeedMetadata -} - -type oldEventFeedWrapper struct { - feed EventLister -} - -const ( - LegacyBatonFeedId = "baton_feed_event" -) - -func (e *oldEventFeedWrapper) EventFeedMetadata(ctx context.Context) *v2.EventFeedMetadata { - return &v2.EventFeedMetadata{ - Id: LegacyBatonFeedId, - SupportedEventTypes: []v2.EventType{v2.EventType_EVENT_TYPE_UNSPECIFIED}, - } -} - -func (e *oldEventFeedWrapper) ListEvents( - ctx context.Context, - earliestEvent *timestamppb.Timestamp, - pToken *pagination.StreamToken, -) ([]*v2.Event, *pagination.StreamState, annotations.Annotations, error) { - return e.feed.ListEvents(ctx, earliestEvent, pToken) -} - -// TicketManager extends ConnectorBuilder to add capabilities for ticket management. -// -// Implementing this interface indicates the connector can integrate with an external -// ticketing system, allowing Baton to create and track tickets in that system. -type TicketManager interface { - ConnectorBuilder - GetTicket(ctx context.Context, ticketId string) (*v2.Ticket, annotations.Annotations, error) - CreateTicket(ctx context.Context, ticket *v2.Ticket, schema *v2.TicketSchema) (*v2.Ticket, annotations.Annotations, error) - GetTicketSchema(ctx context.Context, schemaID string) (*v2.TicketSchema, annotations.Annotations, error) - ListTicketSchemas(ctx context.Context, pToken *pagination.Token) ([]*v2.TicketSchema, string, annotations.Annotations, error) - BulkCreateTickets(context.Context, *v2.TicketsServiceBulkCreateTicketsRequest) (*v2.TicketsServiceBulkCreateTicketsResponse, error) - BulkGetTickets(context.Context, *v2.TicketsServiceBulkGetTicketsRequest) (*v2.TicketsServiceBulkGetTicketsResponse, error) -} - -// CustomActionManager defines capabilities for handling custom actions. -// -// Note: RegisterActionManager is preferred for new connectors. -// -// This interface allows connectors to define and execute custom actions -// that can be triggered from Baton. -type CustomActionManager interface { - ListActionSchemas(ctx context.Context) ([]*v2.BatonActionSchema, annotations.Annotations, error) - GetActionSchema(ctx context.Context, name string) (*v2.BatonActionSchema, annotations.Annotations, error) - InvokeAction(ctx context.Context, name string, args *structpb.Struct) (string, v2.BatonActionStatus, *structpb.Struct, annotations.Annotations, error) - GetActionStatus(ctx context.Context, id string) (v2.BatonActionStatus, string, *structpb.Struct, annotations.Annotations, error) -} - -// RegisterActionManager extends ConnectorBuilder to add capabilities for registering custom actions. -// -// This is the recommended interface for implementing custom action support in new connectors. -// It provides a mechanism to register a CustomActionManager with the connector. -type RegisterActionManager interface { - ConnectorBuilder - RegisterActionManager(ctx context.Context) (CustomActionManager, error) -} - // ConnectorBuilder is the foundational interface for creating Baton connectors. // // This interface defines the core capabilities required by all connectors, including // metadata, validation, and registering resource syncers. Additional functionality -// can be added by implementing extension interfaces such as: -// - RegisterActionManager: For custom action support -// - EventProvider: For event stream support -// - TicketManager: For ticket management integration. 
-type ConnectorBuilder interface { - Metadata(ctx context.Context) (*v2.ConnectorMetadata, error) - Validate(ctx context.Context) (annotations.Annotations, error) - ResourceSyncers(ctx context.Context) []ResourceSyncer -} - -type builderImpl struct { - resourceBuilders map[string]ResourceSyncer - resourceProvisioners map[string]ResourceProvisioner - resourceProvisionersV2 map[string]ResourceProvisionerV2 - resourceManagers map[string]ResourceManager - resourceManagersV2 map[string]ResourceManagerV2 - resourceDeleters map[string]ResourceDeleter - resourceDeletersV2 map[string]ResourceDeleterV2 - resourceTargetedSyncers map[string]ResourceTargetedSyncer - accountManager AccountManager - actionManager CustomActionManager - credentialManagers map[string]CredentialManager - eventFeeds map[string]EventFeed - cb ConnectorBuilder - ticketManager TicketManager - ticketingEnabled bool - m *metrics.M - nowFunc func() time.Time - clientSecret *jose.JSONWebKey -} - -func (b *builderImpl) BulkCreateTickets(ctx context.Context, request *v2.TicketsServiceBulkCreateTicketsRequest) (*v2.TicketsServiceBulkCreateTicketsResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.BulkCreateTickets") - defer span.End() - - start := b.nowFunc() - tt := tasks.BulkCreateTicketsType - if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") - } - - reqBody := request.GetTicketRequests() - if len(reqBody) == 0 { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: request body had no items") - } - - ticketsResponse, err := b.ticketManager.BulkCreateTickets(ctx, request) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: creating tickets failed: %w", err) - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.TicketsServiceBulkCreateTicketsResponse{ - Tickets: ticketsResponse.GetTickets(), - }, nil -} - -func (b *builderImpl) BulkGetTickets(ctx context.Context, request *v2.TicketsServiceBulkGetTicketsRequest) (*v2.TicketsServiceBulkGetTicketsResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.BulkGetTickets") - defer span.End() - - start := b.nowFunc() - tt := tasks.BulkGetTicketsType - if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") - } - - reqBody := request.GetTicketRequests() - if len(reqBody) == 0 { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: request body had no items") - } - - ticketsResponse, err := b.ticketManager.BulkGetTickets(ctx, request) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: fetching tickets failed: %w", err) - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.TicketsServiceBulkGetTicketsResponse{ - Tickets: ticketsResponse.GetTickets(), - }, nil -} - -func (b *builderImpl) ListTicketSchemas(ctx context.Context, request *v2.TicketsServiceListTicketSchemasRequest) (*v2.TicketsServiceListTicketSchemasResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.ListTicketSchemas") - defer span.End() - - start := b.nowFunc() - tt := tasks.ListTicketSchemasType - if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") - } - - retryer := 
retry.NewRetryer(ctx, retry.RetryConfig{ - MaxAttempts: 10, - InitialDelay: 15 * time.Second, - MaxDelay: 0, - }) - - for { - out, nextPageToken, annos, err := b.ticketManager.ListTicketSchemas(ctx, &pagination.Token{ - Size: int(request.PageSize), - Token: request.PageToken, - }) - if err == nil { - if request.PageToken != "" && request.PageToken == nextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: listing ticket schemas failed: next page token is the same as the current page token. this is most likely a connector bug") - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.TicketsServiceListTicketSchemasResponse{ - List: out, - NextPageToken: nextPageToken, - Annotations: annos, - }, nil - } - if retryer.ShouldWaitAndRetry(ctx, err) { - continue - } - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: listing ticket schemas failed: %w", err) - } -} - -func (b *builderImpl) CreateTicket(ctx context.Context, request *v2.TicketsServiceCreateTicketRequest) (*v2.TicketsServiceCreateTicketResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.CreateTicket") - defer span.End() - - start := b.nowFunc() - tt := tasks.CreateTicketType - if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") - } - - reqBody := request.GetRequest() - if reqBody == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: request body is nil") - } - cTicket := &v2.Ticket{ - DisplayName: reqBody.GetDisplayName(), - Description: reqBody.GetDescription(), - Status: reqBody.GetStatus(), - Labels: reqBody.GetLabels(), - CustomFields: reqBody.GetCustomFields(), - RequestedFor: reqBody.GetRequestedFor(), - } - - ticket, annos, err := b.ticketManager.CreateTicket(ctx, cTicket, request.GetSchema()) - var resp *v2.TicketsServiceCreateTicketResponse - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - if ticket != nil { - resp = &v2.TicketsServiceCreateTicketResponse{ - Ticket: ticket, - Annotations: annos, - } - } - return resp, fmt.Errorf("error: creating ticket failed: %w", err) - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.TicketsServiceCreateTicketResponse{ - Ticket: ticket, - Annotations: annos, - }, nil -} - -func (b *builderImpl) GetTicket(ctx context.Context, request *v2.TicketsServiceGetTicketRequest) (*v2.TicketsServiceGetTicketResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.GetTicket") - defer span.End() - - start := b.nowFunc() - tt := tasks.GetTicketType - if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") - } - - var resp *v2.TicketsServiceGetTicketResponse - ticket, annos, err := b.ticketManager.GetTicket(ctx, request.GetId()) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - if ticket != nil { - resp = &v2.TicketsServiceGetTicketResponse{ - Ticket: ticket, - Annotations: annos, - } - } - return resp, fmt.Errorf("error: getting ticket failed: %w", err) - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.TicketsServiceGetTicketResponse{ - Ticket: ticket, - Annotations: annos, - }, nil -} - -func (b *builderImpl) GetTicketSchema(ctx context.Context, request *v2.TicketsServiceGetTicketSchemaRequest) (*v2.TicketsServiceGetTicketSchemaResponse, 
error) { - ctx, span := tracer.Start(ctx, "builderImpl.GetTicketSchema") - defer span.End() - - start := b.nowFunc() - tt := tasks.GetTicketSchemaType - if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") - } - - ticketSchema, annos, err := b.ticketManager.GetTicketSchema(ctx, request.GetId()) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: getting ticket metadata failed: %w", err) - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.TicketsServiceGetTicketSchemaResponse{ - Schema: ticketSchema, - Annotations: annos, - }, nil -} - -// NewConnector creates a new ConnectorServer for a new resource. -func NewConnector(ctx context.Context, in interface{}, opts ...Opt) (types.ConnectorServer, error) { - switch c := in.(type) { - case ConnectorBuilder: - clientSecretValue := ctx.Value(crypto.ContextClientSecretKey) - clientSecretJWK, _ := clientSecretValue.(*jose.JSONWebKey) - - ret := &builderImpl{ - resourceBuilders: make(map[string]ResourceSyncer), - resourceProvisioners: make(map[string]ResourceProvisioner), - resourceProvisionersV2: make(map[string]ResourceProvisionerV2), - resourceManagers: make(map[string]ResourceManager), - resourceManagersV2: make(map[string]ResourceManagerV2), - resourceDeleters: make(map[string]ResourceDeleter), - resourceDeletersV2: make(map[string]ResourceDeleterV2), - resourceTargetedSyncers: make(map[string]ResourceTargetedSyncer), - accountManager: nil, - actionManager: nil, - credentialManagers: make(map[string]CredentialManager), - eventFeeds: make(map[string]EventFeed), - cb: c, - ticketManager: nil, - nowFunc: time.Now, - clientSecret: clientSecretJWK, - } - - err := ret.options(opts...) - if err != nil { - return nil, err - } - - if ret.m == nil { - ret.m = metrics.New(metrics.NewNoOpHandler(ctx)) - } - - if b, ok := c.(EventProviderV2); ok { - for _, ef := range b.EventFeeds(ctx) { - feedData := ef.EventFeedMetadata(ctx) - if feedData == nil { - return nil, fmt.Errorf("error: event feed metadata is nil") - } - if err := feedData.Validate(); err != nil { - return nil, fmt.Errorf("error: event feed metadata for %s is invalid: %w", feedData.Id, err) - } - if _, ok := ret.eventFeeds[feedData.Id]; ok { - return nil, fmt.Errorf("error: duplicate event feed id found: %s", feedData.Id) - } - ret.eventFeeds[feedData.Id] = ef - } - } - - if b, ok := c.(EventProvider); ok { - // Register the legacy Baton feed as a v2 event feed - // implementing both v1 and v2 event feeds is not supported. 
- if len(ret.eventFeeds) != 0 { - return nil, fmt.Errorf("error: using legacy event feed is not supported when using EventProviderV2") - } - ret.eventFeeds[LegacyBatonFeedId] = &oldEventFeedWrapper{ - feed: b, - } - } - - if ticketManager, ok := c.(TicketManager); ok { - if ret.ticketManager != nil { - return nil, fmt.Errorf("error: cannot set multiple ticket managers") - } - ret.ticketManager = ticketManager - } - - if actionManager, ok := c.(CustomActionManager); ok { - if ret.actionManager != nil { - return nil, fmt.Errorf("error: cannot set multiple action managers") - } - ret.actionManager = actionManager - } - - if registerActionManager, ok := c.(RegisterActionManager); ok { - if ret.actionManager != nil { - return nil, fmt.Errorf("error: cannot register multiple action managers") - } - actionManager, err := registerActionManager.RegisterActionManager(ctx) - if err != nil { - return nil, fmt.Errorf("error: registering action manager failed: %w", err) - } - if actionManager == nil { - return nil, fmt.Errorf("error: action manager is nil") - } - ret.actionManager = actionManager - } - - for _, rb := range c.ResourceSyncers(ctx) { - rType := rb.ResourceType(ctx) - if _, ok := ret.resourceBuilders[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for resource builder %s", rType.Id) - } - ret.resourceBuilders[rType.Id] = rb - - if err := validateProvisionerVersion(ctx, rb); err != nil { - return nil, err - } - - if provisioner, ok := rb.(ResourceProvisioner); ok { - if _, ok := ret.resourceProvisioners[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for resource provisioner %s", rType.Id) - } - ret.resourceProvisioners[rType.Id] = provisioner - } - if provisioner, ok := rb.(ResourceProvisionerV2); ok { - if _, ok := ret.resourceProvisionersV2[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for resource provisioner v2 %s", rType.Id) - } - ret.resourceProvisionersV2[rType.Id] = provisioner - } - if targetedSyncer, ok := rb.(ResourceTargetedSyncer); ok { - if _, ok := ret.resourceTargetedSyncers[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for resource targeted syncer %s", rType.Id) - } - ret.resourceTargetedSyncers[rType.Id] = targetedSyncer - } - - if resourceManager, ok := rb.(ResourceManager); ok { - if _, ok := ret.resourceManagers[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for resource manager %s", rType.Id) - } - ret.resourceManagers[rType.Id] = resourceManager - // Support DeleteResourceV2 if connector implements both Create and Delete - if _, ok := ret.resourceDeleters[rType.Id]; ok { - // This should never happen - return nil, fmt.Errorf("error: duplicate resource type found for resource deleter %s", rType.Id) - } - ret.resourceDeleters[rType.Id] = resourceManager - } else { - if resourceDeleter, ok := rb.(ResourceDeleter); ok { - if _, ok := ret.resourceDeleters[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for resource deleter %s", rType.Id) - } - ret.resourceDeleters[rType.Id] = resourceDeleter - } - } - - if resourceManager, ok := rb.(ResourceManagerV2); ok { - if _, ok := ret.resourceManagersV2[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for resource managerV2 %s", rType.Id) - } - ret.resourceManagersV2[rType.Id] = resourceManager - // Support DeleteResourceV2 if connector implements both Create and Delete - if _, ok := 
ret.resourceDeletersV2[rType.Id]; ok { - // This should never happen - return nil, fmt.Errorf("error: duplicate resource type found for resource deleterV2 %s", rType.Id) - } - ret.resourceDeletersV2[rType.Id] = resourceManager - } else { - if resourceDeleter, ok := rb.(ResourceDeleterV2); ok { - if _, ok := ret.resourceDeletersV2[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for resource deleterV2 %s", rType.Id) - } - ret.resourceDeletersV2[rType.Id] = resourceDeleter - } - } - - if _, ok := rb.(OldAccountManager); ok { - return nil, fmt.Errorf("error: old account manager interface implemented for %s", rType.Id) - } - - if accountManager, ok := rb.(AccountManager); ok { - if ret.accountManager != nil { - return nil, fmt.Errorf("error: duplicate resource type found for account manager %s", rType.Id) - } - ret.accountManager = accountManager - } - - if _, ok := rb.(OldCredentialManager); ok { - return nil, fmt.Errorf("error: old credential manager interface implemented for %s", rType.Id) - } - - if credentialManagers, ok := rb.(CredentialManager); ok { - if _, ok := ret.credentialManagers[rType.Id]; ok { - return nil, fmt.Errorf("error: duplicate resource type found for credential manager %s", rType.Id) - } - ret.credentialManagers[rType.Id] = credentialManagers - } - } - return ret, nil - - case types.ConnectorServer: - return c, nil - - default: - return nil, fmt.Errorf("input was not a ConnectorBuilder or a ConnectorServer") - } -} - -type Opt func(b *builderImpl) error - -func WithTicketingEnabled() Opt { - return func(b *builderImpl) error { - if _, ok := b.cb.(TicketManager); ok { - b.ticketingEnabled = true - return nil - } - return errors.New("external ticketing not supported") - } -} - -func WithMetricsHandler(h metrics.Handler) Opt { - return func(b *builderImpl) error { - b.m = metrics.New(h) - return nil - } -} - -func (b *builderImpl) options(opts ...Opt) error { - for _, opt := range opts { - if err := opt(b); err != nil { - return err - } - } - - return nil -} - -func validateProvisionerVersion(ctx context.Context, p ResourceSyncer) error { - _, ok := p.(ResourceProvisioner) - _, okV2 := p.(ResourceProvisionerV2) - - if ok && okV2 { - return fmt.Errorf("error: resource type %s implements both ResourceProvisioner and ResourceProvisionerV2", p.ResourceType(ctx).Id) - } - return nil -} - -// ListResourceTypes lists all available resource types. -func (b *builderImpl) ListResourceTypes( - ctx context.Context, - request *v2.ResourceTypesServiceListResourceTypesRequest, -) (*v2.ResourceTypesServiceListResourceTypesResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.ListResourceTypes") - defer span.End() - - start := b.nowFunc() - tt := tasks.ListResourceTypesType - var out []*v2.ResourceType - - if len(b.resourceBuilders) == 0 { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: no resource builders found") - } - - for _, rb := range b.resourceBuilders { - out = append(out, rb.ResourceType(ctx)) - } - - if len(out) == 0 { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: no resource types found") - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.ResourceTypesServiceListResourceTypesResponse{List: out}, nil -} - -// ListResources returns all available resources for a given resource type ID. 
-func (b *builderImpl) ListResources(ctx context.Context, request *v2.ResourcesServiceListResourcesRequest) (*v2.ResourcesServiceListResourcesResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.ListResources") - defer span.End() - - start := b.nowFunc() - tt := tasks.ListResourcesType - rb, ok := b.resourceBuilders[request.ResourceTypeId] - if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: list resources with unknown resource type %s", request.ResourceTypeId) - } - out, nextPageToken, annos, err := rb.List(ctx, request.ParentResourceId, &pagination.Token{ - Size: int(request.PageSize), - Token: request.PageToken, - }) - resp := &v2.ResourcesServiceListResourcesResponse{ - List: out, - NextPageToken: nextPageToken, - Annotations: annos, - } - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing resources failed: %w", err) - } - if request.PageToken != "" && request.PageToken == nextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing resources failed: next page token is the same as the current page token. this is most likely a connector bug") - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return resp, nil -} - -func (b *builderImpl) GetResource(ctx context.Context, request *v2.ResourceGetterServiceGetResourceRequest) (*v2.ResourceGetterServiceGetResourceResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.GetResource") - defer span.End() - - start := b.nowFunc() - tt := tasks.GetResourceType - resourceType := request.GetResourceId().GetResourceType() - rb, ok := b.resourceTargetedSyncers[resourceType] - if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Errorf(codes.Unimplemented, "error: get resource with unknown resource type %s", resourceType) - } - - resource, annos, err := rb.Get(ctx, request.GetResourceId(), request.GetParentResourceId()) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: get resource failed: %w", err) - } - if resource == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.NotFound, "error: get resource returned nil") - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.ResourceGetterServiceGetResourceResponse{ - Resource: resource, - Annotations: annos, - }, nil -} - -// ListEntitlements returns all the entitlements for a given resource. 
-func (b *builderImpl) ListEntitlements(ctx context.Context, request *v2.EntitlementsServiceListEntitlementsRequest) (*v2.EntitlementsServiceListEntitlementsResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.ListEntitlements") - defer span.End() - - start := b.nowFunc() - tt := tasks.ListEntitlementsType - rb, ok := b.resourceBuilders[request.Resource.Id.ResourceType] - if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: list entitlements with unknown resource type %s", request.Resource.Id.ResourceType) - } - - out, nextPageToken, annos, err := rb.Entitlements(ctx, request.Resource, &pagination.Token{ - Size: int(request.PageSize), - Token: request.PageToken, - }) - resp := &v2.EntitlementsServiceListEntitlementsResponse{ - List: out, - NextPageToken: nextPageToken, - Annotations: annos, - } - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing entitlements failed: %w", err) - } - if request.PageToken != "" && request.PageToken == nextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing entitlements failed: next page token is the same as the current page token. this is most likely a connector bug") - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return resp, nil -} - -// ListGrants lists all the grants for a given resource. -func (b *builderImpl) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.ListGrants") - defer span.End() - - start := b.nowFunc() - tt := tasks.ListGrantsType - rid := request.Resource.Id - rb, ok := b.resourceBuilders[rid.ResourceType] - if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: list entitlements with unknown resource type %s", rid.ResourceType) - } - - out, nextPageToken, annos, err := rb.Grants(ctx, request.Resource, &pagination.Token{ - Size: int(request.PageSize), - Token: request.PageToken, - }) - - // annos.Append(&v2.ActiveSync{ActiveSyncId: request.Annotations.GetActiveSyncId()}) - resp := &v2.GrantsServiceListGrantsResponse{ - List: out, - NextPageToken: nextPageToken, - Annotations: annos, - } - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing grants for resource %s/%s failed: %w", rid.ResourceType, rid.Resource, err) - } - if request.PageToken != "" && request.PageToken == nextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing grants for resource %s/%s failed: next page token is the same as the current page token. this is most likely a connector bug", - rid.ResourceType, - rid.Resource) - } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return resp, nil -} - -// GetMetadata gets all metadata for a connector. 
-func (b *builderImpl) GetMetadata(ctx context.Context, request *v2.ConnectorServiceGetMetadataRequest) (*v2.ConnectorServiceGetMetadataResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.GetMetadata") - defer span.End() - - start := b.nowFunc() - tt := tasks.GetMetadataType - md, err := b.cb.Metadata(ctx) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, err - } - - md.Capabilities, err = getCapabilities(ctx, b) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, err - } - - annos := annotations.Annotations(md.Annotations) - if b.ticketManager != nil { - annos.Append(&v2.ExternalTicketSettings{Enabled: b.ticketingEnabled}) - } - md.Annotations = annos - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.ConnectorServiceGetMetadataResponse{Metadata: md}, nil -} - -func validateCapabilityDetails(ctx context.Context, credDetails *v2.CredentialDetails) error { - if credDetails.CapabilityAccountProvisioning != nil { - // Ensure that the preferred option is included and is part of the supported options - if credDetails.CapabilityAccountProvisioning.PreferredCredentialOption == v2.CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED { - return status.Error(codes.InvalidArgument, "error: preferred credential creation option is not set") - } - if !slices.Contains(credDetails.CapabilityAccountProvisioning.SupportedCredentialOptions, credDetails.CapabilityAccountProvisioning.PreferredCredentialOption) { - return status.Error(codes.InvalidArgument, "error: preferred credential creation option is not part of the supported options") - } - } - - if credDetails.CapabilityCredentialRotation != nil { - // Ensure that the preferred option is included and is part of the supported options - if credDetails.CapabilityCredentialRotation.PreferredCredentialOption == v2.CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED { - return status.Error(codes.InvalidArgument, "error: preferred credential rotation option is not set") - } - if !slices.Contains(credDetails.CapabilityCredentialRotation.SupportedCredentialOptions, credDetails.CapabilityCredentialRotation.PreferredCredentialOption) { - return status.Error(codes.InvalidArgument, "error: preferred credential rotation option is not part of the supported options") - } - } - - return nil -} - -func getCredentialDetails(ctx context.Context, b *builderImpl) (*v2.CredentialDetails, error) { - l := ctxzap.Extract(ctx) - rv := &v2.CredentialDetails{} - - for _, rb := range b.resourceBuilders { - if am, ok := rb.(AccountManager); ok { - accountProvisioningCapabilityDetails, _, err := am.CreateAccountCapabilityDetails(ctx) - if err != nil { - l.Error("error: getting account provisioning details", zap.Error(err)) - return nil, fmt.Errorf("error: getting account provisioning details: %w", err) - } - rv.CapabilityAccountProvisioning = accountProvisioningCapabilityDetails - } - - if cm, ok := rb.(CredentialManager); ok { - credentialRotationCapabilityDetails, _, err := cm.RotateCapabilityDetails(ctx) - if err != nil { - l.Error("error: getting credential management details", zap.Error(err)) - return nil, fmt.Errorf("error: getting credential management details: %w", err) - } - rv.CapabilityCredentialRotation = credentialRotationCapabilityDetails - } - } - - err := validateCapabilityDetails(ctx, rv) - if err != nil { - return nil, fmt.Errorf("error: validating capability details: %w", err) - } - return rv, nil -} - 
-// getCapabilities gets all capabilities for a connector. -func getCapabilities(ctx context.Context, b *builderImpl) (*v2.ConnectorCapabilities, error) { - connectorCaps := make(map[v2.Capability]struct{}) - resourceTypeCapabilities := []*v2.ResourceTypeCapability{} - for _, rb := range b.resourceBuilders { - resourceTypeCapability := &v2.ResourceTypeCapability{ - ResourceType: rb.ResourceType(ctx), - // Currently by default all resource types support sync. - Capabilities: []v2.Capability{v2.Capability_CAPABILITY_SYNC}, - } - connectorCaps[v2.Capability_CAPABILITY_SYNC] = struct{}{} - if _, ok := rb.(ResourceTargetedSyncer); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_TARGETED_SYNC) - connectorCaps[v2.Capability_CAPABILITY_TARGETED_SYNC] = struct{}{} - } - if _, ok := rb.(ResourceProvisioner); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_PROVISION) - connectorCaps[v2.Capability_CAPABILITY_PROVISION] = struct{}{} - } else if _, ok = rb.(ResourceProvisionerV2); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_PROVISION) - connectorCaps[v2.Capability_CAPABILITY_PROVISION] = struct{}{} - } - if _, ok := rb.(AccountManager); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_ACCOUNT_PROVISIONING) - connectorCaps[v2.Capability_CAPABILITY_ACCOUNT_PROVISIONING] = struct{}{} - } - - if _, ok := rb.(CredentialManager); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_CREDENTIAL_ROTATION) - connectorCaps[v2.Capability_CAPABILITY_CREDENTIAL_ROTATION] = struct{}{} - } - - if _, ok := rb.(ResourceManager); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_RESOURCE_CREATE, v2.Capability_CAPABILITY_RESOURCE_DELETE) - connectorCaps[v2.Capability_CAPABILITY_RESOURCE_CREATE] = struct{}{} - connectorCaps[v2.Capability_CAPABILITY_RESOURCE_DELETE] = struct{}{} - } else if _, ok := rb.(ResourceDeleter); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_RESOURCE_DELETE) - connectorCaps[v2.Capability_CAPABILITY_RESOURCE_DELETE] = struct{}{} - } - - if _, ok := rb.(ResourceManagerV2); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_RESOURCE_CREATE, v2.Capability_CAPABILITY_RESOURCE_DELETE) - connectorCaps[v2.Capability_CAPABILITY_RESOURCE_CREATE] = struct{}{} - connectorCaps[v2.Capability_CAPABILITY_RESOURCE_DELETE] = struct{}{} - } else if _, ok := rb.(ResourceDeleterV2); ok { - resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_RESOURCE_DELETE) - connectorCaps[v2.Capability_CAPABILITY_RESOURCE_DELETE] = struct{}{} - } - - resourceTypeCapabilities = append(resourceTypeCapabilities, resourceTypeCapability) - } - sort.Slice(resourceTypeCapabilities, func(i, j int) bool { - return resourceTypeCapabilities[i].ResourceType.GetId() < resourceTypeCapabilities[j].ResourceType.GetId() - }) - - if len(b.eventFeeds) > 0 { - connectorCaps[v2.Capability_CAPABILITY_EVENT_FEED_V2] = struct{}{} - } - - if b.ticketManager != nil { - connectorCaps[v2.Capability_CAPABILITY_TICKETING] = struct{}{} - } - - if b.actionManager != nil { - 
connectorCaps[v2.Capability_CAPABILITY_ACTIONS] = struct{}{} - } - - var caps []v2.Capability - for c := range connectorCaps { - caps = append(caps, c) - } - slices.Sort(caps) - - credDetails, err := getCredentialDetails(ctx, b) - if err != nil { - return nil, err - } - - return &v2.ConnectorCapabilities{ - ResourceTypeCapabilities: resourceTypeCapabilities, - ConnectorCapabilities: caps, - CredentialDetails: credDetails, - }, nil -} - -// Validate validates the connector. -func (b *builderImpl) Validate(ctx context.Context, request *v2.ConnectorServiceValidateRequest) (*v2.ConnectorServiceValidateResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.Validate") - defer span.End() - - retryer := retry.NewRetryer(ctx, retry.RetryConfig{ - MaxAttempts: 5, - InitialDelay: 1 * time.Second, - MaxDelay: 0, - }) - - for { - annos, err := b.cb.Validate(ctx) - if err == nil { - return &v2.ConnectorServiceValidateResponse{Annotations: annos}, nil - } - - if retryer.ShouldWaitAndRetry(ctx, err) { - continue - } - - return nil, fmt.Errorf("validate failed: %w", err) - } -} - -func (b *builderImpl) Grant(ctx context.Context, request *v2.GrantManagerServiceGrantRequest) (*v2.GrantManagerServiceGrantResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.Grant") - defer span.End() - - start := b.nowFunc() - tt := tasks.GrantType - l := ctxzap.Extract(ctx) - - rt := request.Entitlement.Resource.Id.ResourceType - - retryer := retry.NewRetryer(ctx, retry.RetryConfig{ - MaxAttempts: 3, - InitialDelay: 15 * time.Second, - MaxDelay: 60 * time.Second, - }) - - var grantFunc func(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) - provisioner, ok := b.resourceProvisioners[rt] - if ok { - grantFunc = func(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) { - annos, err := provisioner.Grant(ctx, principal, entitlement) - if err != nil { - return nil, annos, err - } - return nil, annos, nil - } - } - provisionerV2, ok := b.resourceProvisionersV2[rt] - if ok { - grantFunc = provisionerV2.Grant - } - - if grantFunc == nil { - l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: resource type does not have provisioner configured") - } - - for { - grants, annos, err := grantFunc(ctx, request.Principal, request.Entitlement) - if err == nil { - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.GrantManagerServiceGrantResponse{Annotations: annos, Grants: grants}, nil - } - if retryer.ShouldWaitAndRetry(ctx, err) { - continue - } - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("grant failed: %w", err) - } -} - -func (b *builderImpl) Revoke(ctx context.Context, request *v2.GrantManagerServiceRevokeRequest) (*v2.GrantManagerServiceRevokeResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.Revoke") - defer span.End() - - start := b.nowFunc() - tt := tasks.RevokeType - - l := ctxzap.Extract(ctx) - - rt := request.Grant.Entitlement.Resource.Id.ResourceType - - retryer := retry.NewRetryer(ctx, retry.RetryConfig{ - MaxAttempts: 3, - InitialDelay: 15 * time.Second, - MaxDelay: 60 * time.Second, - }) - - var revokeFunc func(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) - provisioner, ok := b.resourceProvisioners[rt] - if ok { - revokeFunc = 
provisioner.Revoke - } - provisionerV2, ok := b.resourceProvisionersV2[rt] - if ok { - revokeFunc = provisionerV2.Revoke - } - - if revokeFunc == nil { - l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: resource type does not have provisioner configured") - } - - for { - annos, err := revokeFunc(ctx, request.Grant) - if err == nil { - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.GrantManagerServiceRevokeResponse{Annotations: annos}, nil - } - if retryer.ShouldWaitAndRetry(ctx, err) { - continue - } - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("revoke failed: %w", err) - } -} - -// GetAsset streams the asset to the client. -// FIXME(jirwin): Asset streaming is disabled. -func (b *builderImpl) GetAsset(request *v2.AssetServiceGetAssetRequest, server v2.AssetService_GetAssetServer) error { - _, span := tracer.Start(server.Context(), "builderImpl.GetAsset") - defer span.End() +// can be added by implementing extension interfaces such as: +// - RegisterActionManager: For custom action support +// - EventProvider: For event stream support +// - TicketManager: For ticket management integration. - return nil +type MetadataProvider interface { + Metadata(ctx context.Context) (*v2.ConnectorMetadata, error) } -func (b *builderImpl) ListEventFeeds(ctx context.Context, request *v2.ListEventFeedsRequest) (*v2.ListEventFeedsResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.ListEventFeeds") - defer span.End() - - start := b.nowFunc() - tt := tasks.ListEventFeedsType - - feeds := make([]*v2.EventFeedMetadata, 0, len(b.eventFeeds)) - - for _, feed := range b.eventFeeds { - feeds = append(feeds, feed.EventFeedMetadata(ctx)) - } +type ValidateProvider interface { + Validate(ctx context.Context) (annotations.Annotations, error) +} - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.ListEventFeedsResponse{ - List: feeds, - }, nil +type ConnectorBuilder interface { + MetadataProvider + ValidateProvider + ResourceSyncers(ctx context.Context) []ResourceSyncer } -func (b *builderImpl) ListEvents(ctx context.Context, request *v2.ListEventsRequest) (*v2.ListEventsResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.ListEvents") - defer span.End() +type ConnectorBuilderV2 interface { + MetadataProvider + ValidateProvider + ResourceSyncers(ctx context.Context) []ResourceSyncerV2 +} - start := b.nowFunc() - feedId := request.GetEventFeedId() +type builder struct { + ticketingEnabled bool + m *metrics.M + nowFunc func() time.Time + clientSecret *jose.JSONWebKey + sessionStore sessions.SessionStore + metadataProvider MetadataProvider + validateProvider ValidateProvider + ticketManager TicketManagerLimited + accountManager AccountManagerLimited + resourceSyncers map[string]ResourceSyncerV2 + resourceProvisioners map[string]ResourceProvisionerV2Limited + resourceManagers map[string]ResourceManagerV2Limited + resourceDeleters map[string]ResourceDeleterV2Limited + resourceTargetedSyncers map[string]ResourceTargetedSyncerLimited + credentialManagers map[string]CredentialManagerLimited + eventFeeds map[string]EventFeed + accountManagers map[string]AccountManagerLimited // NOTE(kans): currently unused + actionManager ActionManager // Unified action manager for all actions +} - // If no feedId is provided, use the legacy Baton feed Id - if feedId == "" { - feedId = LegacyBatonFeedId +// 
NewConnector creates a new ConnectorServer for a new resource. +func NewConnector(ctx context.Context, in interface{}, opts ...Opt) (types.ConnectorServer, error) { + if in == nil { + return nil, fmt.Errorf("input cannot be nil") } - feed, ok := b.eventFeeds[feedId] - if !ok { - return nil, status.Errorf(codes.NotFound, "error: event feed not found") + switch t := in.(type) { + case types.ConnectorServer: + // its likely nothing uses this code path anymore + return t, nil + case ConnectorBuilder, ConnectorBuilderV2: + default: + return nil, fmt.Errorf("input is not a ConnectorServer, ConnectorBuilder, or ConnectorBuilderV2") + } + + clientSecretValue := ctx.Value(crypto.ContextClientSecretKey) + clientSecretJWK, _ := clientSecretValue.(*jose.JSONWebKey) + + // Create the action manager (concrete type for registration, stored as interface for dispatch) + actionMgr := actions.NewActionManager(ctx) + + b := &builder{ + metadataProvider: nil, + validateProvider: nil, + ticketManager: nil, + accountManager: nil, + nowFunc: time.Now, + clientSecret: clientSecretJWK, + resourceSyncers: make(map[string]ResourceSyncerV2), + resourceProvisioners: make(map[string]ResourceProvisionerV2Limited), + resourceManagers: make(map[string]ResourceManagerV2Limited), + resourceDeleters: make(map[string]ResourceDeleterV2Limited), + resourceTargetedSyncers: make(map[string]ResourceTargetedSyncerLimited), + credentialManagers: make(map[string]CredentialManagerLimited), + eventFeeds: make(map[string]EventFeed), + accountManagers: make(map[string]AccountManagerLimited), + actionManager: actionMgr, + } + + // WithTicketingEnabled checks for the ticketManager + if err := b.addTicketManager(ctx, in); err != nil { + return nil, err } - tt := tasks.ListEventsType - events, streamState, annotations, err := feed.ListEvents(ctx, request.StartAt, &pagination.StreamToken{ - Size: int(request.PageSize), - Cursor: request.Cursor, - }) + err := b.options(opts...) 
if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: listing events failed: %w", err) + return nil, err } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.ListEventsResponse{ - Events: events, - Cursor: streamState.Cursor, - HasMore: streamState.HasMore, - Annotations: annotations, - }, nil -} -func (b *builderImpl) CreateResource(ctx context.Context, request *v2.CreateResourceRequest) (*v2.CreateResourceResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.CreateResource") - defer span.End() + if b.m == nil { + b.m = metrics.New(metrics.NewNoOpHandler(ctx)) + } - start := b.nowFunc() - tt := tasks.CreateResourceType - l := ctxzap.Extract(ctx) - rt := request.GetResource().GetId().GetResourceType() + if err := b.addConnectorBuilderProviders(ctx, in); err != nil { + return nil, err + } - var manager interface { - Create(ctx context.Context, resource *v2.Resource) (*v2.Resource, annotations.Annotations, error) + if err := b.addEventFeed(ctx, in); err != nil { + return nil, err } - manager, ok := b.resourceManagersV2[rt] - if !ok { - manager, ok = b.resourceManagers[rt] + // Handle deprecated action manager interfaces (pass concrete type for registration) + if err := b.addActionManager(ctx, in, actionMgr); err != nil { + return nil, err } - if ok { - resource, annos, err := manager.Create(ctx, request.Resource) - if err != nil { - l.Error("error: create resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: create resource failed: %w", err) + // Handle the new GlobalActionProvider interface + if globalActionProvider, ok := in.(GlobalActionProvider); ok { + if err := globalActionProvider.GlobalActions(ctx, actionMgr); err != nil { + return nil, fmt.Errorf("error registering global actions: %w", err) } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.CreateResourceResponse{Created: resource, Annotations: annos}, nil } - l.Error("error: resource type does not have resource Create() configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Create() configured", rt)) -} -func (b *builderImpl) DeleteResource(ctx context.Context, request *v2.DeleteResourceRequest) (*v2.DeleteResourceResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.DeleteResource") - defer span.End() + addResourceType := func(ctx context.Context, rType string, rs interface{}) error { + if err := b.addResourceSyncers(ctx, rType, rs); err != nil { + return err + } - start := b.nowFunc() - tt := tasks.DeleteResourceType + if err := b.addProvisioner(ctx, rType, rs); err != nil { + return err + } - l := ctxzap.Extract(ctx) - rt := request.GetResourceId().GetResourceType() - var rsDeleter ResourceDeleter - var rsDeleterV2 ResourceDeleterV2 - var ok bool - - rsDeleterV2, ok = b.resourceManagersV2[rt] - if !ok { - rsDeleterV2, ok = b.resourceDeletersV2[rt] - } + if err := b.addTargetedSyncer(ctx, rType, rs); err != nil { + return err + } - if ok { - annos, err := rsDeleterV2.Delete(ctx, request.ResourceId, request.ParentResourceId) - if err != nil { - l.Error("error: deleteV2 resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: delete resource failed: %w", err) + if err := b.addResourceManager(ctx, rType, rs); err != nil { + return err + } + 
+ if err := b.addAccountManager(ctx, rType, rs); err != nil { + return err + } + + if err := b.addCredentialManager(ctx, rType, rs); err != nil { + return err } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.DeleteResourceResponse{Annotations: annos}, nil + + return nil } - rsDeleter, ok = b.resourceManagers[rt] - if !ok { - rsDeleter, ok = b.resourceDeleters[rt] + if cb, ok := in.(ConnectorBuilder); ok { + for _, rb := range cb.ResourceSyncers(ctx) { + rType := rb.ResourceType(ctx) + if err := addResourceType(ctx, rType.GetId(), rb); err != nil { + return nil, err + } + } + return b, nil } - if ok { - annos, err := rsDeleter.Delete(ctx, request.GetResourceId()) - if err != nil { - l.Error("error: delete resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: delete resource failed: %w", err) + + if cb2, ok := in.(ConnectorBuilderV2); ok { + for _, rb := range cb2.ResourceSyncers(ctx) { + rType := rb.ResourceType(ctx) + if err := addResourceType(ctx, rType.GetId(), rb); err != nil { + return nil, err + } } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.DeleteResourceResponse{Annotations: annos}, nil + return b, nil } - l.Error("error: resource type does not have resource Delete() configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Delete() configured", rt)) + + return nil, fmt.Errorf("input is not a ConnectorBuilder or a ConnectorBuilderV2") } -func (b *builderImpl) DeleteResourceV2(ctx context.Context, request *v2.DeleteResourceV2Request) (*v2.DeleteResourceV2Response, error) { - ctx, span := tracer.Start(ctx, "builderImpl.DeleteResourceV2") - defer span.End() +type Opt func(b *builder) error - start := b.nowFunc() - tt := tasks.DeleteResourceType +func WithTicketingEnabled() Opt { + return func(b *builder) error { + if b.ticketManager == nil { + return errors.New("external ticketing not supported") + } + b.ticketingEnabled = true + return nil + } +} - l := ctxzap.Extract(ctx) - rt := request.GetResourceId().GetResourceType() - var rsDeleter ResourceDeleter - var rsDeleterV2 ResourceDeleterV2 - var ok bool - - rsDeleterV2, ok = b.resourceManagersV2[rt] - if !ok { - rsDeleterV2, ok = b.resourceDeletersV2[rt] +func WithMetricsHandler(h metrics.Handler) Opt { + return func(b *builder) error { + b.m = metrics.New(h) + return nil + } +} + +func WithSessionStore(ss sessions.SessionStore) Opt { + return func(b *builder) error { + b.sessionStore = ss + return nil } +} - if ok { - annos, err := rsDeleterV2.Delete(ctx, request.ResourceId, request.ParentResourceId) - if err != nil { - l.Error("error: deleteV2 resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: delete resource failed: %w", err) +func (b *builder) options(opts ...Opt) error { + for _, opt := range opts { + if err := opt(b); err != nil { + return err } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.DeleteResourceV2Response{Annotations: annos}, nil } - rsDeleter, ok = b.resourceManagers[rt] - if !ok { - rsDeleter, ok = b.resourceDeleters[rt] + return nil +} + +func (b *builder) addConnectorBuilderProviders(_ context.Context, in interface{}) error { + if mp, ok := in.(MetadataProvider); ok { + b.metadataProvider = mp + } else { + return fmt.Errorf("error: metadata provider not 
implemented") } - if ok { - annos, err := rsDeleter.Delete(ctx, request.GetResourceId()) - if err != nil { - l.Error("error: delete resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: delete resource failed: %w", err) - } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.DeleteResourceV2Response{Annotations: annos}, nil + + if vp, ok := in.(ValidateProvider); ok { + b.validateProvider = vp + } else { + return fmt.Errorf("error: validate provider not implemented") } - l.Error("error: resource type does not have resource Delete() configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Delete() configured", rt)) + + return nil } -func (b *builderImpl) RotateCredential(ctx context.Context, request *v2.RotateCredentialRequest) (*v2.RotateCredentialResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.RotateCredential") +// GetMetadata gets all metadata for a connector. +func (b *builder) GetMetadata(ctx context.Context, request *v2.ConnectorServiceGetMetadataRequest) (*v2.ConnectorServiceGetMetadataResponse, error) { + ctx, span := tracer.Start(ctx, "builder.GetMetadata") defer span.End() start := b.nowFunc() - tt := tasks.RotateCredentialsType - l := ctxzap.Extract(ctx) - rt := request.GetResourceId().GetResourceType() - manager, ok := b.credentialManagers[rt] - if !ok { - l.Error("error: resource type does not have credential manager configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, "resource type does not have credential manager configured") - } - - opts, err := crypto.ConvertCredentialOptions(ctx, b.clientSecret, request.GetCredentialOptions(), request.GetEncryptionConfigs()) + tt := tasks.GetMetadataType + md, err := b.metadataProvider.Metadata(ctx) if err != nil { - l.Error("error: converting credential options failed", zap.Error(err)) b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: converting credential options failed: %w", err) + return nil, err } - plaintexts, annos, err := manager.Rotate(ctx, request.GetResourceId(), opts) + md.Capabilities, err = b.getCapabilities(ctx) if err != nil { - l.Error("error: rotate credentials on resource failed", zap.Error(err)) b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: rotate credentials on resource failed: %w", err) + return nil, err } - pkem, err := crypto.NewEncryptionManager(request.GetCredentialOptions(), request.GetEncryptionConfigs()) - if err != nil { - l.Error("error: creating encryption manager failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: creating encryption manager failed: %w", err) + annos := annotations.Annotations(md.GetAnnotations()) + if b.ticketManager != nil { + annos.Append(v2.ExternalTicketSettings_builder{Enabled: b.ticketingEnabled}.Build()) } + md.SetAnnotations(annos) - var encryptedDatas []*v2.EncryptedData - for _, plaintextCredential := range plaintexts { - encryptedData, err := pkem.Encrypt(ctx, plaintextCredential) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, err + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.ConnectorServiceGetMetadataResponse_builder{Metadata: 
md}.Build(), nil +} + +// Validate validates the connector. +func (b *builder) Validate(ctx context.Context, request *v2.ConnectorServiceValidateRequest) (*v2.ConnectorServiceValidateResponse, error) { + ctx, span := tracer.Start(ctx, "builder.Validate") + defer span.End() + + retryer := retry.NewRetryer(ctx, retry.RetryConfig{ + MaxAttempts: 5, + InitialDelay: 1 * time.Second, + MaxDelay: 0, + }) + + for { + annos, err := b.validateProvider.Validate(ctx) + if err == nil { + return v2.ConnectorServiceValidateResponse_builder{ + Annotations: annos, + SdkVersion: sdk.Version, + }.Build(), nil } - encryptedDatas = append(encryptedDatas, encryptedData...) - } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return &v2.RotateCredentialResponse{ - Annotations: annos, - ResourceId: request.GetResourceId(), - EncryptedData: encryptedDatas, - }, nil + if retryer.ShouldWaitAndRetry(ctx, err) { + continue + } + + return nil, fmt.Errorf("validate failed: %w", err) + } } -func (b *builderImpl) Cleanup(ctx context.Context, request *v2.ConnectorServiceCleanupRequest) (*v2.ConnectorServiceCleanupResponse, error) { +func (b *builder) Cleanup(ctx context.Context, request *v2.ConnectorServiceCleanupRequest) (*v2.ConnectorServiceCleanupResponse, error) { l := ctxzap.Extract(ctx) - - // Clear session cache if available in context - sessionCache, err := session.GetSession(ctx) - if err != nil { - l.Warn("error getting session cache", zap.Error(err)) - } else { - activeSync, err := annotations.GetActiveSyncIdFromAnnotations(annotations.Annotations(request.GetAnnotations())) + if b.sessionStore != nil { + // Limit c1z size before we upload, because the uploads time out... + // TODO(kans): we could hold onto the session store if we are in debug mode. + // TODO(kans): we should probably not do this for lambda connectors. + err := b.sessionStore.Clear(ctx, sessions.WithSyncID(request.GetActiveSyncId())) if err != nil { - l.Warn("error getting active sync id", zap.Error(err)) - } - if activeSync != "" { - err = sessionCache.Clear(ctx) - if err != nil { - l.Warn("error clearing session cache", zap.Error(err)) - } + l.Warn("error clearing session store", zap.Error(err)) } } + // Clear all http caches at the end of a sync. This must be run in the child process, which is why it's in this function and not in syncer.go - err = uhttp.ClearCaches(ctx) + err := uhttp.ClearCaches(ctx) if err != nil { l.Warn("error clearing http caches", zap.Error(err)) } @@ -1464,175 +334,147 @@ func (b *builderImpl) Cleanup(ctx context.Context, request *v2.ConnectorServiceC return resp, err } -func (b *builderImpl) CreateAccount(ctx context.Context, request *v2.CreateAccountRequest) (*v2.CreateAccountResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.CreateAccount") - defer span.End() +// getCapabilities gets all capabilities for a connector. 
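+// Capabilities are derived from the registered resource syncers: every type
+// reports sync, and targeted sync, provisioning, account provisioning,
+// resource create/delete, and credential rotation are added only when the
+// matching interface was registered for that type. Connector-level
+// capabilities for event feeds, ticketing, and actions are appended last.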
+func (b *builder) getCapabilities(ctx context.Context) (*v2.ConnectorCapabilities, error) {
+	connectorCaps := make(map[v2.Capability]struct{})
+	resourceTypeCapabilities := []*v2.ResourceTypeCapability{}
 
-	start := b.nowFunc()
-	tt := tasks.CreateAccountType
-	l := ctxzap.Extract(ctx)
-	if b.accountManager == nil {
-		l.Error("error: connector does not have account manager configured")
-		b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
-		return nil, status.Error(codes.Unimplemented, "connector does not have credential manager configured")
-	}
+	for resourceTypeID, rb := range b.resourceSyncers {
+		connectorCaps[v2.Capability_CAPABILITY_SYNC] = struct{}{}
+		caps := []v2.Capability{v2.Capability_CAPABILITY_SYNC}
 
-	opts, err := crypto.ConvertCredentialOptions(ctx, b.clientSecret, request.GetCredentialOptions(), request.GetEncryptionConfigs())
-	if err != nil {
-		l.Error("error: converting credential options failed", zap.Error(err))
-		b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
-		return nil, fmt.Errorf("error: converting credential options failed: %w", err)
-	}
+		if _, exists := b.resourceTargetedSyncers[resourceTypeID]; exists {
+			caps = append(caps, v2.Capability_CAPABILITY_TARGETED_SYNC)
+		}
 
-	result, plaintexts, annos, err := b.accountManager.CreateAccount(ctx, request.GetAccountInfo(), opts)
-	if err != nil {
-		l.Error("error: create account failed", zap.Error(err))
-		b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
-		return nil, fmt.Errorf("error: create account failed: %w", err)
-	}
+		if _, exists := b.resourceProvisioners[resourceTypeID]; exists {
+			caps = append(caps, v2.Capability_CAPABILITY_PROVISION)
+		}
 
-	pkem, err := crypto.NewEncryptionManager(request.GetCredentialOptions(), request.GetEncryptionConfigs())
-	if err != nil {
-		l.Error("error: creating encryption manager failed", zap.Error(err))
-		b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
-		return nil, fmt.Errorf("error: creating encryption manager failed: %w", err)
-	}
+		if _, exists := b.accountManagers[resourceTypeID]; exists {
+			caps = append(caps, v2.Capability_CAPABILITY_ACCOUNT_PROVISIONING)
+		}
+
+		if _, exists := b.resourceManagers[resourceTypeID]; exists {
+			caps = append(caps, v2.Capability_CAPABILITY_RESOURCE_DELETE, v2.Capability_CAPABILITY_RESOURCE_CREATE)
+		} else if _, exists := b.resourceDeleters[resourceTypeID]; exists {
+			caps = append(caps, v2.Capability_CAPABILITY_RESOURCE_DELETE)
+		}
+
+		if _, exists := b.credentialManagers[resourceTypeID]; exists {
+			caps = append(caps, v2.Capability_CAPABILITY_CREDENTIAL_ROTATION)
+		}
+
+		// Extend the capabilities with the resource type specific capabilities
+		for _, cap := range caps {
+			connectorCaps[cap] = struct{}{}
+		}
 
-	var encryptedDatas []*v2.EncryptedData
-	for _, plaintextCredential := range plaintexts {
-		encryptedData, err := pkem.Encrypt(ctx, plaintextCredential)
+		r := rb.ResourceType(ctx)
+		annos := annotations.Annotations(r.Annotations)
+		p := &v2.CapabilityPermissions{}
+		_, err := annos.Pick(p)
 		if err != nil {
-			b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
 			return nil, err
 		}
-		encryptedDatas = append(encryptedDatas, encryptedData...)
- } - rv := &v2.CreateAccountResponse{ - EncryptedData: encryptedDatas, - Annotations: annos, + resourceTypeCapabilities = append(resourceTypeCapabilities, v2.ResourceTypeCapability_builder{ + ResourceType: rb.ResourceType(ctx), + Capabilities: caps, + Permissions: p, + }.Build()) } - switch r := result.(type) { - case *v2.CreateAccountResponse_SuccessResult: - rv.Result = &v2.CreateAccountResponse_Success{Success: r} - case *v2.CreateAccountResponse_ActionRequiredResult: - rv.Result = &v2.CreateAccountResponse_ActionRequired{ActionRequired: r} - default: - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("unknown result type: %T", result)) + // Check for account provisioning capability (global, not per resource type) + if b.accountManager != nil { + connectorCaps[v2.Capability_CAPABILITY_ACCOUNT_PROVISIONING] = struct{}{} } + sort.Slice(resourceTypeCapabilities, func(i, j int) bool { + return resourceTypeCapabilities[i].GetResourceType().GetId() < resourceTypeCapabilities[j].GetResourceType().GetId() + }) - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return rv, nil -} - -func (b *builderImpl) ListActionSchemas(ctx context.Context, request *v2.ListActionSchemasRequest) (*v2.ListActionSchemasResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.ListActionSchemas") - defer span.End() - - start := b.nowFunc() - tt := tasks.ActionListSchemasType - if b.actionManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: action manager not implemented") + if len(b.eventFeeds) > 0 { + connectorCaps[v2.Capability_CAPABILITY_EVENT_FEED_V2] = struct{}{} } - actionSchemas, annos, err := b.actionManager.ListActionSchemas(ctx) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: listing action schemas failed: %w", err) + if b.ticketManager != nil { + connectorCaps[v2.Capability_CAPABILITY_TICKETING] = struct{}{} } - rv := &v2.ListActionSchemasResponse{ - Schemas: actionSchemas, - Annotations: annos, + if b.actionManager.HasActions() { + connectorCaps[v2.Capability_CAPABILITY_ACTIONS] = struct{}{} } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return rv, nil -} - -func (b *builderImpl) GetActionSchema(ctx context.Context, request *v2.GetActionSchemaRequest) (*v2.GetActionSchemaResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.GetActionSchema") - defer span.End() - - start := b.nowFunc() - tt := tasks.ActionGetSchemaType - if b.actionManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: action manager not implemented") + var caps []v2.Capability + for c := range connectorCaps { + caps = append(caps, c) } + slices.Sort(caps) - actionSchema, annos, err := b.actionManager.GetActionSchema(ctx, request.GetName()) + credDetails, err := getCredentialDetails(ctx, b) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: getting action schema failed: %w", err) - } - - rv := &v2.GetActionSchemaResponse{ - Schema: actionSchema, - Annotations: annos, + return nil, err } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return rv, nil + return v2.ConnectorCapabilities_builder{ + ResourceTypeCapabilities: resourceTypeCapabilities, + ConnectorCapabilities: caps, + CredentialDetails: credDetails, + }.Build(), nil } -func (b *builderImpl) InvokeAction(ctx context.Context, request 
*v2.InvokeActionRequest) (*v2.InvokeActionResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.InvokeAction") - defer span.End() - - start := b.nowFunc() - tt := tasks.ActionInvokeType - if b.actionManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: action manager not implemented") - } - - id, status, resp, annos, err := b.actionManager.InvokeAction(ctx, request.GetName(), request.GetArgs()) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: invoking action failed: %w", err) +func validateCapabilityDetails(_ context.Context, credDetails *v2.CredentialDetails) error { + if credDetails.HasCapabilityAccountProvisioning() { + // Ensure that the preferred option is included and is part of the supported options + if credDetails.GetCapabilityAccountProvisioning().GetPreferredCredentialOption() == v2.CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED { + return status.Error(codes.InvalidArgument, "error: preferred credential creation option is not set") + } + if !slices.Contains(credDetails.GetCapabilityAccountProvisioning().GetSupportedCredentialOptions(), credDetails.GetCapabilityAccountProvisioning().GetPreferredCredentialOption()) { + return status.Error(codes.InvalidArgument, "error: preferred credential creation option is not part of the supported options") + } } - rv := &v2.InvokeActionResponse{ - Id: id, - Name: request.GetName(), - Status: status, - Annotations: annos, - Response: resp, + if credDetails.HasCapabilityCredentialRotation() { + // Ensure that the preferred option is included and is part of the supported options + if credDetails.GetCapabilityCredentialRotation().GetPreferredCredentialOption() == v2.CapabilityDetailCredentialOption_CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED { + return status.Error(codes.InvalidArgument, "error: preferred credential rotation option is not set") + } + if !slices.Contains(credDetails.GetCapabilityCredentialRotation().GetSupportedCredentialOptions(), credDetails.GetCapabilityCredentialRotation().GetPreferredCredentialOption()) { + return status.Error(codes.InvalidArgument, "error: preferred credential rotation option is not part of the supported options") + } } - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return rv, nil + return nil } -func (b *builderImpl) GetActionStatus(ctx context.Context, request *v2.GetActionStatusRequest) (*v2.GetActionStatusResponse, error) { - ctx, span := tracer.Start(ctx, "builderImpl.GetActionStatus") - defer span.End() +func getCredentialDetails(ctx context.Context, b *builder) (*v2.CredentialDetails, error) { + l := ctxzap.Extract(ctx) + rv := &v2.CredentialDetails{} - start := b.nowFunc() - tt := tasks.ActionStatusType - if b.actionManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: action manager not implemented") + // Check for account provisioning capability details + if b.accountManager != nil { + accountProvisioningCapabilityDetails, _, err := b.accountManager.CreateAccountCapabilityDetails(ctx) + if err != nil { + l.Error("error: getting account provisioning details", zap.Error(err)) + return nil, fmt.Errorf("error: getting account provisioning details: %w", err) + } + rv.SetCapabilityAccountProvisioning(accountProvisioningCapabilityDetails) } - status, name, rv, annos, err := b.actionManager.GetActionStatus(ctx, request.GetId()) - if err != nil { - b.m.RecordTaskFailure(ctx, tt, 
b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: getting action status failed: %w", err) + // Check for credential rotation capability details + for _, cm := range b.credentialManagers { + credentialRotationCapabilityDetails, _, err := cm.RotateCapabilityDetails(ctx) + if err != nil { + l.Error("error: getting credential management details", zap.Error(err)) + return nil, fmt.Errorf("error: getting credential management details: %w", err) + } + rv.SetCapabilityCredentialRotation(credentialRotationCapabilityDetails) + break // Only need one credential manager's details } - resp := &v2.GetActionStatusResponse{ - Id: request.GetId(), - Name: name, - Status: status, - Annotations: annos, - Response: rv, + err := validateCapabilityDetails(ctx, rv) + if err != nil { + return nil, fmt.Errorf("error: validating capability details: %w", err) } - - b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) - return resp, nil + return rv, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/credentials.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/credentials.go new file mode 100644 index 00000000..4f303e94 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/credentials.go @@ -0,0 +1,105 @@ +package connectorbuilder + +import ( + "context" + "fmt" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/crypto" + "github.com/conductorone/baton-sdk/pkg/types/tasks" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// CredentialManager extends ResourceSyncer to add capabilities for managing credentials. +// Implementing this interface indicates the connector supports rotating credentials +// for resources of the associated type. This is commonly used for user accounts +// or service accounts that have rotatable credentials. 
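+//
+// A minimal sketch of how a connector might satisfy CredentialManagerLimited
+// (the userBuilder type and its client.ResetPassword call are hypothetical,
+// not part of this SDK):
+//
+//	func (u *userBuilder) Rotate(
+//		ctx context.Context,
+//		resourceId *v2.ResourceId,
+//		credentialOptions *v2.LocalCredentialOptions,
+//	) ([]*v2.PlaintextData, annotations.Annotations, error) {
+//		// Ask the upstream API for a new secret and return it as plaintext
+//		// data; RotateCredential below encrypts it before it leaves the connector.
+//		plaintexts, err := u.client.ResetPassword(ctx, resourceId.GetResource())
+//		if err != nil {
+//			return nil, nil, err
+//		}
+//		return plaintexts, nil, nil
+//	}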
+type CredentialManager interface { + ResourceSyncer + CredentialManagerLimited +} + +type CredentialManagerLimited interface { + Rotate(ctx context.Context, + resourceId *v2.ResourceId, + credentialOptions *v2.LocalCredentialOptions) ([]*v2.PlaintextData, annotations.Annotations, error) + RotateCapabilityDetails(ctx context.Context) (*v2.CredentialDetailsCredentialRotation, annotations.Annotations, error) +} + +type OldCredentialManager interface { + Rotate(ctx context.Context, + resourceId *v2.ResourceId, + credentialOptions *v2.CredentialOptions) ([]*v2.PlaintextData, annotations.Annotations, error) +} + +func (b *builder) RotateCredential(ctx context.Context, request *v2.RotateCredentialRequest) (*v2.RotateCredentialResponse, error) { + ctx, span := tracer.Start(ctx, "builder.RotateCredential") + defer span.End() + + start := b.nowFunc() + tt := tasks.RotateCredentialsType + l := ctxzap.Extract(ctx) + rt := request.GetResourceId().GetResourceType() + manager, ok := b.credentialManagers[rt] + if !ok { + l.Error("error: resource type does not have credential manager configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Error(codes.Unimplemented, "resource type does not have credential manager configured") + } + + opts, err := crypto.ConvertCredentialOptions(ctx, b.clientSecret, request.GetCredentialOptions(), request.GetEncryptionConfigs()) + if err != nil { + l.Error("error: converting credential options failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: converting credential options failed: %w", err) + } + + plaintexts, annos, err := manager.Rotate(ctx, request.GetResourceId(), opts) + if err != nil { + l.Error("error: rotate credentials on resource failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: rotate credentials on resource failed: %w", err) + } + + pkem, err := crypto.NewEncryptionManager(request.GetCredentialOptions(), request.GetEncryptionConfigs()) + if err != nil { + l.Error("error: creating encryption manager failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: creating encryption manager failed: %w", err) + } + + var encryptedDatas []*v2.EncryptedData + for _, plaintextCredential := range plaintexts { + encryptedData, err := pkem.Encrypt(ctx, plaintextCredential) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, err + } + encryptedDatas = append(encryptedDatas, encryptedData...) 
+ } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.RotateCredentialResponse_builder{ + Annotations: annos, + ResourceId: request.GetResourceId(), + EncryptedData: encryptedDatas, + }.Build(), nil +} + +func (b *builder) addCredentialManager(_ context.Context, typeId string, in interface{}) error { + if _, ok := in.(OldCredentialManager); ok { + return fmt.Errorf("error: old credential manager interface implemented for %s", typeId) + } + + if credentialManagers, ok := in.(CredentialManagerLimited); ok { + if _, ok := b.credentialManagers[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for credential manager %s", typeId) + } + b.credentialManagers[typeId] = credentialManagers + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/events.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/events.go new file mode 100644 index 00000000..c6bf352f --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/events.go @@ -0,0 +1,163 @@ +package connectorbuilder + +import ( + "context" + "fmt" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/pagination" + "github.com/conductorone/baton-sdk/pkg/types/tasks" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + LegacyBatonFeedId = "baton_feed_event" +) + +// Deprecated: This interface is deprecated in favor of EventProviderV2 which supports +// multiple event feeds. Implementing this interface indicates the connector can provide +// a single stream of events from the external system, enabling near real-time updates +// in Baton. New connectors should implement EventProviderV2 instead. +type EventProvider interface { + ConnectorBuilder + EventLister +} + +// NewEventProviderV2 is a new interface that allows connectors to provide multiple event feeds. +// +// This is the recommended interface for implementing event feed support in new connectors. +type EventProviderV2 interface { + ConnectorBuilder + EventFeedsLimited +} + +type EventFeedsLimited interface { + EventFeeds(ctx context.Context) []EventFeed +} + +// EventFeed is a single stream of events from the external system. +// +// EventFeedMetadata describes this feed, and a connector can have multiple feeds. +type EventFeed interface { + EventLister + EventFeedLimited +} + +type EventFeedLimited interface { + EventFeedMetadata(ctx context.Context) *v2.EventFeedMetadata +} + +// Compatibility interface lets us handle both EventFeed and EventProvider the same. 
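+// Legacy single-feed EventProvider implementations are wrapped by
+// newEventFeedV1to2 below and registered under LegacyBatonFeedId, while
+// EventProviderV2 connectors register each feed returned by EventFeeds;
+// addEventFeed rejects connectors that implement both styles.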
+type EventLister interface { + ListEvents(ctx context.Context, earliestEvent *timestamppb.Timestamp, pToken *pagination.StreamToken) ([]*v2.Event, *pagination.StreamState, annotations.Annotations, error) +} + +func newEventFeedV1to2(eventFeed EventLister) EventFeed { + return &oldEventFeedWrapper{ + feed: eventFeed, + } +} + +type oldEventFeedWrapper struct { + feed EventLister +} + +func (e *oldEventFeedWrapper) EventFeedMetadata(ctx context.Context) *v2.EventFeedMetadata { + return v2.EventFeedMetadata_builder{ + Id: LegacyBatonFeedId, + SupportedEventTypes: []v2.EventType{v2.EventType_EVENT_TYPE_UNSPECIFIED}, + }.Build() +} + +func (e *oldEventFeedWrapper) ListEvents( + ctx context.Context, + earliestEvent *timestamppb.Timestamp, + pToken *pagination.StreamToken, +) ([]*v2.Event, *pagination.StreamState, annotations.Annotations, error) { + return e.feed.ListEvents(ctx, earliestEvent, pToken) +} + +func (b *builder) ListEventFeeds(ctx context.Context, request *v2.ListEventFeedsRequest) (*v2.ListEventFeedsResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListEventFeeds") + defer span.End() + + start := b.nowFunc() + tt := tasks.ListEventFeedsType + + feeds := make([]*v2.EventFeedMetadata, 0, len(b.eventFeeds)) + + for _, feed := range b.eventFeeds { + feeds = append(feeds, feed.EventFeedMetadata(ctx)) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.ListEventFeedsResponse_builder{ + List: feeds, + }.Build(), nil +} + +func (b *builder) ListEvents(ctx context.Context, request *v2.ListEventsRequest) (*v2.ListEventsResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListEvents") + defer span.End() + + start := b.nowFunc() + feedId := request.GetEventFeedId() + + // If no feedId is provided, use the legacy Baton feed Id + if feedId == "" { + feedId = LegacyBatonFeedId + } + + feed, ok := b.eventFeeds[feedId] + if !ok { + return nil, status.Errorf(codes.NotFound, "error: event feed not found") + } + + tt := tasks.ListEventsType + events, streamState, annotations, err := feed.ListEvents(ctx, request.GetStartAt(), &pagination.StreamToken{ + Size: int(request.GetPageSize()), + Cursor: request.GetCursor(), + }) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: listing events failed: %w", err) + } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.ListEventsResponse_builder{ + Events: events, + Cursor: streamState.Cursor, + HasMore: streamState.HasMore, + Annotations: annotations, + }.Build(), nil +} + +func (b *builder) addEventFeed(ctx context.Context, in interface{}) error { + if ep, ok := in.(EventFeedsLimited); ok { + for _, ef := range ep.EventFeeds(ctx) { + feedData := ef.EventFeedMetadata(ctx) + if feedData == nil { + return fmt.Errorf("error: event feed metadata is nil") + } + if err := feedData.Validate(); err != nil { + return fmt.Errorf("error: event feed metadata for %s is invalid: %w", feedData.GetId(), err) + } + if _, ok := b.eventFeeds[feedData.GetId()]; ok { + return fmt.Errorf("error: duplicate event feed id found: %s", feedData.GetId()) + } + b.eventFeeds[feedData.GetId()] = ef + } + } + + if ep, ok := in.(EventLister); ok { + // Register the legacy Baton feed as a v2 event feed + // implementing both v1 and v2 event feeds is not supported. 
+ if len(b.eventFeeds) != 0 { + return fmt.Errorf("error: using legacy event feed is not supported when using EventProviderV2") + } + b.eventFeeds[LegacyBatonFeedId] = newEventFeedV1to2(ep) + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go new file mode 100644 index 00000000..4c47e653 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go @@ -0,0 +1,230 @@ +package connectorbuilder + +import ( + "context" + "fmt" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/types/tasks" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ResourceManager extends ResourceSyncer to add capabilities for creating resources. +// +// Implementing this interface indicates the connector supports creating and deleting resources +// of the associated resource type. A ResourceManager automatically provides ResourceDeleter +// functionality. +type ResourceManager interface { + ResourceSyncer + ResourceManagerLimited +} + +type ResourceManagerLimited interface { + ResourceCreator + ResourceDeleterLimited +} + +type ResourceManagerV2Limited interface { + ResourceCreator + ResourceDeleterV2Limited +} + +// ResourceManagerV2 extends ResourceSyncer to add capabilities for creating resources. +// +// This is the recommended interface for implementing resource creation operations in new connectors. +type ResourceManagerV2 interface { + ResourceSyncer + ResourceManagerV2Limited +} + +type ResourceCreator interface { + Create(ctx context.Context, resource *v2.Resource) (*v2.Resource, annotations.Annotations, error) +} + +// ResourceDeleter extends ResourceSyncer to add capabilities for deleting resources. +// +// Implementing this interface indicates the connector supports deleting resources +// of the associated resource type. +type ResourceDeleter interface { + ResourceSyncer + ResourceDeleterLimited +} +type ResourceDeleterLimited interface { + Delete(ctx context.Context, resourceId *v2.ResourceId) (annotations.Annotations, error) +} + +// ResourceDeleterV2 extends ResourceSyncer to add capabilities for deleting resources. +// +// This is the recommended interface for implementing resource deletion operations in new connectors. +// It differs from ResourceDeleter by having the resource, not just the id. 
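+//
+// A minimal sketch of the V2 delete shape (the teamBuilder type and its
+// client.DeleteTeam call are hypothetical, not part of this SDK):
+//
+//	func (t *teamBuilder) Delete(
+//		ctx context.Context,
+//		resourceId *v2.ResourceId,
+//		parentResourceID *v2.ResourceId,
+//	) (annotations.Annotations, error) {
+//		// Unlike the V1 interface, the parent resource ID (for example the
+//		// owning organization) is available here without an extra lookup.
+//		org := parentResourceID.GetResource()
+//		return nil, t.client.DeleteTeam(ctx, org, resourceId.GetResource())
+//	}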
+type ResourceDeleterV2 interface { + ResourceSyncer + ResourceDeleterV2Limited +} + +type ResourceDeleterV2Limited interface { + Delete(ctx context.Context, resourceId *v2.ResourceId, parentResourceID *v2.ResourceId) (annotations.Annotations, error) +} + +func (b *builder) CreateResource(ctx context.Context, request *v2.CreateResourceRequest) (*v2.CreateResourceResponse, error) { + ctx, span := tracer.Start(ctx, "builder.CreateResource") + defer span.End() + + start := b.nowFunc() + tt := tasks.CreateResourceType + l := ctxzap.Extract(ctx) + rt := request.GetResource().GetId().GetResourceType() + + manager, ok := b.resourceManagers[rt] + if !ok { + l.Error("error: resource type does not have resource Create() configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Create() configured", rt)) + } + resource, annos, err := manager.Create(ctx, request.GetResource()) + if err != nil { + l.Error("error: create resource failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: create resource failed: %w", err) + } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.CreateResourceResponse_builder{Created: resource, Annotations: annos}.Build(), nil +} + +func (b *builder) DeleteResource(ctx context.Context, request *v2.DeleteResourceRequest) (*v2.DeleteResourceResponse, error) { + ctx, span := tracer.Start(ctx, "builder.DeleteResource") + defer span.End() + + start := b.nowFunc() + tt := tasks.DeleteResourceType + + l := ctxzap.Extract(ctx) + rt := request.GetResourceId().GetResourceType() + var rsDeleter ResourceDeleterV2Limited + var ok bool + + rsDeleter, ok = b.resourceManagers[rt] + if !ok { + rsDeleter, ok = b.resourceDeleters[rt] + } + + if !ok { + l.Error("error: resource type does not have resource Delete() configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Delete() configured", rt)) + } + + annos, err := rsDeleter.Delete(ctx, request.GetResourceId(), request.GetParentResourceId()) + if err != nil { + l.Error("error: deleteV2 resource failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: delete resource failed: %w", err) + } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.DeleteResourceResponse_builder{Annotations: annos}.Build(), nil +} + +func (b *builder) DeleteResourceV2(ctx context.Context, request *v2.DeleteResourceV2Request) (*v2.DeleteResourceV2Response, error) { + ctx, span := tracer.Start(ctx, "builder.DeleteResourceV2") + defer span.End() + + start := b.nowFunc() + tt := tasks.DeleteResourceType + + l := ctxzap.Extract(ctx) + rt := request.GetResourceId().GetResourceType() + var rsDeleter ResourceDeleterV2Limited + var ok bool + + rsDeleter, ok = b.resourceManagers[rt] + if !ok { + rsDeleter, ok = b.resourceDeleters[rt] + } + + if !ok { + l.Error("error: resource type does not have resource Delete() configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Delete() configured", rt)) + } + + annos, err := rsDeleter.Delete(ctx, request.GetResourceId(), request.GetParentResourceId()) + if 
err != nil { + l.Error("error: deleteV2 resource failed", zap.Error(err)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: delete resource failed: %w", err) + } + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.DeleteResourceV2Response_builder{Annotations: annos}.Build(), nil +} + +func newResourceManager1to2(resourceManager ResourceManagerLimited) ResourceManagerV2Limited { + return &resourceManager1to2{resourceManager} +} + +type resourceManager1to2 struct { + ResourceManagerLimited +} + +func (r *resourceManager1to2) Delete(ctx context.Context, resourceId *v2.ResourceId, parentResourceID *v2.ResourceId) (annotations.Annotations, error) { + return r.ResourceManagerLimited.Delete(ctx, resourceId) +} + +func newDeleter1to2(resourceDeleter ResourceDeleterLimited) ResourceDeleterV2Limited { + return &deleter1to2{resourceDeleter} +} + +type deleter1to2 struct { + ResourceDeleterLimited +} + +func (d *deleter1to2) Delete(ctx context.Context, resourceId *v2.ResourceId, parentResourceID *v2.ResourceId) (annotations.Annotations, error) { + // Just drop the parentResourceID... + return d.ResourceDeleterLimited.Delete(ctx, resourceId) +} + +func (b *builder) addResourceManager(_ context.Context, typeId string, in interface{}) error { + if resourceManager, ok := in.(ResourceManagerLimited); ok { + if _, ok := b.resourceManagers[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for resource manager %s", typeId) + } + b.resourceManagers[typeId] = newResourceManager1to2(resourceManager) + // Support DeleteResourceV2 if connector implements both Create and Delete + if _, ok := b.resourceDeleters[typeId]; ok { + // This should never happen + return fmt.Errorf("error: duplicate resource type found for resource deleter %s", typeId) + } + b.resourceDeleters[typeId] = newDeleter1to2(resourceManager) + } else { + if resourceDeleter, ok := in.(ResourceDeleterLimited); ok { + if _, ok := b.resourceDeleters[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for resource deleter %s", typeId) + } + b.resourceDeleters[typeId] = newDeleter1to2(resourceDeleter) + } + } + + if resourceManager, ok := in.(ResourceManagerV2Limited); ok { + if _, ok := b.resourceManagers[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for resource managerV2 %s", typeId) + } + b.resourceManagers[typeId] = resourceManager + // Support DeleteResourceV2 if connector implements both Create and Delete + if _, ok := b.resourceDeleters[typeId]; ok { + // This should never happen + return fmt.Errorf("error: duplicate resource type found for resource deleterV2 %s", typeId) + } + b.resourceDeleters[typeId] = resourceManager + } else { + if resourceDeleter, ok := in.(ResourceDeleterV2Limited); ok { + if _, ok := b.resourceDeleters[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for resource deleterV2 %s", typeId) + } + b.resourceDeleters[typeId] = resourceDeleter + } + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go new file mode 100644 index 00000000..d0f8cf2e --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go @@ -0,0 +1,173 @@ +package connectorbuilder + +import ( + "context" + "fmt" + "time" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + 
"github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/retry" + "github.com/conductorone/baton-sdk/pkg/types/tasks" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" +) + +// ResourceProvisioner extends ResourceSyncer to add capabilities for granting and revoking access. +// +// Note: ResourceProvisionerV2 is preferred for new connectors as it provides +// enhanced grant capabilities. +// +// Implementing this interface indicates the connector supports provisioning operations +// for the associated resource type. +type ResourceProvisioner interface { + ResourceSyncer + ResourceProvisionerLimited +} + +type ResourceProvisionerLimited interface { + RevokeProvisioner + GrantProvisioner +} + +type RevokeProvisioner interface { + Revoke(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) +} + +type GrantProvisioner interface { + Grant(ctx context.Context, resource *v2.Resource, entitlement *v2.Entitlement) (annotations.Annotations, error) +} + +// ResourceProvisionerV2 extends ResourceSyncer to add capabilities for granting and revoking access +// with enhanced functionality compared to ResourceProvisioner. +// +// This is the recommended interface for implementing provisioning operations in new connectors. +// It differs from ResourceProvisioner by returning a list of grants from the Grant method. +type ResourceProvisionerV2 interface { + ResourceSyncer + ResourceProvisionerV2Limited +} + +type ResourceProvisionerV2Limited interface { + RevokeProvisioner + GrantProvisionerV2 +} + +type GrantProvisionerV2 interface { + Grant(ctx context.Context, resource *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) +} + +func (b *builder) Grant(ctx context.Context, request *v2.GrantManagerServiceGrantRequest) (*v2.GrantManagerServiceGrantResponse, error) { + ctx, span := tracer.Start(ctx, "builder.Grant") + defer span.End() + + start := b.nowFunc() + tt := tasks.GrantType + l := ctxzap.Extract(ctx) + + rt := request.GetEntitlement().GetResource().GetId().GetResourceType() + + provisioner, ok := b.resourceProvisioners[rt] + + if !ok { + l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: resource type does not have provisioner configured") + } + + retryer := retry.NewRetryer(ctx, retry.RetryConfig{ + MaxAttempts: 3, + InitialDelay: 15 * time.Second, + MaxDelay: 60 * time.Second, + }) + + for { + grants, annos, err := provisioner.Grant(ctx, request.GetPrincipal(), request.GetEntitlement()) + if err == nil { + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.GrantManagerServiceGrantResponse_builder{Annotations: annos, Grants: grants}.Build(), nil + } + if retryer.ShouldWaitAndRetry(ctx, err) { + continue + } + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("grant failed: %w", err) + } +} + +func (b *builder) Revoke(ctx context.Context, request *v2.GrantManagerServiceRevokeRequest) (*v2.GrantManagerServiceRevokeResponse, error) { + ctx, span := tracer.Start(ctx, "builder.Revoke") + defer span.End() + + start := b.nowFunc() + tt := tasks.RevokeType + + l := ctxzap.Extract(ctx) + + rt := request.GetGrant().GetEntitlement().GetResource().GetId().GetResourceType() + + var revokeProvisioner RevokeProvisioner + provisioner, ok := b.resourceProvisioners[rt] + if ok { + revokeProvisioner = 
provisioner + } + + if revokeProvisioner == nil { + l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: resource type does not have provisioner configured") + } + + retryer := retry.NewRetryer(ctx, retry.RetryConfig{ + MaxAttempts: 3, + InitialDelay: 15 * time.Second, + MaxDelay: 60 * time.Second, + }) + + for { + annos, err := revokeProvisioner.Revoke(ctx, request.GetGrant()) + if err == nil { + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.GrantManagerServiceRevokeResponse_builder{Annotations: annos}.Build(), nil + } + if retryer.ShouldWaitAndRetry(ctx, err) { + continue + } + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("revoke failed: %w", err) + } +} + +func newResourceProvisionerV1to2(p ResourceProvisionerLimited) ResourceProvisionerV2Limited { + return &resourceProvisionerV1to2{ + ResourceProvisionerLimited: p, + } +} + +type resourceProvisionerV1to2 struct { + ResourceProvisionerLimited +} + +func (r *resourceProvisionerV1to2) Grant(ctx context.Context, resource *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) { + annos, err := r.ResourceProvisionerLimited.Grant(ctx, resource, entitlement) + if err != nil { + return nil, annos, err + } + return nil, annos, nil +} + +func (b *builder) addProvisioner(_ context.Context, typeId string, in interface{}) error { + if provisioner, ok := in.(ResourceProvisionerLimited); ok { + if _, ok := b.resourceProvisioners[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for resource provisioner %s", typeId) + } + b.resourceProvisioners[typeId] = newResourceProvisionerV1to2(provisioner) + } + if provisioner, ok := in.(ResourceProvisionerV2Limited); ok { + if _, ok := b.resourceProvisioners[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for resource provisioner v2 %s", typeId) + } + b.resourceProvisioners[typeId] = provisioner + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_syncer.go new file mode 100644 index 00000000..4d52b830 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_syncer.go @@ -0,0 +1,403 @@ +package connectorbuilder + +import ( + "context" + "fmt" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/pagination" + "github.com/conductorone/baton-sdk/pkg/types/resource" + "github.com/conductorone/baton-sdk/pkg/types/tasks" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ResourceSyncer is the primary interface for connector developers to implement. +// +// It defines the core functionality for synchronizing resources, entitlements, and grants +// from external systems into Baton. Every connector must implement at least this interface +// for each resource type it supports. +// +// Extensions to this interface include: +// - ResourceProvisioner/ResourceProvisionerV2: For adding/removing access +// - ResourceManager: For creating and managing resources +// - ResourceDeleter: For deleting resources +// - AccountManager: For account provisioning operations +// - CredentialManager: For credential rotation operations. 
+// - ResourceTargetedSyncer: For directly getting a resource supporting targeted sync. + +type ResourceType interface { + ResourceType(ctx context.Context) *v2.ResourceType +} + +type ResourceSyncer interface { + ResourceType + List(ctx context.Context, parentResourceID *v2.ResourceId, pToken *pagination.Token) ([]*v2.Resource, string, annotations.Annotations, error) + Entitlements(ctx context.Context, resource *v2.Resource, pToken *pagination.Token) ([]*v2.Entitlement, string, annotations.Annotations, error) + Grants(ctx context.Context, resource *v2.Resource, pToken *pagination.Token) ([]*v2.Grant, string, annotations.Annotations, error) +} + +type ResourceSyncerLimited interface { + List(ctx context.Context, parentResourceID *v2.ResourceId, pToken *pagination.Token) ([]*v2.Resource, string, annotations.Annotations, error) + Entitlements(ctx context.Context, resource *v2.Resource, pToken *pagination.Token) ([]*v2.Entitlement, string, annotations.Annotations, error) + Grants(ctx context.Context, resource *v2.Resource, pToken *pagination.Token) ([]*v2.Grant, string, annotations.Annotations, error) +} + +type StaticEntitlementSyncer interface { + StaticEntitlements(ctx context.Context, pToken *pagination.Token) ([]*v2.Entitlement, string, annotations.Annotations, error) +} + +type ResourceSyncerV2 interface { + ResourceType + ResourceSyncerV2Limited +} + +type ResourceSyncerV2Limited interface { + List(ctx context.Context, parentResourceID *v2.ResourceId, opts resource.SyncOpAttrs) ([]*v2.Resource, *resource.SyncOpResults, error) + Entitlements(ctx context.Context, resource *v2.Resource, opts resource.SyncOpAttrs) ([]*v2.Entitlement, *resource.SyncOpResults, error) + Grants(ctx context.Context, resource *v2.Resource, opts resource.SyncOpAttrs) ([]*v2.Grant, *resource.SyncOpResults, error) +} + +type StaticEntitlementSyncerV2 interface { + StaticEntitlements(ctx context.Context, opts resource.SyncOpAttrs) ([]*v2.Entitlement, *resource.SyncOpResults, error) +} + +// ResourceTargetedSyncer extends ResourceSyncer to add capabilities for directly syncing an individual resource +// +// Implementing this interface indicates the connector supports calling "get" on a resource +// of the associated resource type. +type ResourceTargetedSyncer interface { + ResourceSyncer + ResourceTargetedSyncerLimited +} + +type ResourceTargetedSyncerLimited interface { + Get(ctx context.Context, resourceId *v2.ResourceId, parentResourceId *v2.ResourceId) (*v2.Resource, annotations.Annotations, error) +} + +// ListResourceTypes lists all available resource types. 
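+// One resource type is returned per registered resource syncer; an error is
+// returned when no resource builders are registered or no types are found.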
+func (b *builder) ListResourceTypes( + ctx context.Context, + request *v2.ResourceTypesServiceListResourceTypesRequest, +) (*v2.ResourceTypesServiceListResourceTypesResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListResourceTypes") + defer span.End() + + start := b.nowFunc() + tt := tasks.ListResourceTypesType + var out []*v2.ResourceType + + if len(b.resourceSyncers) == 0 { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: no resource builders found") + } + + for _, rb := range b.resourceSyncers { + out = append(out, rb.ResourceType(ctx)) + } + + if len(out) == 0 { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: no resource types found") + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.ResourceTypesServiceListResourceTypesResponse_builder{List: out}.Build(), nil +} + +// ListResources returns all available resources for a given resource type ID. +func (b *builder) ListResources(ctx context.Context, request *v2.ResourcesServiceListResourcesRequest) (*v2.ResourcesServiceListResourcesResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListResources") + defer span.End() + + start := b.nowFunc() + tt := tasks.ListResourcesType + rb, ok := b.resourceSyncers[request.GetResourceTypeId()] + if !ok { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: list resources with unknown resource type %s", request.GetResourceTypeId()) + } + + token := pagination.Token{ + Size: int(request.GetPageSize()), + Token: request.GetPageToken(), + } + opts := resource.SyncOpAttrs{SyncID: request.GetActiveSyncId(), PageToken: token, Session: WithSyncId(b.sessionStore, request.GetActiveSyncId())} + out, retOptions, err := rb.List(ctx, request.GetParentResourceId(), opts) + if retOptions == nil { + retOptions = &resource.SyncOpResults{} + } + + resp := v2.ResourcesServiceListResourcesResponse_builder{ + List: out, + NextPageToken: retOptions.NextPageToken, + Annotations: retOptions.Annotations, + }.Build() + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return resp, fmt.Errorf("error: listing resources failed: %w", err) + } + if request.GetPageToken() != "" && request.GetPageToken() == retOptions.NextPageToken { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + errMsg := fmt.Sprintf(" with page token %s resource type id %s and resource parent id: %s this is most likely a connector bug", + request.GetPageToken(), request.GetResourceTypeId(), request.GetParentResourceId()) + return resp, fmt.Errorf("error: listing resources failed: next page token is the same as the current page token %s", errMsg) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return resp, nil +} + +func (b *builder) GetResource(ctx context.Context, request *v2.ResourceGetterServiceGetResourceRequest) (*v2.ResourceGetterServiceGetResourceResponse, error) { + ctx, span := tracer.Start(ctx, "builder.GetResource") + defer span.End() + + start := b.nowFunc() + tt := tasks.GetResourceType + resourceType := request.GetResourceId().GetResourceType() + rb, ok := b.resourceTargetedSyncers[resourceType] + if !ok { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Errorf(codes.Unimplemented, "error: get resource with unknown resource type %s", resourceType) + } + resource, annos, err := rb.Get(ctx, request.GetResourceId(), request.GetParentResourceId()) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, 
b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: get resource failed: %w", err) + } + if resource == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Error(codes.NotFound, "error: get resource returned nil") + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.ResourceGetterServiceGetResourceResponse_builder{ + Resource: resource, + Annotations: annos, + }.Build(), nil +} + +// ListStaticEntitlements returns all the static entitlements for a given resource type. +// Static entitlements are used to create entitlements for all resources of a given resource type. +func (b *builder) ListStaticEntitlements(ctx context.Context, request *v2.EntitlementsServiceListStaticEntitlementsRequest) (*v2.EntitlementsServiceListStaticEntitlementsResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListStaticEntitlements") + defer span.End() + + start := b.nowFunc() + tt := tasks.ListStaticEntitlementsType + rb, ok := b.resourceSyncers[request.GetResourceTypeId()] + if !ok { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: list static entitlements with unknown resource type %s", request.GetResourceTypeId()) + } + rbse, ok := rb.(StaticEntitlementSyncerV2) + if !ok { + // Resource syncer doesn't support static entitlements. Return empty response. + return v2.EntitlementsServiceListStaticEntitlementsResponse_builder{ + List: []*v2.Entitlement{}, + NextPageToken: "", + Annotations: nil, + }.Build(), nil + } + + token := pagination.Token{ + Size: int(request.GetPageSize()), + Token: request.GetPageToken(), + } + opts := resource.SyncOpAttrs{SyncID: request.GetActiveSyncId(), PageToken: token, Session: WithSyncId(b.sessionStore, request.GetActiveSyncId())} + out, retOptions, err := rbse.StaticEntitlements(ctx, opts) + if retOptions == nil { + retOptions = &resource.SyncOpResults{} + } + + resp := v2.EntitlementsServiceListStaticEntitlementsResponse_builder{ + List: out, + NextPageToken: retOptions.NextPageToken, + Annotations: retOptions.Annotations, + }.Build() + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: listing static entitlements failed: %w", err) + } + if request.GetPageToken() != "" && request.GetPageToken() == retOptions.NextPageToken { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return resp, fmt.Errorf("error: listing static entitlements failed: next page token is the same as the current page token. this is most likely a connector bug") + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return resp, nil +} + +// ListEntitlements returns all the entitlements for a given resource. 
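+//
+// The request's page token is carried to the resource syncer inside
+// resource.SyncOpAttrs, and the syncer reports continuation state back through
+// resource.SyncOpResults. A sketch of the return shape a ResourceSyncerV2Limited
+// implementation produces (ents, annos, and the token value are placeholders):
+//
+//	return ents, &resource.SyncOpResults{NextPageToken: "page-2", Annotations: annos}, nil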
+func (b *builder) ListEntitlements(ctx context.Context, request *v2.EntitlementsServiceListEntitlementsRequest) (*v2.EntitlementsServiceListEntitlementsResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListEntitlements") + defer span.End() + + start := b.nowFunc() + tt := tasks.ListEntitlementsType + rb, ok := b.resourceSyncers[request.GetResource().GetId().GetResourceType()] + if !ok { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: list entitlements with unknown resource type %s", request.GetResource().GetId().GetResourceType()) + } + token := pagination.Token{ + Size: int(request.GetPageSize()), + Token: request.GetPageToken(), + } + opts := resource.SyncOpAttrs{SyncID: request.GetActiveSyncId(), PageToken: token, Session: WithSyncId(b.sessionStore, request.GetActiveSyncId())} + out, retOptions, err := rb.Entitlements(ctx, request.GetResource(), opts) + if retOptions == nil { + retOptions = &resource.SyncOpResults{} + } + + resp := v2.EntitlementsServiceListEntitlementsResponse_builder{ + List: out, + NextPageToken: retOptions.NextPageToken, + Annotations: retOptions.Annotations, + }.Build() + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return resp, fmt.Errorf("error: listing entitlements failed: %w", err) + } + if request.GetPageToken() != "" && request.GetPageToken() == retOptions.NextPageToken { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return resp, fmt.Errorf("error: listing entitlements failed: next page token is the same as the current page token. this is most likely a connector bug") + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return resp, nil +} + +// ListGrants lists all the grants for a given resource. +func (b *builder) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListGrants") + defer span.End() + + start := b.nowFunc() + tt := tasks.ListGrantsType + rid := request.GetResource().GetId() + rb, ok := b.resourceSyncers[rid.GetResourceType()] + if !ok { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: list grants with unknown resource type %s", rid.GetResourceType()) + } + + token := pagination.Token{ + Size: int(request.GetPageSize()), + Token: request.GetPageToken(), + } + opts := resource.SyncOpAttrs{SyncID: request.GetActiveSyncId(), PageToken: token, Session: WithSyncId(b.sessionStore, request.GetActiveSyncId())} + out, retOptions, err := rb.Grants(ctx, request.GetResource(), opts) + if retOptions == nil { + retOptions = &resource.SyncOpResults{} + } + + resp := v2.GrantsServiceListGrantsResponse_builder{ + List: out, + Annotations: retOptions.Annotations, + NextPageToken: retOptions.NextPageToken, + }.Build() + + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return resp, fmt.Errorf("error: listing grants for resource %s/%s failed: %w", rid.GetResourceType(), rid.GetResource(), err) + } + if request.GetPageToken() != "" && request.GetPageToken() == retOptions.NextPageToken { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return resp, fmt.Errorf("error: listing grants for resource %s/%s failed: next page token is the same as the current page token. 
this is most likely a connector bug", + rid.GetResourceType(), + rid.GetResource()) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return resp, nil +} + +func newResourceSyncerV1toV2(rb ResourceSyncer) ResourceSyncerV2 { + return &resourceSyncerV1toV2{rb: rb} +} + +type resourceSyncerV1toV2 struct { + rb ResourceSyncer +} + +var _ ResourceSyncerV2 = &resourceSyncerV1toV2{} +var _ StaticEntitlementSyncerV2 = &resourceSyncerV1toV2{} + +func (rw *resourceSyncerV1toV2) ResourceType(ctx context.Context) *v2.ResourceType { + return rw.rb.ResourceType(ctx) +} + +func (rw *resourceSyncerV1toV2) List(ctx context.Context, parentResourceID *v2.ResourceId, opts resource.SyncOpAttrs) ([]*v2.Resource, *resource.SyncOpResults, error) { + resources, pageToken, annos, err := rw.rb.List(ctx, parentResourceID, &opts.PageToken) + ret := &resource.SyncOpResults{NextPageToken: pageToken, Annotations: annos} + return resources, ret, err +} + +func (rw *resourceSyncerV1toV2) Entitlements(ctx context.Context, r *v2.Resource, opts resource.SyncOpAttrs) ([]*v2.Entitlement, *resource.SyncOpResults, error) { + ents, pageToken, annos, err := rw.rb.Entitlements(ctx, r, &opts.PageToken) + ret := &resource.SyncOpResults{NextPageToken: pageToken, Annotations: annos} + return ents, ret, err +} + +func (rw *resourceSyncerV1toV2) StaticEntitlements(ctx context.Context, opts resource.SyncOpAttrs) ([]*v2.Entitlement, *resource.SyncOpResults, error) { + rb, ok := rw.rb.(StaticEntitlementSyncer) + if !ok { + return nil, &resource.SyncOpResults{NextPageToken: "", Annotations: annotations.Annotations{}}, nil + } + + ents, pageToken, annos, err := rb.StaticEntitlements(ctx, &opts.PageToken) + ret := &resource.SyncOpResults{NextPageToken: pageToken, Annotations: annos} + return ents, ret, err +} + +func (rw *resourceSyncerV1toV2) Grants(ctx context.Context, r *v2.Resource, opts resource.SyncOpAttrs) ([]*v2.Grant, *resource.SyncOpResults, error) { + grants, pageToken, annos, err := rw.rb.Grants(ctx, r, &opts.PageToken) + ret := &resource.SyncOpResults{NextPageToken: pageToken, Annotations: annos} + return grants, ret, err +} + +func (b *builder) addTargetedSyncer(_ context.Context, typeId string, in any) error { + if targetedSyncer, ok := in.(ResourceTargetedSyncerLimited); ok { + if _, ok := b.resourceTargetedSyncers[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for resource targeted syncer %s", typeId) + } + b.resourceTargetedSyncers[typeId] = targetedSyncer + } + return nil +} + +func (b *builder) addResourceSyncers(ctx context.Context, typeId string, in any) error { + // no duplicates + if _, ok := b.resourceSyncers[typeId]; ok { + return fmt.Errorf("error: duplicate resource type found for resource builder %s", typeId) + } + + if rb, ok := in.(ResourceSyncer); ok { + b.resourceSyncers[typeId] = newResourceSyncerV1toV2(rb) + } + + if rb, ok := in.(ResourceSyncerV2); ok { + b.resourceSyncers[typeId] = rb + } + + // A resource syncer is required + if _, ok := b.resourceSyncers[typeId]; !ok { + return fmt.Errorf("error: the resource syncer interface must be implemented for all types (%s)", typeId) + } + + // Check for resource actions + if actionProvider, ok := in.(ResourceActionProvider); ok { + registry, err := b.actionManager.GetTypeRegistry(ctx, typeId) + if err != nil { + return fmt.Errorf("error getting resource type action registry for %s: %w", typeId, err) + } + err = actionProvider.ResourceActions(ctx, registry) + if err != nil { + return fmt.Errorf("error getting resource actions 
for %s: %w", typeId, err) + } + } + + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/session_store.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/session_store.go new file mode 100644 index 00000000..0362404d --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/session_store.go @@ -0,0 +1,58 @@ +package connectorbuilder + +import ( + "context" + + "github.com/conductorone/baton-sdk/pkg/types/sessions" +) + +var _ sessions.SessionStore = (*SessionStoreWithSyncID)(nil) + +// SessionStoreWithSyncID wraps a SessionStore to automatically inject sync ID into all operations. +type SessionStoreWithSyncID struct { + ss sessions.SessionStore + syncID string +} + +// WithSyncId creates a new SessionStore wrapper that prepends sync ID to all operations. +func WithSyncId(ss sessions.SessionStore, syncID string) sessions.SessionStore { + return &SessionStoreWithSyncID{ + ss: ss, + syncID: syncID, + } +} + +func (w *SessionStoreWithSyncID) Get(ctx context.Context, key string, opt ...sessions.SessionStoreOption) ([]byte, bool, error) { + opts := append([]sessions.SessionStoreOption{sessions.WithSyncID(w.syncID)}, opt...) + return w.ss.Get(ctx, key, opts...) +} + +func (w *SessionStoreWithSyncID) GetMany(ctx context.Context, keys []string, opt ...sessions.SessionStoreOption) (map[string][]byte, []string, error) { + opts := append([]sessions.SessionStoreOption{sessions.WithSyncID(w.syncID)}, opt...) + return w.ss.GetMany(ctx, keys, opts...) +} + +func (w *SessionStoreWithSyncID) Set(ctx context.Context, key string, value []byte, opt ...sessions.SessionStoreOption) error { + opts := append([]sessions.SessionStoreOption{sessions.WithSyncID(w.syncID)}, opt...) + return w.ss.Set(ctx, key, value, opts...) +} + +func (w *SessionStoreWithSyncID) SetMany(ctx context.Context, values map[string][]byte, opt ...sessions.SessionStoreOption) error { + opts := append([]sessions.SessionStoreOption{sessions.WithSyncID(w.syncID)}, opt...) + return w.ss.SetMany(ctx, values, opts...) +} + +func (w *SessionStoreWithSyncID) Delete(ctx context.Context, key string, opt ...sessions.SessionStoreOption) error { + opts := append([]sessions.SessionStoreOption{sessions.WithSyncID(w.syncID)}, opt...) + return w.ss.Delete(ctx, key, opts...) +} + +func (w *SessionStoreWithSyncID) Clear(ctx context.Context, opt ...sessions.SessionStoreOption) error { + opts := append([]sessions.SessionStoreOption{sessions.WithSyncID(w.syncID)}, opt...) + return w.ss.Clear(ctx, opts...) +} + +func (w *SessionStoreWithSyncID) GetAll(ctx context.Context, pageToken string, opt ...sessions.SessionStoreOption) (map[string][]byte, string, error) { + opts := append([]sessions.SessionStoreOption{sessions.WithSyncID(w.syncID)}, opt...) + return w.ss.GetAll(ctx, pageToken, opts...) 
+} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/tickets.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/tickets.go new file mode 100644 index 00000000..740cc3c5 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/tickets.go @@ -0,0 +1,242 @@ +package connectorbuilder + +import ( + "context" + "fmt" + "time" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/pagination" + "github.com/conductorone/baton-sdk/pkg/retry" + "github.com/conductorone/baton-sdk/pkg/types/tasks" +) + +// TicketManager extends ConnectorBuilder to add capabilities for ticket management. +// +// Implementing this interface indicates the connector can integrate with an external +// ticketing system, allowing Baton to create and track tickets in that system. +type TicketManager interface { + ConnectorBuilder + TicketManagerLimited +} + +type TicketManagerLimited interface { + GetTicket(ctx context.Context, ticketId string) (*v2.Ticket, annotations.Annotations, error) + CreateTicket(ctx context.Context, ticket *v2.Ticket, schema *v2.TicketSchema) (*v2.Ticket, annotations.Annotations, error) + GetTicketSchema(ctx context.Context, schemaID string) (*v2.TicketSchema, annotations.Annotations, error) + ListTicketSchemas(ctx context.Context, pToken *pagination.Token) ([]*v2.TicketSchema, string, annotations.Annotations, error) + BulkCreateTickets(context.Context, *v2.TicketsServiceBulkCreateTicketsRequest) (*v2.TicketsServiceBulkCreateTicketsResponse, error) + BulkGetTickets(context.Context, *v2.TicketsServiceBulkGetTicketsRequest) (*v2.TicketsServiceBulkGetTicketsResponse, error) +} + +func (b *builder) BulkCreateTickets(ctx context.Context, request *v2.TicketsServiceBulkCreateTicketsRequest) (*v2.TicketsServiceBulkCreateTicketsResponse, error) { + ctx, span := tracer.Start(ctx, "builder.BulkCreateTickets") + defer span.End() + + start := b.nowFunc() + tt := tasks.BulkCreateTicketsType + if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: ticket manager not implemented") + } + + reqBody := request.GetTicketRequests() + if len(reqBody) == 0 { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: request body had no items") + } + + ticketsResponse, err := b.ticketManager.BulkCreateTickets(ctx, request) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: creating tickets failed: %w", err) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.TicketsServiceBulkCreateTicketsResponse_builder{ + Tickets: ticketsResponse.GetTickets(), + }.Build(), nil +} + +func (b *builder) BulkGetTickets(ctx context.Context, request *v2.TicketsServiceBulkGetTicketsRequest) (*v2.TicketsServiceBulkGetTicketsResponse, error) { + ctx, span := tracer.Start(ctx, "builder.BulkGetTickets") + defer span.End() + + start := b.nowFunc() + tt := tasks.BulkGetTicketsType + if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: ticket manager not implemented") + } + + reqBody := request.GetTicketRequests() + if len(reqBody) == 0 { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: request body had no items") + } + + ticketsResponse, err := b.ticketManager.BulkGetTickets(ctx, request) + if err != nil { + 
b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: fetching tickets failed: %w", err) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.TicketsServiceBulkGetTicketsResponse_builder{ + Tickets: ticketsResponse.GetTickets(), + }.Build(), nil +} + +func (b *builder) ListTicketSchemas(ctx context.Context, request *v2.TicketsServiceListTicketSchemasRequest) (*v2.TicketsServiceListTicketSchemasResponse, error) { + ctx, span := tracer.Start(ctx, "builder.ListTicketSchemas") + defer span.End() + + start := b.nowFunc() + tt := tasks.ListTicketSchemasType + if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: ticket manager not implemented") + } + + retryer := retry.NewRetryer(ctx, retry.RetryConfig{ + MaxAttempts: 10, + InitialDelay: 15 * time.Second, + MaxDelay: 0, + }) + + for { + out, nextPageToken, annos, err := b.ticketManager.ListTicketSchemas(ctx, &pagination.Token{ + Size: int(request.GetPageSize()), + Token: request.GetPageToken(), + }) + if err == nil { + if request.GetPageToken() != "" && request.GetPageToken() == nextPageToken { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: listing ticket schemas failed: next page token is the same as the current page token. this is most likely a connector bug") + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.TicketsServiceListTicketSchemasResponse_builder{ + List: out, + NextPageToken: nextPageToken, + Annotations: annos, + }.Build(), nil + } + if retryer.ShouldWaitAndRetry(ctx, err) { + continue + } + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: listing ticket schemas failed: %w", err) + } +} + +func (b *builder) CreateTicket(ctx context.Context, request *v2.TicketsServiceCreateTicketRequest) (*v2.TicketsServiceCreateTicketResponse, error) { + ctx, span := tracer.Start(ctx, "builder.CreateTicket") + defer span.End() + + start := b.nowFunc() + tt := tasks.CreateTicketType + if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: ticket manager not implemented") + } + + reqBody := request.GetRequest() + if reqBody == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: request body is nil") + } + cTicket := v2.Ticket_builder{ + DisplayName: reqBody.GetDisplayName(), + Description: reqBody.GetDescription(), + Status: reqBody.GetStatus(), + Labels: reqBody.GetLabels(), + CustomFields: reqBody.GetCustomFields(), + RequestedFor: reqBody.GetRequestedFor(), + }.Build() + + ticket, annos, err := b.ticketManager.CreateTicket(ctx, cTicket, request.GetSchema()) + var resp *v2.TicketsServiceCreateTicketResponse + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + if ticket != nil { + resp = v2.TicketsServiceCreateTicketResponse_builder{ + Ticket: ticket, + Annotations: annos, + }.Build() + } + return resp, fmt.Errorf("error: creating ticket failed: %w", err) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.TicketsServiceCreateTicketResponse_builder{ + Ticket: ticket, + Annotations: annos, + }.Build(), nil +} + +func (b *builder) GetTicket(ctx context.Context, request *v2.TicketsServiceGetTicketRequest) (*v2.TicketsServiceGetTicketResponse, error) { + ctx, span := tracer.Start(ctx, "builder.GetTicket") + defer span.End() + + start := b.nowFunc() + tt := tasks.GetTicketType + 
if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: ticket manager not implemented") + } + + var resp *v2.TicketsServiceGetTicketResponse + ticket, annos, err := b.ticketManager.GetTicket(ctx, request.GetId()) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + if ticket != nil { + resp = v2.TicketsServiceGetTicketResponse_builder{ + Ticket: ticket, + Annotations: annos, + }.Build() + } + return resp, fmt.Errorf("error: getting ticket failed: %w", err) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.TicketsServiceGetTicketResponse_builder{ + Ticket: ticket, + Annotations: annos, + }.Build(), nil +} + +func (b *builder) GetTicketSchema(ctx context.Context, request *v2.TicketsServiceGetTicketSchemaRequest) (*v2.TicketsServiceGetTicketSchemaResponse, error) { + ctx, span := tracer.Start(ctx, "builder.GetTicketSchema") + defer span.End() + + start := b.nowFunc() + tt := tasks.GetTicketSchemaType + if b.ticketManager == nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: ticket manager not implemented") + } + + ticketSchema, annos, err := b.ticketManager.GetTicketSchema(ctx, request.GetId()) + if err != nil { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, fmt.Errorf("error: getting ticket metadata failed: %w", err) + } + + b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) + return v2.TicketsServiceGetTicketSchemaResponse_builder{ + Schema: ticketSchema, + Annotations: annos, + }.Build(), nil +} + +func (b *builder) addTicketManager(_ context.Context, in interface{}) error { + if ticketManager, ok := in.(TicketManagerLimited); ok { + if b.ticketManager != nil { + return fmt.Errorf("error: cannot set multiple ticket managers") + } + b.ticketManager = ticketManager + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go index a0b76df3..7974ef45 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/conductorone/baton-sdk/pkg/bid" "github.com/conductorone/baton-sdk/pkg/synccompactor" "golang.org/x/sync/semaphore" "google.golang.org/protobuf/types/known/structpb" @@ -19,6 +20,7 @@ import ( "go.uber.org/zap/zapcore" "google.golang.org/protobuf/types/known/durationpb" + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" ratelimitV1 "github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1" "github.com/conductorone/baton-sdk/pkg/tasks" @@ -133,7 +135,7 @@ func (c *connectorRunner) processTask(ctx context.Context, task *v1.Task) error return nil } -func (c *connectorRunner) backoff(ctx context.Context, errCount int) time.Duration { +func (c *connectorRunner) backoff(_ context.Context, errCount int) time.Duration { waitDuration := time.Duration(errCount*errCount) * time.Second if waitDuration > time.Minute { waitDuration = time.Minute @@ -189,7 +191,7 @@ func (c *connectorRunner) run(ctx context.Context) error { continue } - l.Debug("runner: got task", zap.String("task_id", nextTask.Id), zap.String("task_type", tasks.GetType(nextTask).String())) + l.Debug("runner: got task", zap.String("task_id", nextTask.GetId()), zap.String("task_type", 
tasks.GetType(nextTask).String())) // If we're in one-shot mode, process the task synchronously. if c.oneShot { @@ -200,7 +202,7 @@ func (c *connectorRunner) run(ctx context.Context) error { l.Error( "runner: error processing on-demand task", zap.Error(err), - zap.String("task_id", nextTask.Id), + zap.String("task_id", nextTask.GetId()), zap.String("task_type", tasks.GetType(nextTask).String()), ) return err @@ -210,17 +212,17 @@ func (c *connectorRunner) run(ctx context.Context) error { // We got a task, so process it concurrently. go func(t *v1.Task) { - l.Debug("runner: starting processing task", zap.String("task_id", t.Id), zap.String("task_type", tasks.GetType(t).String())) + l.Debug("runner: starting processing task", zap.String("task_id", t.GetId()), zap.String("task_type", tasks.GetType(t).String())) defer sem.Release(1) err := c.processTask(ctx, t) if err != nil { if strings.Contains(err.Error(), "grpc: the client connection is closing") { stopForLoop = true } - l.Error("runner: error processing task", zap.Error(err), zap.String("task_id", t.Id), zap.String("task_type", tasks.GetType(t).String())) + l.Error("runner: error processing task", zap.Error(err), zap.String("task_id", t.GetId()), zap.String("task_type", tasks.GetType(t).String())) return } - l.Debug("runner: task processed", zap.String("task_id", t.Id), zap.String("task_type", tasks.GetType(t).String())) + l.Debug("runner: task processed", zap.String("task_id", t.GetId()), zap.String("task_type", tasks.GetType(t).String())) }(nextTask) l.Debug("runner: dispatched task, waiting for next task", zap.Duration("wait_duration", waitDuration)) @@ -284,8 +286,13 @@ type createAccountConfig struct { } type invokeActionConfig struct { - action string - args *structpb.Struct + action string + resourceTypeID string // Optional: if set, invokes a resource-scoped action + args *structpb.Struct +} + +type listActionSchemasConfig struct { + resourceTypeID string // Optional: filter by resource type } type deleteResourceConfig struct { @@ -301,6 +308,7 @@ type rotateCredentialsConfig struct { type eventStreamConfig struct { feedId string startAt time.Time + cursor string } type syncDifferConfig struct { @@ -331,6 +339,7 @@ type runnerConfig struct { tempDir string createAccountConfig *createAccountConfig invokeActionConfig *invokeActionConfig + listActionSchemasConfig *listActionSchemasConfig deleteResourceConfig *deleteResourceConfig rotateCredentialsConfig *rotateCredentialsConfig createTicketConfig *createTicketConfig @@ -344,6 +353,16 @@ type runnerConfig struct { externalResourceC1Z string externalResourceEntitlementIdFilter string skipEntitlementsAndGrants bool + skipGrants bool + sessionStoreEnabled bool + syncResourceTypeIDs []string +} + +func WithSessionStoreEnabled() Option { + return func(ctx context.Context, w *runnerConfig) error { + w.sessionStoreEnabled = true + return nil + } } // WithRateLimiterConfig sets the RateLimiterConfig for a runner. @@ -361,14 +380,12 @@ func WithRateLimiterConfig(cfg *ratelimitV1.RateLimiterConfig) Option { // The `opts` map is injected into the environment in order for the service to be configured. 
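+//
+// A usage sketch (the address, option keys, and connectorServer value are
+// illustrative, not defaults shipped with the SDK):
+//
+//	runner, err := connectorrunner.NewConnectorRunner(ctx, connectorServer,
+//		connectorrunner.WithExternalLimiter("127.0.0.1:9090", map[string]string{"LIMITER_TOKEN": "example"}),
+//	)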
func WithExternalLimiter(address string, opts map[string]string) Option { return func(ctx context.Context, w *runnerConfig) error { - w.rlCfg = &ratelimitV1.RateLimiterConfig{ - Type: &ratelimitV1.RateLimiterConfig_External{ - External: &ratelimitV1.ExternalLimiter{ - Address: address, - Options: opts, - }, - }, - } + w.rlCfg = ratelimitV1.RateLimiterConfig_builder{ + External: ratelimitV1.ExternalLimiter_builder{ + Address: address, + Options: opts, + }.Build(), + }.Build() return nil } @@ -379,13 +396,14 @@ func WithExternalLimiter(address string, opts map[string]string) Option { // `usePercent` is value between 0 and 100. func WithSlidingMemoryLimiter(usePercent int64) Option { return func(ctx context.Context, w *runnerConfig) error { - w.rlCfg = &ratelimitV1.RateLimiterConfig{ - Type: &ratelimitV1.RateLimiterConfig_SlidingMem{ - SlidingMem: &ratelimitV1.SlidingMemoryLimiter{ - UsePercent: float64(usePercent / 100), - }, - }, + if usePercent < 0 || usePercent > 100 { + return fmt.Errorf("usePercent must be between 0 and 100") } + w.rlCfg = ratelimitV1.RateLimiterConfig_builder{ + SlidingMem: ratelimitV1.SlidingMemoryLimiter_builder{ + UsePercent: float64(usePercent) / 100.0, + }.Build(), + }.Build() return nil } @@ -396,14 +414,12 @@ func WithSlidingMemoryLimiter(usePercent int64) Option { // `period` represents the elapsed time between two instants as an int64 nanosecond count. func WithFixedMemoryLimiter(rate int64, period time.Duration) Option { return func(ctx context.Context, w *runnerConfig) error { - w.rlCfg = &ratelimitV1.RateLimiterConfig{ - Type: &ratelimitV1.RateLimiterConfig_FixedMem{ - FixedMem: &ratelimitV1.FixedMemoryLimiter{ - Rate: rate, - Period: durationpb.New(period), - }, - }, - } + w.rlCfg = ratelimitV1.RateLimiterConfig_builder{ + FixedMem: ratelimitV1.FixedMemoryLimiter_builder{ + Rate: rate, + Period: durationpb.New(period), + }.Build(), + }.Build() return nil } @@ -467,13 +483,29 @@ func WithOnDemandCreateAccount(c1zPath string, login string, email string, profi } } -func WithOnDemandInvokeAction(c1zPath string, action string, args *structpb.Struct) Option { +// WithOnDemandInvokeAction creates an option for invoking an action. +// If resourceTypeID is provided, it invokes a resource-scoped action. +func WithOnDemandInvokeAction(c1zPath string, action string, resourceTypeID string, args *structpb.Struct) Option { return func(ctx context.Context, cfg *runnerConfig) error { cfg.onDemand = true cfg.c1zPath = c1zPath cfg.invokeActionConfig = &invokeActionConfig{ - action: action, - args: args, + action: action, + resourceTypeID: resourceTypeID, + args: args, + } + return nil + } +} + +// WithOnDemandListActionSchemas creates an option for listing action schemas. +// If resourceTypeID is provided, it filters schemas for that specific resource type. 
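+//
+// A usage sketch (the c1z path and resource type ID are illustrative); an empty
+// resourceTypeID applies no filter:
+//
+//	runner, err := connectorrunner.NewConnectorRunner(ctx, connectorServer,
+//		connectorrunner.WithOnDemandListActionSchemas("./out.c1z", "team"),
+//	)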
+func WithOnDemandListActionSchemas(c1zPath string, resourceTypeID string) Option { + return func(ctx context.Context, cfg *runnerConfig) error { + cfg.onDemand = true + cfg.c1zPath = c1zPath + cfg.listActionSchemasConfig = &listActionSchemasConfig{ + resourceTypeID: resourceTypeID, } return nil } @@ -511,12 +543,13 @@ func WithOnDemandSync(c1zPath string) Option { } } -func WithOnDemandEventStream(feedId string, startAt time.Time) Option { +func WithOnDemandEventStream(feedId string, startAt time.Time, cursor string) Option { return func(ctx context.Context, cfg *runnerConfig) error { cfg.onDemand = true cfg.eventFeedConfig = &eventStreamConfig{ feedId: feedId, startAt: startAt, + cursor: cursor, } return nil } @@ -543,13 +576,20 @@ func WithFullSyncDisabled() Option { } } -func WithTargetedSyncResourceIDs(resourceIDs []string) Option { +func WithTargetedSyncResources(resourceIDs []string) Option { return func(ctx context.Context, cfg *runnerConfig) error { cfg.targetedSyncResourceIDs = resourceIDs return nil } } +func WithSyncResourceTypeIDs(resourceTypeIDs []string) Option { + return func(ctx context.Context, cfg *runnerConfig) error { + cfg.syncResourceTypeIDs = resourceTypeIDs + return nil + } +} + func WithTicketingEnabled() Option { return func(ctx context.Context, cfg *runnerConfig) error { cfg.ticketingEnabled = true @@ -649,6 +689,29 @@ func WithSkipEntitlementsAndGrants(skip bool) Option { } } +func WithSkipGrants(skip bool) Option { + return func(ctx context.Context, cfg *runnerConfig) error { + if skip && len(cfg.targetedSyncResourceIDs) == 0 { + return fmt.Errorf("skip-grants can only be set within a targeted sync") + } + cfg.skipGrants = skip + return nil + } +} + +func IsSessionStoreEnabled(ctx context.Context, options ...Option) (bool, error) { + cfg := &runnerConfig{} + + for _, o := range options { + err := o(ctx, cfg) + if err != nil { + return false, err + } + } + + return cfg.sessionStoreEnabled, nil +} + // NewConnectorRunner creates a new connector runner. func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Option) (*connectorRunner, error) { runner := &connectorRunner{} @@ -681,7 +744,15 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op } if len(cfg.targetedSyncResourceIDs) > 0 { - wrapperOpts = append(wrapperOpts, connector.WithTargetedSyncResourceIDs(cfg.targetedSyncResourceIDs)) + wrapperOpts = append(wrapperOpts, connector.WithTargetedSyncResources(cfg.targetedSyncResourceIDs)) + } + + if cfg.sessionStoreEnabled { + wrapperOpts = append(wrapperOpts, connector.WithSessionStoreEnabled()) + } + + if len(cfg.syncResourceTypeIDs) > 0 { + wrapperOpts = append(wrapperOpts, connector.WithSyncResourceTypeIDs(cfg.syncResourceTypeIDs)) } cw, err := connector.NewWrapper(ctx, c, wrapperOpts...) 
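+	// The targeted sync resource IDs supplied via WithTargetedSyncResources are baton
+	// resource identifiers (BIDs); they are parsed below into *v2.Resource values with
+	// bid.ParseResourceBid before being handed to the local sync task manager and to
+	// the C1 task manager.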
@@ -689,10 +760,25 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op return nil, err } + resources := make([]*v2.Resource, 0, len(cfg.targetedSyncResourceIDs)) + for _, resourceId := range cfg.targetedSyncResourceIDs { + r, err := bid.ParseResourceBid(resourceId) + if err != nil { + return nil, err + } + resources = append(resources, r) + } + runner.cw = cw if cfg.onDemand { - if cfg.c1zPath == "" && cfg.eventFeedConfig == nil && cfg.createTicketConfig == nil && cfg.listTicketSchemasConfig == nil && cfg.getTicketConfig == nil && cfg.bulkCreateTicketConfig == nil { + if cfg.c1zPath == "" && + cfg.eventFeedConfig == nil && + cfg.createTicketConfig == nil && + cfg.listTicketSchemasConfig == nil && + cfg.getTicketConfig == nil && + cfg.bulkCreateTicketConfig == nil && + cfg.listActionSchemasConfig == nil { return nil, errors.New("c1zPath must be set when in on-demand mode") } @@ -714,7 +800,10 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op tm = local.NewCreateAccountManager(ctx, cfg.c1zPath, cfg.createAccountConfig.login, cfg.createAccountConfig.email, cfg.createAccountConfig.profile) case cfg.invokeActionConfig != nil: - tm = local.NewActionInvoker(ctx, cfg.c1zPath, cfg.invokeActionConfig.action, cfg.invokeActionConfig.args) + tm = local.NewActionInvoker(ctx, cfg.c1zPath, cfg.invokeActionConfig.action, cfg.invokeActionConfig.resourceTypeID, cfg.invokeActionConfig.args) + + case cfg.listActionSchemasConfig != nil: + tm = local.NewListActionSchemas(ctx, cfg.listActionSchemasConfig.resourceTypeID) case cfg.deleteResourceConfig != nil: tm = local.NewResourceDeleter(ctx, cfg.c1zPath, cfg.deleteResourceConfig.resourceId, cfg.deleteResourceConfig.resourceType) @@ -723,7 +812,7 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op tm = local.NewCredentialRotator(ctx, cfg.c1zPath, cfg.rotateCredentialsConfig.resourceId, cfg.rotateCredentialsConfig.resourceType) case cfg.eventFeedConfig != nil: - tm = local.NewEventFeed(ctx, cfg.eventFeedConfig.feedId, cfg.eventFeedConfig.startAt) + tm = local.NewEventFeed(ctx, cfg.eventFeedConfig.feedId, cfg.eventFeedConfig.startAt, cfg.eventFeedConfig.cursor) case cfg.createTicketConfig != nil: tm = local.NewTicket(ctx, cfg.createTicketConfig.templatePath) case cfg.listTicketSchemasConfig != nil: @@ -752,8 +841,10 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op local.WithTmpDir(cfg.tempDir), local.WithExternalResourceC1Z(cfg.externalResourceC1Z), local.WithExternalResourceEntitlementIdFilter(cfg.externalResourceEntitlementIdFilter), - local.WithTargetedSyncResourceIDs(cfg.targetedSyncResourceIDs), + local.WithTargetedSyncResources(resources), local.WithSkipEntitlementsAndGrants(cfg.skipEntitlementsAndGrants), + local.WithSkipGrants(cfg.skipGrants), + local.WithSyncResourceTypeIDs(cfg.syncResourceTypeIDs), ) if err != nil { return nil, err @@ -766,7 +857,16 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op return runner, nil } - tm, err := c1api.NewC1TaskManager(ctx, cfg.clientID, cfg.clientSecret, cfg.tempDir, cfg.skipFullSync, cfg.externalResourceC1Z, cfg.externalResourceEntitlementIdFilter, cfg.targetedSyncResourceIDs) + tm, err := c1api.NewC1TaskManager(ctx, + cfg.clientID, + cfg.clientSecret, + cfg.tempDir, + cfg.skipFullSync, + cfg.externalResourceC1Z, + cfg.externalResourceEntitlementIdFilter, + resources, + cfg.syncResourceTypeIDs, + ) if err != nil { return nil, err } diff --git 
a/vendor/github.com/conductorone/baton-sdk/pkg/crypto/client_secret.go b/vendor/github.com/conductorone/baton-sdk/pkg/crypto/client_secret.go index 6126db43..f5fd9e9d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/crypto/client_secret.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/crypto/client_secret.go @@ -1,4 +1,4 @@ -package crypto +package crypto //nolint:revive,nolintlint // we can't change the package name for backwards compatibility import ( "bytes" diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/crypto/crypto.go b/vendor/github.com/conductorone/baton-sdk/pkg/crypto/crypto.go index 2f7a6522..7a287bce 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/crypto/crypto.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/crypto/crypto.go @@ -1,4 +1,4 @@ -package crypto +package crypto //nolint:revive,nolintlint // we can't change the package name for backwards compatibility import ( "context" @@ -81,7 +81,7 @@ func decryptPassword(ctx context.Context, encryptedPassword *v2.EncryptedData, d return "", status.Errorf(codes.Internal, "error decrypting password: %v", err) } - return string(plaintext.Bytes), nil + return string(plaintext.GetBytes()), nil } func ConvertCredentialOptions(ctx context.Context, clientSecret *jose.JSONWebKey, opts *v2.CredentialOptions, encryptionConfigs []*v2.EncryptionConfig) (*v2.LocalCredentialOptions, error) { @@ -90,29 +90,23 @@ func ConvertCredentialOptions(ctx context.Context, clientSecret *jose.JSONWebKey return nil, nil } - localOpts := &v2.LocalCredentialOptions{ - ForceChangeAtNextLogin: opts.ForceChangeAtNextLogin, - } - - switch opts.Options.(type) { - case *v2.CredentialOptions_RandomPassword_: - localOpts.Options = &v2.LocalCredentialOptions_RandomPassword_{ - RandomPassword: &v2.LocalCredentialOptions_RandomPassword{ - Length: opts.GetRandomPassword().GetLength(), - Constraints: opts.GetRandomPassword().GetConstraints(), - }, - } - case *v2.CredentialOptions_NoPassword_: - localOpts.Options = &v2.LocalCredentialOptions_NoPassword_{ - NoPassword: &v2.LocalCredentialOptions_NoPassword{}, - } - case *v2.CredentialOptions_Sso: - localOpts.Options = &v2.LocalCredentialOptions_Sso{ - Sso: &v2.LocalCredentialOptions_SSO{ - SsoProvider: opts.GetSso().GetSsoProvider(), - }, - } - case *v2.CredentialOptions_EncryptedPassword_: + localOpts := v2.LocalCredentialOptions_builder{ + ForceChangeAtNextLogin: opts.GetForceChangeAtNextLogin(), + }.Build() + + switch opts.WhichOptions() { + case v2.CredentialOptions_RandomPassword_case: + localOpts.SetRandomPassword(v2.LocalCredentialOptions_RandomPassword_builder{ + Length: opts.GetRandomPassword().GetLength(), + Constraints: opts.GetRandomPassword().GetConstraints(), + }.Build()) + case v2.CredentialOptions_NoPassword_case: + localOpts.SetNoPassword(&v2.LocalCredentialOptions_NoPassword{}) + case v2.CredentialOptions_Sso_case: + localOpts.SetSso(v2.LocalCredentialOptions_SSO_builder{ + SsoProvider: opts.GetSso().GetSsoProvider(), + }.Build()) + case v2.CredentialOptions_EncryptedPassword_case: default: return nil, status.Error(codes.InvalidArgument, "invalid credential options") } @@ -153,19 +147,17 @@ func ConvertCredentialOptions(ctx context.Context, clientSecret *jose.JSONWebKey if err != nil { return nil, fmt.Errorf("convert-credential-options: error decrypting password: %w", err) } - localOpts.Options = &v2.LocalCredentialOptions_PlaintextPassword_{ - PlaintextPassword: &v2.LocalCredentialOptions_PlaintextPassword{ - PlaintextPassword: password, - }, - } + 
localOpts.SetPlaintextPassword(v2.LocalCredentialOptions_PlaintextPassword_builder{ + PlaintextPassword: password, + }.Build()) break } - if localOpts.Options != nil { + if localOpts.HasOptions() { break } } - if localOpts.Options == nil { + if !localOpts.HasOptions() { return nil, status.Errorf(codes.InvalidArgument, "no encrypted password matched client secret key id %q", clientSecret.KeyID) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/crypto/password.go b/vendor/github.com/conductorone/baton-sdk/pkg/crypto/password.go index 512f91ef..cfd16ac0 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/crypto/password.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/crypto/password.go @@ -1,4 +1,4 @@ -package crypto +package crypto //nolint:revive,nolintlint // we can't change the package name for backwards compatibility import ( "context" @@ -67,12 +67,12 @@ func GenerateRandomPassword(randomPassword *v2.LocalCredentialOptions_RandomPass } var password strings.Builder - constraints := randomPassword.Constraints + constraints := randomPassword.GetConstraints() if len(constraints) > 0 { // apply constraints for _, constraint := range constraints { - for i := int64(0); i < int64(constraint.MinCount); i++ { - err := addCharacterToPassword(&password, constraint.CharSet) + for i := int64(0); i < int64(constraint.GetMinCount()); i++ { + err := addCharacterToPassword(&password, constraint.GetCharSet()) if err != nil { return "", err } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/crypto/providers/jwk/jwk.go b/vendor/github.com/conductorone/baton-sdk/pkg/crypto/providers/jwk/jwk.go index ffc82a93..c39a73af 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/crypto/providers/jwk/jwk.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/crypto/providers/jwk/jwk.go @@ -61,16 +61,14 @@ func (j *JWKEncryptionProvider) marshalKey(ctx context.Context, privKeyJWK *jose return nil, nil, err } - return &v2.EncryptionConfig{ + return v2.EncryptionConfig_builder{ Principal: nil, Provider: EncryptionProviderJwk, // TODO(morgabra): Fix the circular dependency/entire registry pattern. 
KeyId: kid, - Config: &v2.EncryptionConfig_JwkPublicKeyConfig{ - JwkPublicKeyConfig: &v2.EncryptionConfig_JWKPublicKeyConfig{ - PubKey: pubKeyJWKBytes, - }, - }, - }, privKeyJWK, nil + JwkPublicKeyConfig: v2.EncryptionConfig_JWKPublicKeyConfig_builder{ + PubKey: pubKeyJWKBytes, + }.Build(), + }.Build(), privKeyJWK, nil } func (j *JWKEncryptionProvider) Encrypt(ctx context.Context, conf *v2.EncryptionConfig, plainText *v2.PlaintextData) (*v2.EncryptedData, error) { @@ -82,17 +80,17 @@ func (j *JWKEncryptionProvider) Encrypt(ctx context.Context, conf *v2.Encryption var ciphertext []byte switch pubKey := jwk.Public().Key.(type) { case ed25519.PublicKey: - ciphertext, err = EncryptED25519(pubKey, plainText.Bytes) + ciphertext, err = EncryptED25519(pubKey, plainText.GetBytes()) if err != nil { return nil, err } case *ecdsa.PublicKey: - ciphertext, err = EncryptECDSA(pubKey, plainText.Bytes) + ciphertext, err = EncryptECDSA(pubKey, plainText.GetBytes()) if err != nil { return nil, err } case *rsa.PublicKey: - ciphertext, err = EncryptRSA(pubKey, plainText.Bytes) + ciphertext, err = EncryptRSA(pubKey, plainText.GetBytes()) if err != nil { return nil, err } @@ -107,19 +105,19 @@ func (j *JWKEncryptionProvider) Encrypt(ctx context.Context, conf *v2.Encryption encCipherText := base64.StdEncoding.EncodeToString(ciphertext) - return &v2.EncryptedData{ + return v2.EncryptedData_builder{ Provider: EncryptionProviderJwk, KeyId: tp, - Name: plainText.Name, - Description: plainText.Description, - Schema: plainText.Schema, + Name: plainText.GetName(), + Description: plainText.GetDescription(), + Schema: plainText.GetSchema(), EncryptedBytes: []byte(encCipherText), KeyIds: []string{tp}, - }, nil + }.Build(), nil } func (j *JWKEncryptionProvider) Decrypt(ctx context.Context, cipherText *v2.EncryptedData, jwk *jose.JSONWebKey) (*v2.PlaintextData, error) { - decCipherText, err := base64.StdEncoding.DecodeString(string(cipherText.EncryptedBytes)) + decCipherText, err := base64.StdEncoding.DecodeString(string(cipherText.GetEncryptedBytes())) if err != nil { return nil, fmt.Errorf("jwk: failed to decode encrypted bytes: %w", err) } @@ -145,12 +143,12 @@ func (j *JWKEncryptionProvider) Decrypt(ctx context.Context, cipherText *v2.Encr return nil, ErrJWKUnsupportedKeyType } - return &v2.PlaintextData{ - Name: cipherText.Name, - Description: cipherText.Description, - Schema: cipherText.Schema, + return v2.PlaintextData_builder{ + Name: cipherText.GetName(), + Description: cipherText.GetDescription(), + Schema: cipherText.GetSchema(), Bytes: plaintext, - }, nil + }.Build(), nil } func Thumbprint(jwk *jose.JSONWebKey) (string, error) { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go index 035fbcf6..e67256f4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go @@ -58,6 +58,10 @@ func (c *C1File) PutAsset(ctx context.Context, assetRef *v2.AssetRef, contentTyp ctx, span := tracer.Start(ctx, "C1File.PutAsset") defer span.End() + if c.readOnly { + return ErrReadOnly + } + l := ctxzap.Extract(ctx) if len(data) == 0 { @@ -76,7 +80,7 @@ func (c *C1File) PutAsset(ctx context.Context, assetRef *v2.AssetRef, contentTyp } fields := goqu.Record{ - "external_id": assetRef.Id, + "external_id": assetRef.GetId(), "content_type": contentType, "data": data, "sync_id": c.currentSyncID, @@ -113,13 +117,13 @@ func (c *C1File) GetAsset(ctx context.Context, request 
*v2.AssetServiceGetAssetR return "", nil, err } - if request.Asset == nil { + if !request.HasAsset() { return "", nil, fmt.Errorf("asset is required") } q := c.db.From(assets.Name()).Prepared(true) q = q.Select("content_type", "data") - q = q.Where(goqu.C("external_id").Eq(request.Asset.Id)) + q = q.Where(goqu.C("external_id").Eq(request.GetAsset().GetId())) if c.currentSyncID != "" { q = q.Where(goqu.C("sync_id").Eq(c.currentSyncID)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go index dc269cbb..5de57194 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go @@ -34,15 +34,22 @@ type pragma struct { } type C1File struct { - rawDb *sql.DB - db *goqu.Database - currentSyncID string - viewSyncID string - outputFilePath string - dbFilePath string - dbUpdated bool - tempDir string - pragmas []pragma + rawDb *sql.DB + db *goqu.Database + currentSyncID string + viewSyncID string + outputFilePath string + dbFilePath string + dbUpdated bool + tempDir string + pragmas []pragma + readOnly bool + encoderConcurrency int + + // Cached sync run for listConnectorObjects (avoids N+1 queries) + cachedViewSyncRun *syncRun + cachedViewSyncOnce sync.Once + cachedViewSyncErr error // Slow query tracking slowQueryLogTimes map[string]time.Time @@ -55,18 +62,33 @@ var _ connectorstore.Writer = (*C1File)(nil) type C1FOption func(*C1File) +// WithC1FTmpDir sets the temporary directory to use when cloning a sync. +// If not provided, os.TempDir() will be used. func WithC1FTmpDir(tempDir string) C1FOption { return func(o *C1File) { o.tempDir = tempDir } } +// WithC1FPragma sets a sqlite pragma for the c1z file. func WithC1FPragma(name string, value string) C1FOption { return func(o *C1File) { o.pragmas = append(o.pragmas, pragma{name, value}) } } +func WithC1FReadOnly(readOnly bool) C1FOption { + return func(o *C1File) { + o.readOnly = readOnly + } +} + +func WithC1FEncoderConcurrency(concurrency int) C1FOption { + return func(o *C1File) { + o.encoderConcurrency = concurrency + } +} + // Returns a C1File instance for the given db filepath. func NewC1File(ctx context.Context, dbFilePath string, opts ...C1FOption) (*C1File, error) { ctx, span := tracer.Start(ctx, "NewC1File") @@ -87,6 +109,7 @@ func NewC1File(ctx context.Context, dbFilePath string, opts ...C1FOption) (*C1Fi slowQueryLogTimes: make(map[string]time.Time), slowQueryThreshold: 5 * time.Second, slowQueryLogFrequency: 1 * time.Minute, + encoderConcurrency: 1, } for _, opt := range opts { @@ -107,18 +130,23 @@ func NewC1File(ctx context.Context, dbFilePath string, opts ...C1FOption) (*C1Fi } type c1zOptions struct { - tmpDir string - pragmas []pragma - decoderOptions []DecoderOption + tmpDir string + pragmas []pragma + decoderOptions []DecoderOption + readOnly bool + encoderConcurrency int } type C1ZOption func(*c1zOptions) +// WithTmpDir sets the temporary directory to extract the c1z file to. +// If not provided, os.TempDir() will be used. func WithTmpDir(tmpDir string) C1ZOption { return func(o *c1zOptions) { o.tmpDir = tmpDir } } +// WithPragma sets a sqlite pragma for the c1z file. func WithPragma(name string, value string) C1ZOption { return func(o *c1zOptions) { o.pragmas = append(o.pragmas, pragma{name, value}) @@ -131,12 +159,30 @@ func WithDecoderOptions(opts ...DecoderOption) C1ZOption { } } +// WithReadOnly opens the c1z file in read only mode. 
Modifying the c1z will result in an error on close. +func WithReadOnly(readOnly bool) C1ZOption { + return func(o *c1zOptions) { + o.readOnly = readOnly + } +} + +// WithEncoderConcurrency sets the number of created encoders. +// Default is 1, which disables async encoding/concurrency. +// 0 uses GOMAXPROCS. +func WithEncoderConcurrency(concurrency int) C1ZOption { + return func(o *c1zOptions) { + o.encoderConcurrency = concurrency + } +} + // Returns a new C1File instance with its state stored at the provided filename. func NewC1ZFile(ctx context.Context, outputFilePath string, opts ...C1ZOption) (*C1File, error) { ctx, span := tracer.Start(ctx, "NewC1ZFile") defer span.End() - options := &c1zOptions{} + options := &c1zOptions{ + encoderConcurrency: 1, + } for _, opt := range opts { opt(options) } @@ -150,6 +196,13 @@ func NewC1ZFile(ctx context.Context, outputFilePath string, opts ...C1ZOption) ( for _, pragma := range options.pragmas { c1fopts = append(c1fopts, WithC1FPragma(pragma.name, pragma.value)) } + if options.readOnly { + c1fopts = append(c1fopts, WithC1FReadOnly(true)) + } + if options.encoderConcurrency < 0 { + return nil, fmt.Errorf("encoder concurrency must be greater than 0") + } + c1fopts = append(c1fopts, WithC1FEncoderConcurrency(options.encoderConcurrency)) c1File, err := NewC1File(ctx, dbFilePath, c1fopts...) if err != nil { @@ -169,6 +222,8 @@ func cleanupDbDir(dbFilePath string, err error) error { return err } +var ErrReadOnly = errors.New("c1z: read only mode") + // Close ensures that the sqlite database is flushed to disk, and if any changes were made we update the original database // with our changes. func (c *C1File) Close() error { @@ -185,7 +240,10 @@ func (c *C1File) Close() error { // We only want to save the file if we've made any changes if c.dbUpdated { - err = saveC1z(c.dbFilePath, c.outputFilePath) + if c.readOnly { + return cleanupDbDir(c.dbFilePath, ErrReadOnly) + } + err = saveC1z(c.dbFilePath, c.outputFilePath, c.encoderConcurrency) if err != nil { return cleanupDbDir(c.dbFilePath, err) } @@ -204,6 +262,30 @@ func (c *C1File) init(ctx context.Context) error { return err } + err = c.InitTables(ctx) + if err != nil { + return err + } + + for _, pragma := range c.pragmas { + _, err := c.db.ExecContext(ctx, fmt.Sprintf("PRAGMA %s = %s", pragma.name, pragma.value)) + if err != nil { + return err + } + } + + return nil +} + +func (c *C1File) InitTables(ctx context.Context) error { + ctx, span := tracer.Start(ctx, "C1File.InitTables") + defer span.End() + + err := c.validateDb(ctx) + if err != nil { + return err + } + for _, t := range allTableDescriptors { query, args := t.Schema() _, err = c.db.ExecContext(ctx, fmt.Sprintf(query, args...)) @@ -216,13 +298,6 @@ func (c *C1File) init(ctx context.Context) error { } } - for _, pragma := range c.pragmas { - _, err := c.db.ExecContext(ctx, fmt.Sprintf("PRAGMA %s = %s", pragma.name, pragma.value)) - if err != nil { - return err - } - } - return nil } @@ -241,47 +316,47 @@ func (c *C1File) Stats(ctx context.Context, syncType connectorstore.SyncType, sy return nil, err } } - resp, err := c.GetSync(ctx, &reader_v2.SyncsReaderServiceGetSyncRequest{SyncId: syncId}) + resp, err := c.GetSync(ctx, reader_v2.SyncsReaderServiceGetSyncRequest_builder{SyncId: syncId}.Build()) if err != nil { return nil, err } - if resp == nil || resp.Sync == nil { + if resp == nil || !resp.HasSync() { return nil, status.Errorf(codes.NotFound, "sync '%s' not found", syncId) } - sync := resp.Sync - if syncType != connectorstore.SyncTypeAny 
&& syncType != connectorstore.SyncType(sync.SyncType) { + sync := resp.GetSync() + if syncType != connectorstore.SyncTypeAny && syncType != connectorstore.SyncType(sync.GetSyncType()) { return nil, status.Errorf(codes.InvalidArgument, "sync '%s' is not of type '%s'", syncId, syncType) } - syncType = connectorstore.SyncType(sync.SyncType) + syncType = connectorstore.SyncType(sync.GetSyncType()) counts["resource_types"] = 0 var rtStats []*v2.ResourceType pageToken := "" for { - resp, err := c.ListResourceTypes(ctx, &v2.ResourceTypesServiceListResourceTypesRequest{PageToken: pageToken}) + resp, err := c.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{PageToken: pageToken}.Build()) if err != nil { return nil, err } - rtStats = append(rtStats, resp.List...) + rtStats = append(rtStats, resp.GetList()...) - if resp.NextPageToken == "" { + if resp.GetNextPageToken() == "" { break } - pageToken = resp.NextPageToken + pageToken = resp.GetNextPageToken() } counts["resource_types"] = int64(len(rtStats)) for _, rt := range rtStats { resourceCount, err := c.db.From(resources.Name()). - Where(goqu.C("resource_type_id").Eq(rt.Id)). + Where(goqu.C("resource_type_id").Eq(rt.GetId())). Where(goqu.C("sync_id").Eq(syncId)). CountContext(ctx) if err != nil { return nil, err } - counts[rt.Id] = resourceCount + counts[rt.GetId()] = resourceCount } if syncType != connectorstore.SyncTypeResourcesOnly { @@ -367,14 +442,14 @@ func (c *C1File) GrantStats(ctx context.Context, syncType connectorstore.SyncTyp return nil, err } } else { - lastSync, err := c.GetSync(ctx, &reader_v2.SyncsReaderServiceGetSyncRequest{SyncId: syncId}) + lastSync, err := c.GetSync(ctx, reader_v2.SyncsReaderServiceGetSyncRequest_builder{SyncId: syncId}.Build()) if err != nil { return nil, err } if lastSync == nil { return nil, status.Errorf(codes.NotFound, "sync '%s' not found", syncId) } - if syncType != connectorstore.SyncTypeAny && syncType != connectorstore.SyncType(lastSync.Sync.SyncType) { + if syncType != connectorstore.SyncTypeAny && syncType != connectorstore.SyncType(lastSync.GetSync().GetSyncType()) { return nil, status.Errorf(codes.InvalidArgument, "sync '%s' is not of type '%s'", syncId, syncType) } } @@ -382,18 +457,18 @@ func (c *C1File) GrantStats(ctx context.Context, syncType connectorstore.SyncTyp var allResourceTypes []*v2.ResourceType pageToken := "" for { - resp, err := c.ListResourceTypes(ctx, &v2.ResourceTypesServiceListResourceTypesRequest{PageToken: pageToken}) + resp, err := c.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{PageToken: pageToken}.Build()) if err != nil { return nil, err } - allResourceTypes = append(allResourceTypes, resp.List...) + allResourceTypes = append(allResourceTypes, resp.GetList()...) - if resp.NextPageToken == "" { + if resp.GetNextPageToken() == "" { break } - pageToken = resp.NextPageToken + pageToken = resp.GetNextPageToken() } stats := make(map[string]int64) @@ -401,13 +476,13 @@ func (c *C1File) GrantStats(ctx context.Context, syncType connectorstore.SyncTyp for _, resourceType := range allResourceTypes { grantsCount, err := c.db.From(grants.Name()). Where(goqu.C("sync_id").Eq(syncId)). - Where(goqu.C("resource_type_id").Eq(resourceType.Id)). + Where(goqu.C("resource_type_id").Eq(resourceType.GetId())). 
CountContext(ctx) if err != nil { return nil, err } - stats[resourceType.Id] = grantsCount + stats[resourceType.GetId()] = grantsCount } return stats, nil diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go index 6db52b61..4324a7cc 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go @@ -11,6 +11,10 @@ import ( ) func (c *C1File) GenerateSyncDiff(ctx context.Context, baseSyncID string, appliedSyncID string) (string, error) { + if c.readOnly { + return "", ErrReadOnly + } + // Validate that both sync runs exist baseSync, err := c.getSync(ctx, baseSyncID) if err != nil { @@ -44,6 +48,9 @@ func (c *C1File) GenerateSyncDiff(ctx context.Context, baseSyncID string, applie if err != nil { return "", err } + if q == "" { + continue + } _, err = c.db.ExecContext(ctx, q, args...) if err != nil { return "", err @@ -70,6 +77,9 @@ func (c *C1File) diffTableQuery(table tableDescriptor, baseSyncID, appliedSyncID tableName := table.Name() // Add table-specific columns switch { + case strings.Contains(tableName, sessionStoreTableName): + // caching is not relevant to diffs. + return "", nil, nil case strings.Contains(tableName, resourcesTableName): columns = append(columns, "resource_type_id", "parent_resource_type_id", "parent_resource_id") case strings.Contains(tableName, resourceTypesTableName): diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go index 9456ae77..31b1aac6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/doug-martin/goqu/v9" - "google.golang.org/protobuf/proto" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" @@ -57,25 +56,15 @@ func (c *C1File) ListEntitlements(ctx context.Context, request *v2.EntitlementsS ctx, span := tracer.Start(ctx, "C1File.ListEntitlements") defer span.End() - objs, nextPageToken, err := c.listConnectorObjects(ctx, entitlements.Name(), request) + objs, nextPageToken, err := listConnectorObjects(ctx, c, entitlements.Name(), request, func() *v2.Entitlement { return &v2.Entitlement{} }) if err != nil { return nil, fmt.Errorf("error listing entitlements: %w", err) } - ret := make([]*v2.Entitlement, 0, len(objs)) - for _, o := range objs { - en := &v2.Entitlement{} - err = proto.Unmarshal(o, en) - if err != nil { - return nil, err - } - ret = append(ret, en) - } - - return &v2.EntitlementsServiceListEntitlementsResponse{ - List: ret, + return v2.EntitlementsServiceListEntitlementsResponse_builder{ + List: objs, NextPageToken: nextPageToken, - }, nil + }.Build(), nil } func (c *C1File) GetEntitlement(ctx context.Context, request *reader_v2.EntitlementsReaderServiceGetEntitlementRequest) (*reader_v2.EntitlementsReaderServiceGetEntitlementResponse, error) { @@ -85,16 +74,26 @@ func (c *C1File) GetEntitlement(ctx context.Context, request *reader_v2.Entitlem ret := &v2.Entitlement{} syncId, err := annotations.GetSyncIdFromAnnotations(request.GetAnnotations()) if err != nil { - return nil, fmt.Errorf("error getting sync id from annotations for entitlement '%s': %w", request.EntitlementId, err) + return nil, fmt.Errorf("error getting sync id from annotations for entitlement '%s': %w", 
request.GetEntitlementId(), err) } - err = c.getConnectorObject(ctx, entitlements.Name(), request.EntitlementId, syncId, ret) + err = c.getConnectorObject(ctx, entitlements.Name(), request.GetEntitlementId(), syncId, ret) if err != nil { - return nil, fmt.Errorf("error fetching entitlement '%s': %w", request.EntitlementId, err) + return nil, fmt.Errorf("error fetching entitlement '%s': %w", request.GetEntitlementId(), err) } - return &reader_v2.EntitlementsReaderServiceGetEntitlementResponse{ + return reader_v2.EntitlementsReaderServiceGetEntitlementResponse_builder{ Entitlement: ret, - }, nil + }.Build(), nil +} + +func (c *C1File) ListStaticEntitlements(ctx context.Context, request *v2.EntitlementsServiceListStaticEntitlementsRequest) (*v2.EntitlementsServiceListStaticEntitlementsResponse, error) { + _, span := tracer.Start(ctx, "C1File.ListStaticEntitlements") + defer span.End() + + return v2.EntitlementsServiceListStaticEntitlementsResponse_builder{ + List: []*v2.Entitlement{}, + NextPageToken: "", + }.Build(), nil } func (c *C1File) PutEntitlements(ctx context.Context, entitlementObjs ...*v2.Entitlement) error { @@ -114,11 +113,15 @@ func (c *C1File) PutEntitlementsIfNewer(ctx context.Context, entitlementObjs ... type entitlementPutFunc func(context.Context, *C1File, string, func(m *v2.Entitlement) (goqu.Record, error), ...*v2.Entitlement) error func (c *C1File) putEntitlementsInternal(ctx context.Context, f entitlementPutFunc, entitlementObjs ...*v2.Entitlement) error { + if c.readOnly { + return ErrReadOnly + } + err := f(ctx, c, entitlements.Name(), func(entitlement *v2.Entitlement) (goqu.Record, error) { return goqu.Record{ - "resource_id": entitlement.Resource.Id.Resource, - "resource_type_id": entitlement.Resource.Id.ResourceType, + "resource_id": entitlement.GetResource().GetId().GetResource(), + "resource_type_id": entitlement.GetResource().GetId().GetResourceType(), }, nil }, entitlementObjs..., diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/file.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/file.go index 8e0b296e..b4f9d72a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/file.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/file.go @@ -2,13 +2,17 @@ package dotc1z import ( "errors" + "fmt" "io" "os" "path/filepath" + "runtime" "syscall" "github.com/klauspost/compress/zstd" "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func loadC1z(filePath string, tmpDir string, opts ...DecoderOption) (string, error) { @@ -55,9 +59,9 @@ func loadC1z(filePath string, tmpDir string, opts ...DecoderOption) (string, err return dbFilePath, nil } -func saveC1z(dbFilePath string, outputFilePath string) error { +func saveC1z(dbFilePath string, outputFilePath string, encoderConcurrency int) error { if outputFilePath == "" { - return errors.New("c1z: output file path not configured") + return status.Errorf(codes.InvalidArgument, "c1z: output file path not configured") } dbFile, err := os.Open(dbFilePath) @@ -65,9 +69,11 @@ func saveC1z(dbFilePath string, outputFilePath string) error { return err } defer func() { - err = dbFile.Close() - if err != nil { - zap.L().Error("failed to close db file", zap.Error(err)) + if dbFile != nil { + err = dbFile.Close() + if err != nil { + zap.L().Error("failed to close db file", zap.Error(err)) + } } }() @@ -75,7 +81,14 @@ func saveC1z(dbFilePath string, outputFilePath string) error { if err != nil { return err } - defer outFile.Close() + defer func() { + if outFile != 
nil { + err = outFile.Close() + if err != nil { + zap.L().Error("failed to close out file", zap.Error(err)) + } + } + }() // Write the magic file header _, err = outFile.Write(C1ZFileHeader) @@ -83,7 +96,15 @@ func saveC1z(dbFilePath string, outputFilePath string) error { return err } - c1z, err := zstd.NewWriter(outFile) + // zstd.WithEncoderConcurrency does not work the same as WithDecoderConcurrency. + // WithDecoderConcurrency uses GOMAXPROCS if set to 0. + // WithEncoderConcurrency errors if set to 0 (but defaults to GOMAXPROCS). + if encoderConcurrency == 0 { + encoderConcurrency = runtime.GOMAXPROCS(0) + } + c1z, err := zstd.NewWriter(outFile, + zstd.WithEncoderConcurrency(encoderConcurrency), + ) if err != nil { return err } @@ -95,12 +116,29 @@ func saveC1z(dbFilePath string, outputFilePath string) error { err = c1z.Flush() if err != nil { - return err + return fmt.Errorf("failed to flush c1z: %w", err) } err = c1z.Close() if err != nil { - return err + return fmt.Errorf("failed to close c1z: %w", err) + } + + err = outFile.Sync() + if err != nil { + return fmt.Errorf("failed to sync out file: %w", err) + } + + err = outFile.Close() + if err != nil { + return fmt.Errorf("failed to close out file: %w", err) + } + outFile = nil + + err = dbFile.Close() + if err != nil { + return fmt.Errorf("failed to close db file: %w", err) } + dbFile = nil return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go index 4edc283d..69096da3 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/doug-martin/goqu/v9" - "google.golang.org/protobuf/proto" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" @@ -34,6 +33,8 @@ create unique index if not exists %s on %s (external_id, sync_id);` var grants = (*grantsTable)(nil) +var _ tableDescriptor = (*grantsTable)(nil) + type grantsTable struct{} func (r *grantsTable) Version() string { @@ -44,8 +45,8 @@ func (r *grantsTable) Name() string { return fmt.Sprintf("v%s_%s", r.Version(), grantsTableName) } -func (r *grantsTable) Schema() (string, []interface{}) { - return grantsTableSchema, []interface{}{ +func (r *grantsTable) Schema() (string, []any) { + return grantsTableSchema, []any{ r.Name(), fmt.Sprintf("idx_grants_resource_type_id_resource_id_v%s", r.Version()), r.Name(), @@ -62,29 +63,42 @@ func (r *grantsTable) Migrations(ctx context.Context, db *goqu.Database) error { return nil } -func (c *C1File) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) { - ctx, span := tracer.Start(ctx, "C1File.ListGrants") +// DropGrantIndexes drops the indexes on the grants table. +// This should only be called when compacting the grants table. +// These indexes are re-created when we open the database again. 
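
As context for the saveC1z change above: klauspost/compress treats encoder and decoder concurrency options differently, which is why the patch maps a configured concurrency of 0 to runtime.GOMAXPROCS(0) before calling zstd.NewWriter. A minimal sketch of that translation follows; the package and function names (sketch, newZstdWriter) are illustrative, not part of the SDK.

package sketch

import (
	"io"
	"runtime"

	"github.com/klauspost/compress/zstd"
)

// newZstdWriter mirrors the translation done in saveC1z: zstd.WithEncoderConcurrency
// rejects a value of 0, whereas zstd.WithDecoderConcurrency interprets 0 as
// "use GOMAXPROCS", so the caller performs that mapping itself.
func newZstdWriter(w io.Writer, concurrency int) (*zstd.Encoder, error) {
	if concurrency == 0 {
		concurrency = runtime.GOMAXPROCS(0)
	}
	return zstd.NewWriter(w, zstd.WithEncoderConcurrency(concurrency))
}
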
+func (c *C1File) DropGrantIndexes(ctx context.Context) error { + ctx, span := tracer.Start(ctx, "C1File.DropGrantsIndexes") defer span.End() - objs, nextPageToken, err := c.listConnectorObjects(ctx, grants.Name(), request) - if err != nil { - return nil, fmt.Errorf("error listing grants: %w", err) + indexes := []string{ + fmt.Sprintf("idx_grants_resource_type_id_resource_id_v%s", grants.Version()), + fmt.Sprintf("idx_grants_principal_id_v%s", grants.Version()), + fmt.Sprintf("idx_grants_entitlement_id_principal_id_v%s", grants.Version()), + fmt.Sprintf("idx_grants_external_sync_v%s", grants.Version()), } - ret := make([]*v2.Grant, 0, len(objs)) - for _, o := range objs { - g := &v2.Grant{} - err = proto.Unmarshal(o, g) + for _, index := range indexes { + _, err := c.db.ExecContext(ctx, fmt.Sprintf("DROP INDEX IF EXISTS %s", index)) if err != nil { - return nil, err + return err } - ret = append(ret, g) + } + return nil +} + +func (c *C1File) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) { + ctx, span := tracer.Start(ctx, "C1File.ListGrants") + defer span.End() + + ret, nextPageToken, err := listConnectorObjects(ctx, c, grants.Name(), request, func() *v2.Grant { return &v2.Grant{} }) + if err != nil { + return nil, fmt.Errorf("error listing grants: %w", err) } - return &v2.GrantsServiceListGrantsResponse{ + return v2.GrantsServiceListGrantsResponse_builder{ List: ret, NextPageToken: nextPageToken, - }, nil + }.Build(), nil } func (c *C1File) GetGrant(ctx context.Context, request *reader_v2.GrantsReaderServiceGetGrantRequest) (*reader_v2.GrantsReaderServiceGetGrantResponse, error) { @@ -94,16 +108,16 @@ func (c *C1File) GetGrant(ctx context.Context, request *reader_v2.GrantsReaderSe ret := &v2.Grant{} syncId, err := annotations.GetSyncIdFromAnnotations(request.GetAnnotations()) if err != nil { - return nil, fmt.Errorf("error getting sync id from annotations for grant '%s': %w", request.GrantId, err) + return nil, fmt.Errorf("error getting sync id from annotations for grant '%s': %w", request.GetGrantId(), err) } - err = c.getConnectorObject(ctx, grants.Name(), request.GrantId, syncId, ret) + err = c.getConnectorObject(ctx, grants.Name(), request.GetGrantId(), syncId, ret) if err != nil { return nil, fmt.Errorf("error fetching grant '%s': %w", request.GetGrantId(), err) } - return &reader_v2.GrantsReaderServiceGetGrantResponse{ + return reader_v2.GrantsReaderServiceGetGrantResponse_builder{ Grant: ret, - }, nil + }.Build(), nil } func (c *C1File) ListGrantsForEntitlement( @@ -112,26 +126,15 @@ func (c *C1File) ListGrantsForEntitlement( ) (*reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse, error) { ctx, span := tracer.Start(ctx, "C1File.ListGrantsForEntitlement") defer span.End() - - objs, nextPageToken, err := c.listConnectorObjects(ctx, grants.Name(), request) + ret, nextPageToken, err := listConnectorObjects(ctx, c, grants.Name(), request, func() *v2.Grant { return &v2.Grant{} }) if err != nil { return nil, fmt.Errorf("error listing grants for entitlement '%s': %w", request.GetEntitlement().GetId(), err) } - ret := make([]*v2.Grant, 0, len(objs)) - for _, o := range objs { - en := &v2.Grant{} - err = proto.Unmarshal(o, en) - if err != nil { - return nil, err - } - ret = append(ret, en) - } - - return &reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse{ + return reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse_builder{ List: ret, NextPageToken: nextPageToken, - }, nil + }.Build(), 
nil } func (c *C1File) ListGrantsForPrincipal( @@ -141,25 +144,15 @@ func (c *C1File) ListGrantsForPrincipal( ctx, span := tracer.Start(ctx, "C1File.ListGrantsForPrincipal") defer span.End() - objs, nextPageToken, err := c.listConnectorObjects(ctx, grants.Name(), request) + ret, nextPageToken, err := listConnectorObjects(ctx, c, grants.Name(), request, func() *v2.Grant { return &v2.Grant{} }) if err != nil { return nil, fmt.Errorf("error listing grants for principal '%s': %w", request.GetPrincipalId(), err) } - ret := make([]*v2.Grant, 0, len(objs)) - for _, o := range objs { - en := &v2.Grant{} - err = proto.Unmarshal(o, en) - if err != nil { - return nil, err - } - ret = append(ret, en) - } - - return &reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse{ + return reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse_builder{ List: ret, NextPageToken: nextPageToken, - }, nil + }.Build(), nil } func (c *C1File) ListGrantsForResourceType( @@ -169,25 +162,15 @@ func (c *C1File) ListGrantsForResourceType( ctx, span := tracer.Start(ctx, "C1File.ListGrantsForResourceType") defer span.End() - objs, nextPageToken, err := c.listConnectorObjects(ctx, grants.Name(), request) + ret, nextPageToken, err := listConnectorObjects(ctx, c, grants.Name(), request, func() *v2.Grant { return &v2.Grant{} }) if err != nil { return nil, fmt.Errorf("error listing grants for resource type '%s': %w", request.GetResourceTypeId(), err) } - ret := make([]*v2.Grant, 0, len(objs)) - for _, o := range objs { - en := &v2.Grant{} - err = proto.Unmarshal(o, en) - if err != nil { - return nil, err - } - ret = append(ret, en) - } - - return &reader_v2.GrantsReaderServiceListGrantsForResourceTypeResponse{ + return reader_v2.GrantsReaderServiceListGrantsForResourceTypeResponse_builder{ List: ret, NextPageToken: nextPageToken, - }, nil + }.Build(), nil } func (c *C1File) PutGrants(ctx context.Context, bulkGrants ...*v2.Grant) error { @@ -207,14 +190,18 @@ func (c *C1File) PutGrantsIfNewer(ctx context.Context, bulkGrants ...*v2.Grant) type grantPutFunc func(context.Context, *C1File, string, func(m *v2.Grant) (goqu.Record, error), ...*v2.Grant) error func (c *C1File) putGrantsInternal(ctx context.Context, f grantPutFunc, bulkGrants ...*v2.Grant) error { + if c.readOnly { + return ErrReadOnly + } + err := f(ctx, c, grants.Name(), func(grant *v2.Grant) (goqu.Record, error) { return goqu.Record{ - "resource_type_id": grant.Entitlement.Resource.Id.ResourceType, - "resource_id": grant.Entitlement.Resource.Id.Resource, - "entitlement_id": grant.Entitlement.Id, - "principal_resource_type_id": grant.Principal.Id.ResourceType, - "principal_resource_id": grant.Principal.Id.Resource, + "resource_type_id": grant.GetEntitlement().GetResource().GetId().GetResourceType(), + "resource_id": grant.GetEntitlement().GetResource().GetId().GetResource(), + "entitlement_id": grant.GetEntitlement().GetId(), + "principal_resource_type_id": grant.GetPrincipal().GetId().GetResourceType(), + "principal_resource_id": grant.GetPrincipal().GetId().GetResource(), }, nil }, bulkGrants..., diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/local/local.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/local/local.go index f07f6b6e..3f319079 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/local/local.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/local/local.go @@ -59,10 +59,36 @@ func (l *localManager) copyFileToTmp(ctx context.Context) error { } defer f.Close() - 
_, err = io.Copy(tmp, f) + // Get source file size for verification + sourceStat, err := f.Stat() + if err != nil { + return fmt.Errorf("failed to stat source file: %w", err) + } + expectedSize := sourceStat.Size() + + written, err := io.Copy(tmp, f) if err != nil { return err } + + // CRITICAL: Sync to ensure all data is written before file is used. + // This is especially important on ZFS ARC where writes may be cached + // and reads can happen before buffers are flushed to disk. + if err := tmp.Sync(); err != nil { + return fmt.Errorf("failed to sync temp file: %w", err) + } + + // Verify file size matches what we wrote (defensive check) + stat, err := tmp.Stat() + if err != nil { + return fmt.Errorf("failed to stat temp file: %w", err) + } + if stat.Size() != written { + return fmt.Errorf("file size mismatch: wrote %d bytes but file is %d bytes", written, stat.Size()) + } + if written != expectedSize { + return fmt.Errorf("copy size mismatch: expected %d bytes from source but copied %d bytes", expectedSize, written) + } } return nil @@ -102,6 +128,7 @@ func (l *localManager) LoadC1Z(ctx context.Context) (*dotc1z.C1File, error) { "successfully loaded c1z locally", zap.String("file_path", l.filePath), zap.String("temp_path", l.tmpPath), + zap.String("tmp_dir", l.tmpDir), ) opts := []dotc1z.C1ZOption{ @@ -146,10 +173,17 @@ func (l *localManager) SaveC1Z(ctx context.Context) error { return err } + // CRITICAL: Sync to ensure data is written before function returns. + // This is especially important on ZFS ARC where writes may be cached. + if err := dstFile.Sync(); err != nil { + return fmt.Errorf("failed to sync destination file: %w", err) + } + log.Debug( "successfully saved c1z locally", zap.String("file_path", l.filePath), zap.String("temp_path", l.tmpPath), + zap.String("tmp_dir", l.tmpDir), zap.Int64("bytes", size), ) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3/s3.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3/s3.go index 385b1bc4..2660b953 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3/s3.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3/s3.go @@ -53,11 +53,30 @@ func (s *s3Manager) copyToTempFile(ctx context.Context, r io.Reader) error { s.tmpFile = f.Name() if r != nil { - _, err = io.Copy(f, r) + written, err := io.Copy(f, r) if err != nil { _ = f.Close() return err } + + // CRITICAL: Sync to ensure all data is written before file is used. + // This is especially important on ZFS ARC where writes may be cached + // and reads can happen before buffers are flushed to disk. 
+ if err := f.Sync(); err != nil { + _ = f.Close() + return fmt.Errorf("failed to sync temp file: %w", err) + } + + // Verify file size matches what we wrote (defensive check) + stat, err := f.Stat() + if err != nil { + _ = f.Close() + return fmt.Errorf("failed to stat temp file: %w", err) + } + if stat.Size() != written { + _ = f.Close() + return fmt.Errorf("file size mismatch: wrote %d bytes but file is %d bytes", written, stat.Size()) + } } return nil diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go index 53a7f64e..ee02a847 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go @@ -4,8 +4,6 @@ import ( "context" "fmt" - "google.golang.org/protobuf/proto" - "github.com/doug-martin/goqu/v9" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" @@ -53,25 +51,15 @@ func (c *C1File) ListResourceTypes(ctx context.Context, request *v2.ResourceType ctx, span := tracer.Start(ctx, "C1File.ListResourceTypes") defer span.End() - objs, nextPageToken, err := c.listConnectorObjects(ctx, resourceTypes.Name(), request) + ret, nextPageToken, err := listConnectorObjects(ctx, c, resourceTypes.Name(), request, func() *v2.ResourceType { return &v2.ResourceType{} }) if err != nil { return nil, fmt.Errorf("error listing resource types: %w", err) } - ret := make([]*v2.ResourceType, 0, len(objs)) - for _, o := range objs { - rt := &v2.ResourceType{} - err = proto.Unmarshal(o, rt) - if err != nil { - return nil, err - } - ret = append(ret, rt) - } - - return &v2.ResourceTypesServiceListResourceTypesResponse{ + return v2.ResourceTypesServiceListResourceTypesResponse_builder{ List: ret, NextPageToken: nextPageToken, - }, nil + }.Build(), nil } func (c *C1File) GetResourceType(ctx context.Context, request *reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest) (*reader_v2.ResourceTypesReaderServiceGetResourceTypeResponse, error) { @@ -81,16 +69,16 @@ func (c *C1File) GetResourceType(ctx context.Context, request *reader_v2.Resourc ret := &v2.ResourceType{} syncId, err := annotations.GetSyncIdFromAnnotations(request.GetAnnotations()) if err != nil { - return nil, fmt.Errorf("error getting sync id from annotations for resource type '%s': %w", request.ResourceTypeId, err) + return nil, fmt.Errorf("error getting sync id from annotations for resource type '%s': %w", request.GetResourceTypeId(), err) } - err = c.getConnectorObject(ctx, resourceTypes.Name(), request.ResourceTypeId, syncId, ret) + err = c.getConnectorObject(ctx, resourceTypes.Name(), request.GetResourceTypeId(), syncId, ret) if err != nil { - return nil, fmt.Errorf("error fetching resource type '%s': %w", request.ResourceTypeId, err) + return nil, fmt.Errorf("error fetching resource type '%s': %w", request.GetResourceTypeId(), err) } - return &reader_v2.ResourceTypesReaderServiceGetResourceTypeResponse{ + return reader_v2.ResourceTypesReaderServiceGetResourceTypeResponse_builder{ ResourceType: ret, - }, nil + }.Build(), nil } func (c *C1File) PutResourceTypes(ctx context.Context, resourceTypesObjs ...*v2.ResourceType) error { @@ -110,6 +98,10 @@ func (c *C1File) PutResourceTypesIfNewer(ctx context.Context, resourceTypesObjs type resourceTypePutFunc func(context.Context, *C1File, string, func(m *v2.ResourceType) (goqu.Record, error), ...*v2.ResourceType) error func (c *C1File) putResourceTypesInternal(ctx context.Context, f resourceTypePutFunc, 
resourceTypesObjs ...*v2.ResourceType) error { + if c.readOnly { + return ErrReadOnly + } + err := f(ctx, c, resourceTypes.Name(), func(resource *v2.ResourceType) (goqu.Record, error) { return nil, nil diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go index b28b8bb5..7e350562 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/doug-martin/goqu/v9" - "google.golang.org/protobuf/proto" "github.com/conductorone/baton-sdk/pkg/annotations" @@ -62,25 +61,15 @@ func (c *C1File) ListResources(ctx context.Context, request *v2.ResourcesService ctx, span := tracer.Start(ctx, "C1File.ListResources") defer span.End() - objs, nextPageToken, err := c.listConnectorObjects(ctx, resources.Name(), request) + ret, nextPageToken, err := listConnectorObjects(ctx, c, resources.Name(), request, func() *v2.Resource { return &v2.Resource{} }) if err != nil { return nil, fmt.Errorf("error listing resources: %w", err) } - ret := make([]*v2.Resource, 0, len(objs)) - for _, o := range objs { - rt := &v2.Resource{} - err = proto.Unmarshal(o, rt) - if err != nil { - return nil, err - } - ret = append(ret, rt) - } - - return &v2.ResourcesServiceListResourcesResponse{ + return v2.ResourcesServiceListResourcesResponse_builder{ List: ret, NextPageToken: nextPageToken, - }, nil + }.Build(), nil } func (c *C1File) GetResource(ctx context.Context, request *reader_v2.ResourcesReaderServiceGetResourceRequest) (*reader_v2.ResourcesReaderServiceGetResourceResponse, error) { @@ -90,16 +79,16 @@ func (c *C1File) GetResource(ctx context.Context, request *reader_v2.ResourcesRe ret := &v2.Resource{} syncId, err := annotations.GetSyncIdFromAnnotations(request.GetAnnotations()) if err != nil { - return nil, fmt.Errorf("error getting sync id from annotations for resource '%s': %w", request.ResourceId, err) + return nil, fmt.Errorf("error getting sync id from annotations for resource '%s': %w", request.GetResourceId(), err) } - err = c.getResourceObject(ctx, request.ResourceId, ret, syncId) + err = c.getResourceObject(ctx, request.GetResourceId(), ret, syncId) if err != nil { - return nil, fmt.Errorf("error fetching resource '%s': %w", request.ResourceId, err) + return nil, fmt.Errorf("error fetching resource '%s': %w", request.GetResourceId(), err) } - return &reader_v2.ResourcesReaderServiceGetResourceResponse{ + return reader_v2.ResourcesReaderServiceGetResourceResponse_builder{ Resource: ret, - }, nil + }.Build(), nil } func (c *C1File) PutResources(ctx context.Context, resourceObjs ...*v2.Resource) error { @@ -119,20 +108,24 @@ func (c *C1File) PutResourcesIfNewer(ctx context.Context, resourceObjs ...*v2.Re type resourcePutFunc func(context.Context, *C1File, string, func(m *v2.Resource) (goqu.Record, error), ...*v2.Resource) error func (c *C1File) putResourcesInternal(ctx context.Context, f resourcePutFunc, resourceObjs ...*v2.Resource) error { + if c.readOnly { + return ErrReadOnly + } + err := f(ctx, c, resources.Name(), func(resource *v2.Resource) (goqu.Record, error) { fields := goqu.Record{ - "resource_type_id": resource.Id.ResourceType, - "external_id": fmt.Sprintf("%s:%s", resource.Id.ResourceType, resource.Id.Resource), + "resource_type_id": resource.GetId().GetResourceType(), + "external_id": fmt.Sprintf("%s:%s", resource.GetId().GetResourceType(), resource.GetId().GetResource()), } // If we bulk 
insert some resources with parent ids and some without, goqu errors because of the different number of fields. - if resource.ParentResourceId == nil { + if !resource.HasParentResourceId() { fields["parent_resource_type_id"] = nil fields["parent_resource_id"] = nil } else { - fields["parent_resource_type_id"] = resource.ParentResourceId.ResourceType - fields["parent_resource_id"] = resource.ParentResourceId.Resource + fields["parent_resource_type_id"] = resource.GetParentResourceId().GetResourceType() + fields["parent_resource_id"] = resource.GetParentResourceId().GetResource() } return fields, nil }, diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/session_store.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/session_store.go new file mode 100644 index 00000000..796c0648 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/session_store.go @@ -0,0 +1,428 @@ +package dotc1z + +import ( + "context" + "fmt" + "maps" + "strings" + + "github.com/doug-martin/goqu/v9" + + "github.com/conductorone/baton-sdk/pkg/types/sessions" +) + +type SessionStore interface { + sessions.SessionStore +} + +var _ sessions.SessionStore = (*C1File)(nil) + +const sessionStoreTableVersion = "1" +const sessionStoreTableName = "connector_sessions" +const sessionStoreTableSchema = ` +CREATE TABLE IF NOT EXISTS %s ( + id integer primary key, + sync_id text NOT NULL, + key TEXT NOT NULL, + value BLOB NOT NULL +); +create unique index if not exists %s on %s (sync_id, key);` + +var sessionStore = (*sessionStoreTable)(nil) + +type sessionStoreTable struct{} + +func escapeLike(s string) string { + s = strings.ReplaceAll(s, `\`, `\\`) + s = strings.ReplaceAll(s, `%`, `\%`) + s = strings.ReplaceAll(s, `_`, `\_`) + return s +} + +func (r *sessionStoreTable) Name() string { + return fmt.Sprintf("v%s_%s", r.Version(), sessionStoreTableName) +} + +func (r *sessionStoreTable) Version() string { + return sessionStoreTableVersion +} + +func (r *sessionStoreTable) Schema() (string, []interface{}) { + return sessionStoreTableSchema, []interface{}{ + r.Name(), + fmt.Sprintf("idx_session_store_sync_key_v%s", r.Version()), + r.Name(), + } +} + +func (r *sessionStoreTable) Migrations(ctx context.Context, db *goqu.Database) error { + return nil +} + +func applyBag(ctx context.Context, opt ...sessions.SessionStoreOption) (*sessions.SessionStoreBag, error) { + bag := &sessions.SessionStoreBag{} + for _, o := range opt { + err := o(ctx, bag) + if err != nil { + return nil, fmt.Errorf("error applying session option: %w", err) + } + } + if bag.SyncID == "" { + return nil, fmt.Errorf("sync id is required") + } + return bag, nil +} + +// Get implements types.SessionCache. +func (c *C1File) Get(ctx context.Context, key string, opt ...sessions.SessionStoreOption) ([]byte, bool, error) { + bag, err := applyBag(ctx, opt...) + if err != nil { + return nil, false, fmt.Errorf("error applying session option: %w", err) + } + + q := c.db.From(sessionStore.Name()).Prepared(true) + q = q.Select("value") + q = q.Where(goqu.C("sync_id").Eq(bag.SyncID)) + q = q.Where(goqu.C("key").Eq(bag.Prefix + key)) + + sql, params, err := q.ToSQL() + if err != nil { + return nil, false, fmt.Errorf("error getting session: %w", err) + } + + rows, err := c.db.QueryContext(ctx, sql, params...) 
+ if err != nil { + return nil, false, fmt.Errorf("error getting session: %w", err) + } + defer rows.Close() + + var ret []byte + found := false + + for rows.Next() { + err = rows.Scan(&ret) + if err != nil { + return nil, false, fmt.Errorf("error scanning session: %w", err) + } + found = true + } + + if err := rows.Err(); err != nil { + return nil, false, fmt.Errorf("error getting data from session: %w", err) + } + + return ret, found, nil +} + +// Set implements types.SessionStore. +func (c *C1File) Set(ctx context.Context, key string, value []byte, opt ...sessions.SessionStoreOption) error { + bag, err := applyBag(ctx, opt...) + if err != nil { + return fmt.Errorf("error applying session option: %w", err) + } + + // Use goqu's OnConflict for upsert behavior + q := c.db.Insert(sessionStore.Name()).Prepared(true) + q = q.Rows(goqu.Record{ + "sync_id": bag.SyncID, + "key": bag.Prefix + key, + "value": value, + }) + q = q.OnConflict(goqu.DoUpdate("sync_id, key", goqu.C("value").Set(value))) + + sql, params, err := q.ToSQL() + if err != nil { + return fmt.Errorf("error setting session: %w", err) + } + + _, err = c.db.ExecContext(ctx, sql, params...) + if err != nil { + return fmt.Errorf("error setting session: %w", err) + } + + return nil +} + +// SetMany implements types.SessionStore. +func (c *C1File) SetMany(ctx context.Context, values map[string][]byte, opt ...sessions.SessionStoreOption) error { + bag, err := applyBag(ctx, opt...) + if err != nil { + return fmt.Errorf("error applying session option: %w", err) + } + + if len(values) == 0 { + return nil + } + + // Build batch insert + var rows []interface{} + for key, value := range values { + rows = append(rows, goqu.Record{ + "sync_id": bag.SyncID, + "key": bag.Prefix + key, + "value": value, + }) + } + + q := c.db.Insert(sessionStore.Name()).Prepared(true) + q = q.Rows(rows...) + q = q.OnConflict(goqu.DoUpdate("sync_id, key", goqu.C("value").Set(goqu.I("EXCLUDED.value")))) + + sql, params, err := q.ToSQL() + if err != nil { + return fmt.Errorf("error setting many sessions: %w", err) + } + + _, err = c.db.ExecContext(ctx, sql, params...) + if err != nil { + return fmt.Errorf("error setting many sessions: %w", err) + } + + return nil +} + +// Delete implements types.SessionStore. +func (c *C1File) Delete(ctx context.Context, key string, opt ...sessions.SessionStoreOption) error { + bag, err := applyBag(ctx, opt...) + if err != nil { + return fmt.Errorf("error applying session option: %w", err) + } + + q := c.db.Delete(sessionStore.Name()).Prepared(true) + q = q.Where(goqu.C("sync_id").Eq(bag.SyncID)) + q = q.Where(goqu.C("key").Eq(bag.Prefix + key)) + + sql, params, err := q.ToSQL() + if err != nil { + return fmt.Errorf("error deleting session: %w", err) + } + + _, err = c.db.ExecContext(ctx, sql, params...) + if err != nil { + return fmt.Errorf("error deleting session: %w", err) + } + + return nil +} + +// Clear implements types.SessionStore. +func (c *C1File) Clear(ctx context.Context, opt ...sessions.SessionStoreOption) error { + bag, err := applyBag(ctx, opt...) + if err != nil { + return fmt.Errorf("error applying session option: %w", err) + } + + q := c.db.Delete(sessionStore.Name()).Prepared(true) + q = q.Where(goqu.C("sync_id").Eq(bag.SyncID)) + + if bag.Prefix != "" { + q = q.Where(goqu.C("key").Like(escapeLike(bag.Prefix) + "%")) + } + + sql, params, err := q.ToSQL() + if err != nil { + return fmt.Errorf("error clearing sessions: %w", err) + } + + _, err = c.db.ExecContext(ctx, sql, params...) 
+ if err != nil { + return fmt.Errorf("error clearing sessions: %w", err) + } + + return nil +} + +// GetMany implements types.SessionStore. +func (c *C1File) GetMany(ctx context.Context, keys []string, opt ...sessions.SessionStoreOption) (map[string][]byte, []string, error) { + bag, err := applyBag(ctx, opt...) + if err != nil { + return nil, nil, fmt.Errorf("session-get-many: error applying session option: %w", err) + } + + if len(keys) == 0 { + return make(map[string][]byte), nil, nil + } + prefixedKeys := make([]string, len(keys)) + if bag.Prefix == "" { + prefixedKeys = keys + } else { + for i, key := range keys { + prefixedKeys[i] = bag.Prefix + key + } + } + + q := c.db.From(sessionStore.Name()).Prepared(true) + q = q.Select("key", "value") + q = q.Where(goqu.C("sync_id").Eq(bag.SyncID)) + q = q.Where(goqu.C("key").In(prefixedKeys)) + q = q.Order(goqu.C("key").Asc()) + + sql, params, err := q.ToSQL() + if err != nil { + return nil, nil, fmt.Errorf("session-get-many: error generating SQL: %w", err) + } + + rows, err := c.db.QueryContext(ctx, sql, params...) + if err != nil { + return nil, nil, fmt.Errorf("session-get-many: error executing SQL: %w", err) + } + defer rows.Close() + + unprocessedKeys := make(map[string]struct{}, len(keys)) + // Initialize unprocessedKeys with all keys - we'll remove them as we process results + // Start by calculating size of all unprocessed keys (they'll be in the return slice) + + type item struct { + key string + value []byte + } + results := make([]item, 0, len(keys)) + messageSize := 0 + for rows.Next() { + var key string + var value []byte + err = rows.Scan(&key, &value) + if err != nil { + return nil, nil, fmt.Errorf("session-get-many: error scanning row: %w", err) + } + // Remove prefix from key to return original key + if bag.Prefix != "" && len(key) >= len(bag.Prefix) && key[:len(bag.Prefix)] == bag.Prefix { + key = key[len(bag.Prefix):] + } + results = append(results, item{key: key, value: value}) + // 10 is extra padding. The key goes into the response unconditionally. + messageSize += len(key) + 10 + } + + if err := rows.Err(); err != nil { + return nil, nil, fmt.Errorf("session-get-many: error getting data from session: %w", err) + } + + ret := make(map[string][]byte) + for _, r := range results { + value := r.value + key := r.key + + netItemSize := len(value) + 10 // 10 is extra padding for overhead. + if messageSize+netItemSize <= sessions.MaxSessionStoreSizeLimit { + messageSize += netItemSize + ret[key] = value + } else { + unprocessedKeys[key] = struct{}{} + } + } + + unprocessedKeysSlice := make([]string, 0, len(unprocessedKeys)) + for key := range unprocessedKeys { + unprocessedKeysSlice = append(unprocessedKeysSlice, key) + } + return ret, unprocessedKeysSlice, nil +} + +// GetAll implements types.SessionStore. +func (c *C1File) GetAll(ctx context.Context, pageToken string, opt ...sessions.SessionStoreOption) (map[string][]byte, string, error) { + bag, err := applyBag(ctx, opt...) 
+ if err != nil { + return nil, "", fmt.Errorf("session-get-all: error applying session option: %w", err) + } + + result := make(map[string][]byte) + messageSizeRemaining := sessions.MaxSessionStoreSizeLimit + for { + items, nextPageToken, itemsSize, err := c.getAllChunk(ctx, pageToken, messageSizeRemaining, bag) + if err != nil { + return nil, "", fmt.Errorf("session-get-all: error getting all data from session: %w", err) + } + maps.Copy(result, items) + + if len(items) == 0 { + break + } + + if nextPageToken == "" { + pageToken = "" + break + } + + if pageToken == nextPageToken { + return nil, "", fmt.Errorf("page token is the same as the next page token: %s", pageToken) + } + pageToken = nextPageToken + + messageSizeRemaining -= itemsSize + if messageSizeRemaining <= 0 { + break + } + } + + return result, pageToken, nil +} + +func (c *C1File) getAllChunk(ctx context.Context, pageToken string, sizeLimit int, bag *sessions.SessionStoreBag) (map[string][]byte, string, int, error) { + q := c.db.From(sessionStore.Name()).Prepared(true). + Select("key", "value"). + Where(goqu.C("sync_id").Eq(bag.SyncID)). + Order(goqu.C("key").Asc()). + Limit(100) + + if bag.Prefix != "" { + q = q.Where(goqu.C("key").Like(escapeLike(bag.Prefix) + "%")) + } + + if pageToken != "" { + q = q.Where(goqu.C("key").Gte(bag.Prefix + pageToken)) + } + + sql, params, err := q.ToSQL() + if err != nil { + return nil, "", 0, fmt.Errorf("session-get-all: error generating SQL: %w", err) + } + + rows, err := c.db.QueryContext(ctx, sql, params...) + if err != nil { + return nil, "", 0, fmt.Errorf("session-get-all: error executing SQL: %w", err) + } + defer rows.Close() + + result := make(map[string][]byte) + nextPageToken := "" + messageSize := 0 + tooBig := false + for rows.Next() { + var key string + var value []byte + err = rows.Scan(&key, &value) + if err != nil { + return nil, "", 0, fmt.Errorf("session-get-all: error scanning row: %w", err) + } + // Remove prefix from key to return original key + if bag.Prefix != "" && len(key) >= len(bag.Prefix) && key[:len(bag.Prefix)] == bag.Prefix { + key = key[len(bag.Prefix):] + } + nextPageToken = key + itemSize := len(key) + len(value) + 20 + if messageSize+itemSize > sizeLimit { + tooBig = true + break + } + if len(result) >= 100 { + break + } + result[key] = value + messageSize += itemSize + } + + if err := rows.Err(); err != nil { + return nil, "", 0, fmt.Errorf("session-get-all: error getting data from session: %w", err) + } + + if tooBig { + return result, nextPageToken, messageSize, nil + } + + if len(result) < 100 { + return result, "", messageSize, nil + } + + return result, nextPageToken, messageSize, nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go index 60ede846..9c3efa34 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go @@ -2,9 +2,12 @@ package dotc1z import ( "context" + "database/sql" "errors" "fmt" + "runtime" "strconv" + "sync" "time" "github.com/doug-martin/goqu/v9" @@ -19,8 +22,13 @@ import ( v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" ) +const bulkPutParallelThreshold = 100 +const insertChunkSize = 200 const maxPageSize = 10000 +// Use worker pool to limit goroutines. 
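
The constants above (bulkPutParallelThreshold, insertChunkSize, numWorkers) drive the parallel row preparation introduced further down in prepareConnectorObjectRowsParallel. A self-contained sketch of that chunked worker pattern, with hypothetical names (sketch, mapParallel):

package sketch

import (
	"fmt"
	"runtime"
	"sync"
)

// numWorkers mirrors the clamp used in the patch: at least 1, at most 4 workers.
var numWorkers = min(max(runtime.GOMAXPROCS(0), 1), 4)

// mapParallel applies fn to every item, fanning the work out over numWorkers
// goroutines. Each goroutine owns a contiguous index range of the output and
// error slices, so no locking is needed.
func mapParallel[T, R any](items []T, fn func(T) (R, error)) ([]R, error) {
	out := make([]R, len(items))
	errs := make([]error, len(items))
	chunk := (len(items) + numWorkers - 1) / numWorkers

	var wg sync.WaitGroup
	for w := 0; w < numWorkers; w++ {
		start := w * chunk
		if start >= len(items) {
			break
		}
		end := min(start+chunk, len(items))
		wg.Add(1)
		go func(start, end int) {
			defer wg.Done()
			for i := start; i < end; i++ {
				out[i], errs[i] = fn(items[i])
			}
		}(start, end)
	}
	wg.Wait()

	// Return the first error encountered, as the SDK helper does.
	for i, err := range errs {
		if err != nil {
			return nil, fmt.Errorf("item %d: %w", i, err)
		}
	}
	return out, nil
}

Partitioning by index range is the design choice that lets the real helper skip a mutex entirely while still reporting the first failing row.
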
+var numWorkers = min(max(runtime.GOMAXPROCS(0), 1), 4) + var allTableDescriptors = []tableDescriptor{ resourceTypes, resources, @@ -28,11 +36,12 @@ var allTableDescriptors = []tableDescriptor{ grants, syncRuns, assets, + sessionStore, } type tableDescriptor interface { Name() string - Schema() (string, []interface{}) + Schema() (string, []any) Version() string Migrations(ctx context.Context, db *goqu.Database) error } @@ -69,6 +78,11 @@ type hasPrincipalIdListRequest interface { GetPrincipalId() *v2.ResourceId } +type hasPrincipalResourceTypeIDsListRequest interface { + listRequest + GetPrincipalResourceTypeIds() []string +} + type protoHasID interface { proto.Message GetId() string @@ -92,8 +106,8 @@ func (c *C1File) throttledWarnSlowQuery(ctx context.Context, query string, durat } // listConnectorObjects uses a connector list request to fetch the corresponding data from the local db. -// It returns the raw bytes that need to be unmarshalled into the correct proto message. -func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req proto.Message) ([][]byte, string, error) { +// It returns a slice of typed proto messages constructed via the provided factory function. +func listConnectorObjects[T proto.Message](ctx context.Context, c *C1File, tableName string, req listRequest, factory func() T) ([]T, string, error) { ctx, span := tracer.Start(ctx, "C1File.listConnectorObjects") defer span.End() @@ -102,13 +116,7 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req return nil, "", err } - // If this doesn't look like a list request, bail - listReq, ok := req.(listRequest) - if !ok { - return nil, "", fmt.Errorf("c1file: invalid list request") - } - - annoSyncID, err := annotations.GetSyncIdFromAnnotations(listReq.GetAnnotations()) + annoSyncID, err := annotations.GetSyncIdFromAnnotations(req.GetAnnotations()) if err != nil { return nil, "", fmt.Errorf("error getting sync id from annotations for list request: %w", err) } @@ -145,32 +153,39 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req if resourceIdReq, ok := req.(hasResourceIdListRequest); ok { r := resourceIdReq.GetResourceId() - if r != nil && r.Resource != "" { - q = q.Where(goqu.C("resource_id").Eq(r.Resource)) - q = q.Where(goqu.C("resource_type_id").Eq(r.ResourceType)) + if r != nil && r.GetResource() != "" { + q = q.Where(goqu.C("resource_id").Eq(r.GetResource())) + q = q.Where(goqu.C("resource_type_id").Eq(r.GetResourceType())) } } if resourceReq, ok := req.(hasResourceListRequest); ok { r := resourceReq.GetResource() if r != nil { - q = q.Where(goqu.C("resource_id").Eq(r.Id.Resource)) - q = q.Where(goqu.C("resource_type_id").Eq(r.Id.ResourceType)) + q = q.Where(goqu.C("resource_id").Eq(r.GetId().GetResource())) + q = q.Where(goqu.C("resource_type_id").Eq(r.GetId().GetResourceType())) } } if entitlementReq, ok := req.(hasEntitlementListRequest); ok { e := entitlementReq.GetEntitlement() if e != nil { - q = q.Where(goqu.C("entitlement_id").Eq(e.Id)) + q = q.Where(goqu.C("entitlement_id").Eq(e.GetId())) } } if principalIdReq, ok := req.(hasPrincipalIdListRequest); ok { p := principalIdReq.GetPrincipalId() if p != nil { - q = q.Where(goqu.C("principal_resource_id").Eq(p.Resource)) - q = q.Where(goqu.C("principal_resource_type_id").Eq(p.ResourceType)) + q = q.Where(goqu.C("principal_resource_id").Eq(p.GetResource())) + q = q.Where(goqu.C("principal_resource_type_id").Eq(p.GetResourceType())) + } + } + + if principalResourceTypeIDsReq, ok := 
req.(hasPrincipalResourceTypeIDsListRequest); ok { + p := principalResourceTypeIDsReq.GetPrincipalResourceTypeIds() + if len(p) > 0 { + q = q.Where(goqu.C("principal_resource_type_id").In(p)) } } @@ -179,32 +194,24 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req case reqSyncID != "": q = q.Where(goqu.C("sync_id").Eq(reqSyncID)) default: - var latestSyncRun *syncRun - var err error - latestSyncRun, err = c.getFinishedSync(ctx, 0, connectorstore.SyncTypeFull) + // Use cached sync run to avoid N+1 queries during pagination + latestSyncRun, err := c.getCachedViewSyncRun(ctx) if err != nil { return nil, "", err } - if latestSyncRun == nil { - latestSyncRun, err = c.getLatestUnfinishedSync(ctx, connectorstore.SyncTypeAny) - if err != nil { - return nil, "", err - } - } - if latestSyncRun != nil { q = q.Where(goqu.C("sync_id").Eq(latestSyncRun.ID)) } } // If a page token is provided, begin listing rows greater than or equal to the token - if listReq.GetPageToken() != "" { - q = q.Where(goqu.C("id").Gte(listReq.GetPageToken())) + if req.GetPageToken() != "" { + q = q.Where(goqu.C("id").Gte(req.GetPageToken())) } // Clamp the page size - pageSize := listReq.GetPageSize() + pageSize := req.GetPageSize() if pageSize > maxPageSize || pageSize == 0 { pageSize = maxPageSize } @@ -214,8 +221,6 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req // Select 1 more than we asked for so we know if there is another page q = q.Limit(uint(pageSize + 1)) - var ret [][]byte - query, args, err := q.ToSQL() if err != nil { return nil, "", err @@ -239,21 +244,29 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req c.throttledWarnSlowQuery(ctx, query, queryDuration) } + var unmarshalerOptions = proto.UnmarshalOptions{ + Merge: true, + DiscardUnknown: true, + } var count uint32 = 0 lastRow := 0 + var data sql.RawBytes + var ret []T for rows.Next() { count++ if count > pageSize { break } - rowId := 0 - data := make([]byte, 0) - err := rows.Scan(&rowId, &data) + err := rows.Scan(&lastRow, &data) if err != nil { return nil, "", err } - lastRow = rowId - ret = append(ret, data) + t := factory() + err = unmarshalerOptions.Unmarshal(data, t) + if err != nil { + return nil, "", err + } + ret = append(ret, t) } if rows.Err() != nil { return nil, "", rows.Err() @@ -263,48 +276,158 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req if count > pageSize { nextPageToken = strconv.Itoa(lastRow + 1) } - return ret, nextPageToken, nil } -var protoMarshaler = proto.MarshalOptions{Deterministic: true} +var protoMarshaler = proto.MarshalOptions{Deterministic: false} -// prepareConnectorObjectRows prepares the rows for bulk insertion. -func prepareConnectorObjectRows[T proto.Message]( +// prepareSingleConnectorObjectRow processes a single message and returns the prepared record. 
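
Stepping back to the listConnectorObjects refactor above: instead of returning raw [][]byte for callers to unmarshal, the helper is now generic over the proto message type and takes a factory function. A reduced sketch of just the decode step (decodeAll and the sketch package are hypothetical; the real helper also builds the SQL query and pagination token):

package sketch

import (
	"google.golang.org/protobuf/proto"
)

// decodeAll shows the factory pattern: the caller supplies func() T to allocate
// a fresh message, and each raw blob from the data column is unmarshalled into
// it using the same Merge/DiscardUnknown options the patch configures.
func decodeAll[T proto.Message](blobs [][]byte, factory func() T) ([]T, error) {
	opts := proto.UnmarshalOptions{Merge: true, DiscardUnknown: true}
	out := make([]T, 0, len(blobs))
	for _, b := range blobs {
		msg := factory()
		if err := opts.Unmarshal(b, msg); err != nil {
			return nil, err
		}
		out = append(out, msg)
	}
	return out, nil
}

Call sites in this patch then read like listConnectorObjects(ctx, c, grants.Name(), request, func() *v2.Grant { return &v2.Grant{} }).
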
+func prepareSingleConnectorObjectRow[T proto.Message]( + c *C1File, + msg T, + extractFields func(m T) (goqu.Record, error), +) (*goqu.Record, error) { + messageBlob, err := protoMarshaler.Marshal(msg) + if err != nil { + return nil, err + } + + fields, err := extractFields(msg) + if err != nil { + return nil, err + } + if fields == nil { + fields = goqu.Record{} + } + + if _, idSet := fields["external_id"]; !idSet { + idGetter, ok := any(msg).(protoHasID) + if !ok { + return nil, fmt.Errorf("unable to get ID for object") + } + fields["external_id"] = idGetter.GetId() + } + fields["data"] = messageBlob + fields["sync_id"] = c.currentSyncID + fields["discovered_at"] = time.Now().Format("2006-01-02 15:04:05.999999999") + + return &fields, nil +} + +// prepareConnectorObjectRowsSerial prepares rows sequentially for bulk insertion. +func prepareConnectorObjectRowsSerial[T proto.Message]( c *C1File, msgs []T, extractFields func(m T) (goqu.Record, error), ) ([]*goqu.Record, error) { rows := make([]*goqu.Record, len(msgs)) for i, m := range msgs { - messageBlob, err := protoMarshaler.Marshal(m) + row, err := prepareSingleConnectorObjectRow(c, m, extractFields) if err != nil { return nil, err } + rows[i] = row + } + return rows, nil +} - fields, err := extractFields(m) - if err != nil { - return nil, err - } - if fields == nil { - fields = goqu.Record{} +// prepareConnectorObjectRowsParallel prepares rows for bulk insertion using parallel processing. +// For batches smaller than bulkPutParallelThreshold, it falls back to sequential processing. +func prepareConnectorObjectRowsParallel[T proto.Message]( + c *C1File, + msgs []T, + extractFields func(m T) (goqu.Record, error), +) ([]*goqu.Record, error) { + if len(msgs) == 0 { + return nil, nil + } + + protoMarshallers := make([]proto.MarshalOptions, numWorkers) + for i := range numWorkers { + // Don't enable deterministic marshaling, as it sorts keys in lexicographical order which hurts performance. 
+ protoMarshallers[i] = proto.MarshalOptions{} + } + + rows := make([]*goqu.Record, len(msgs)) + errs := make([]error, len(msgs)) + + // Capture values that are the same for all rows (avoid repeated access) + syncID := c.currentSyncID + discoveredAt := time.Now().Format("2006-01-02 15:04:05.999999999") + + chunkSize := (len(msgs) + numWorkers - 1) / numWorkers + + var wg sync.WaitGroup + + for w := range numWorkers { + start := w * chunkSize + end := min(start+chunkSize, len(msgs)) + if start >= len(msgs) { + break } - if _, idSet := fields["external_id"]; !idSet { - idGetter, ok := any(m).(protoHasID) - if !ok { - return nil, fmt.Errorf("unable to get ID for object") + wg.Add(1) + go func(start, end int, worker int) { + defer wg.Done() + for i := start; i < end; i++ { + m := msgs[i] + + messageBlob, err := protoMarshallers[worker].Marshal(m) + if err != nil { + errs[i] = err + continue + } + + fields, err := extractFields(m) + if err != nil { + errs[i] = err + continue + } + if fields == nil { + fields = goqu.Record{} + } + + if _, idSet := fields["external_id"]; !idSet { + idGetter, ok := any(m).(protoHasID) + if !ok { + errs[i] = fmt.Errorf("unable to get ID for object at index %d", i) + continue + } + fields["external_id"] = idGetter.GetId() + } + fields["data"] = messageBlob + fields["sync_id"] = syncID + fields["discovered_at"] = discoveredAt + rows[i] = &fields } - fields["external_id"] = idGetter.GetId() + }(start, end, w) + } + + wg.Wait() + + // Check for errors (return first error encountered) + for i, err := range errs { + if err != nil { + return nil, fmt.Errorf("error preparing row %d: %w", i, err) } - fields["data"] = messageBlob - fields["sync_id"] = c.currentSyncID - fields["discovered_at"] = time.Now().Format("2006-01-02 15:04:05.999999999") - rows[i] = &fields } + return rows, nil } +// prepareConnectorObjectRows prepares the rows for bulk insertion. +// It uses parallel processing if the row count is greater than bulkPutParallelThreshold. +func prepareConnectorObjectRows[T proto.Message]( + c *C1File, + msgs []T, + extractFields func(m T) (goqu.Record, error), +) ([]*goqu.Record, error) { + if len(msgs) > bulkPutParallelThreshold { + return prepareConnectorObjectRowsParallel(c, msgs, extractFields) + } + return prepareConnectorObjectRowsSerial(c, msgs, extractFields) +} + // executeChunkedInsert executes the insert query in chunks. 
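
The chunked-insert helper referenced here is mostly unchanged by this patch (the only visible change is the chunk size, now insertChunkSize = 200 instead of 100), so its full body does not appear in the diff. A rough sketch of the chunking it performs, under that assumption and with hypothetical names:

package sketch

// chunkRows splits prepared rows into batches of at most size elements; the
// caller then issues one multi-row INSERT per batch, keeping the number of
// bound parameters in any single statement bounded.
func chunkRows[T any](rows []T, size int) [][]T {
	if size <= 0 {
		size = 1
	}
	var chunks [][]T
	for start := 0; start < len(rows); start += size {
		end := min(start+size, len(rows))
		chunks = append(chunks, rows[start:end])
	}
	return chunks
}
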
func executeChunkedInsert( ctx context.Context, @@ -313,7 +436,7 @@ func executeChunkedInsert( rows []*goqu.Record, buildQueryFn func(*goqu.InsertDataset, []*goqu.Record) (*goqu.InsertDataset, error), ) error { - chunkSize := 100 + chunkSize := insertChunkSize chunks := len(rows) / chunkSize if len(rows)%chunkSize != 0 { chunks++ @@ -457,8 +580,8 @@ func (c *C1File) getResourceObject(ctx context.Context, resourceID *v2.ResourceI q := c.db.From(resources.Name()).Prepared(true) q = q.Select("data") - q = q.Where(goqu.C("resource_type_id").Eq(resourceID.ResourceType)) - q = q.Where(goqu.C("external_id").Eq(fmt.Sprintf("%s:%s", resourceID.ResourceType, resourceID.Resource))) + q = q.Where(goqu.C("resource_type_id").Eq(resourceID.GetResourceType())) + q = q.Where(goqu.C("external_id").Eq(fmt.Sprintf("%s:%s", resourceID.GetResourceType(), resourceID.GetResource()))) switch { case syncID != "": diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go index 08157f2f..c8a7b11a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go @@ -95,6 +95,29 @@ type syncRun struct { ParentSyncID string } +// getCachedViewSyncRun returns the cached sync run for read operations. +// This avoids N+1 queries when paginating through listConnectorObjects. +// The result is computed once and cached for the lifetime of the C1File. +func (c *C1File) getCachedViewSyncRun(ctx context.Context) (*syncRun, error) { + ctx, span := tracer.Start(ctx, "C1File.getCachedViewSyncRun") + defer span.End() + + c.cachedViewSyncOnce.Do(func() { + // First try to get a finished full sync + c.cachedViewSyncRun, c.cachedViewSyncErr = c.getFinishedSync(ctx, 0, connectorstore.SyncTypeFull) + if c.cachedViewSyncErr != nil { + return + } + + // If no finished sync, try to get an unfinished one + if c.cachedViewSyncRun == nil { + c.cachedViewSyncRun, c.cachedViewSyncErr = c.getLatestUnfinishedSync(ctx, connectorstore.SyncTypeAny) + } + }) + + return c.cachedViewSyncRun, c.cachedViewSyncErr +} + func (c *C1File) getLatestUnfinishedSync(ctx context.Context, syncType connectorstore.SyncType) (*syncRun, error) { ctx, span := tracer.Start(ctx, "C1File.getLatestUnfinishedSync") defer span.End() @@ -359,6 +382,10 @@ func (c *C1File) CheckpointSync(ctx context.Context, syncToken string) error { ctx, span := tracer.Start(ctx, "C1File.CheckpointSync") defer span.End() + if c.readOnly { + return ErrReadOnly + } + err := c.validateSyncDb(ctx) if err != nil { return err @@ -468,6 +495,12 @@ func (c *C1File) StartOrResumeSync(ctx context.Context, syncType connectorstore. return c.currentSyncID, true, nil } +// SetSyncID sets the current sync ID. This is only intended for testing. 
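
Returning to getCachedViewSyncRun above: the pattern is a once-per-file memoized lookup in which both the result and the error are cached, so a paginated read does not repeat the same sync-run query (or the same failure) for every page. A stripped-down sketch with hypothetical names:

package sketch

import (
	"context"
	"sync"
)

// syncRun is a stand-in for the SDK's internal syncRun struct.
type syncRun struct{ ID string }

// viewSyncCache memoizes a single lookup for the lifetime of its owner, the
// same shape as the cachedViewSyncOnce/cachedViewSyncRun/cachedViewSyncErr
// fields the patch relies on.
type viewSyncCache struct {
	once sync.Once
	run  *syncRun
	err  error
}

func (v *viewSyncCache) get(ctx context.Context, lookup func(context.Context) (*syncRun, error)) (*syncRun, error) {
	v.once.Do(func() {
		v.run, v.err = lookup(ctx)
	})
	return v.run, v.err
}
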
+func (c *C1File) SetSyncID(_ context.Context, syncID string) error { + c.currentSyncID = syncID + return nil +} + func (c *C1File) StartNewSync(ctx context.Context, syncType connectorstore.SyncType, parentSyncID string) (string, error) { ctx, span := tracer.Start(ctx, "C1File.StartNewSync") defer span.End() @@ -511,6 +544,10 @@ func (c *C1File) StartNewSync(ctx context.Context, syncType connectorstore.SyncT } func (c *C1File) insertSyncRun(ctx context.Context, syncID string, syncType connectorstore.SyncType, parentSyncID string) error { + if c.readOnly { + return ErrReadOnly + } + q := c.db.Insert(syncRuns.Name()) q = q.Rows(goqu.Record{ "sync_id": syncID, @@ -672,10 +709,12 @@ func (c *C1File) Cleanup(ctx context.Context) error { } } + l.Debug("vacuuming database") err = c.Vacuum(ctx) if err != nil { return err } + l.Debug("vacuum complete") c.dbUpdated = true @@ -747,73 +786,73 @@ func (c *C1File) GetSync(ctx context.Context, request *reader_v2.SyncsReaderServ ctx, span := tracer.Start(ctx, "C1File.GetSync") defer span.End() - sr, err := c.getSync(ctx, request.SyncId) + sr, err := c.getSync(ctx, request.GetSyncId()) if err != nil { - return nil, fmt.Errorf("error getting sync '%s': %w", request.SyncId, err) + return nil, fmt.Errorf("error getting sync '%s': %w", request.GetSyncId(), err) } - return &reader_v2.SyncsReaderServiceGetSyncResponse{ - Sync: &reader_v2.SyncRun{ + return reader_v2.SyncsReaderServiceGetSyncResponse_builder{ + Sync: reader_v2.SyncRun_builder{ Id: sr.ID, StartedAt: toTimeStamp(sr.StartedAt), EndedAt: toTimeStamp(sr.EndedAt), SyncToken: sr.SyncToken, SyncType: string(sr.Type), ParentSyncId: sr.ParentSyncID, - }, - }, nil + }.Build(), + }.Build(), nil } func (c *C1File) ListSyncs(ctx context.Context, request *reader_v2.SyncsReaderServiceListSyncsRequest) (*reader_v2.SyncsReaderServiceListSyncsResponse, error) { ctx, span := tracer.Start(ctx, "C1File.ListSyncs") defer span.End() - syncs, nextPageToken, err := c.ListSyncRuns(ctx, request.PageToken, request.PageSize) + syncs, nextPageToken, err := c.ListSyncRuns(ctx, request.GetPageToken(), request.GetPageSize()) if err != nil { return nil, fmt.Errorf("error listing syncs: %w", err) } syncRuns := make([]*reader_v2.SyncRun, len(syncs)) for i, sr := range syncs { - syncRuns[i] = &reader_v2.SyncRun{ + syncRuns[i] = reader_v2.SyncRun_builder{ Id: sr.ID, StartedAt: toTimeStamp(sr.StartedAt), EndedAt: toTimeStamp(sr.EndedAt), SyncToken: sr.SyncToken, SyncType: string(sr.Type), ParentSyncId: sr.ParentSyncID, - } + }.Build() } - return &reader_v2.SyncsReaderServiceListSyncsResponse{ + return reader_v2.SyncsReaderServiceListSyncsResponse_builder{ Syncs: syncRuns, NextPageToken: nextPageToken, - }, nil + }.Build(), nil } func (c *C1File) GetLatestFinishedSync(ctx context.Context, request *reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest) (*reader_v2.SyncsReaderServiceGetLatestFinishedSyncResponse, error) { ctx, span := tracer.Start(ctx, "C1File.GetLatestFinishedSync") defer span.End() - sync, err := c.getFinishedSync(ctx, 0, connectorstore.SyncType(request.SyncType)) + sync, err := c.getFinishedSync(ctx, 0, connectorstore.SyncType(request.GetSyncType())) if err != nil { return nil, fmt.Errorf("error fetching latest finished sync: %w", err) } if sync == nil { - return &reader_v2.SyncsReaderServiceGetLatestFinishedSyncResponse{ + return reader_v2.SyncsReaderServiceGetLatestFinishedSyncResponse_builder{ Sync: nil, - }, nil + }.Build(), nil } - return &reader_v2.SyncsReaderServiceGetLatestFinishedSyncResponse{ - Sync: 
&reader_v2.SyncRun{ + return reader_v2.SyncsReaderServiceGetLatestFinishedSyncResponse_builder{ + Sync: reader_v2.SyncRun_builder{ Id: sync.ID, StartedAt: toTimeStamp(sync.StartedAt), EndedAt: toTimeStamp(sync.EndedAt), SyncToken: sync.SyncToken, SyncType: string(sync.Type), ParentSyncId: sync.ParentSyncID, - }, - }, nil + }.Build(), + }.Build(), nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/decode_hooks.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/decode_hooks.go new file mode 100644 index 00000000..202ebb56 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/decode_hooks.go @@ -0,0 +1,171 @@ +package field + +import ( + "encoding/base64" + "fmt" + "net/url" + "os" + "reflect" + "strings" + + "github.com/mitchellh/mapstructure" +) + +type DecodeHookOption func(*decodeHookConfig) + +type decodeHookConfig struct { + hookFuncs []mapstructure.DecodeHookFunc +} + +// ComposeDecodeHookFunc returns a mapstructure.DecodeHookFunc that composes +// the default hook functions with any additional hook functions configured. +func ComposeDecodeHookFunc(opts ...DecodeHookOption) mapstructure.DecodeHookFunc { + config := &decodeHookConfig{ + hookFuncs: []mapstructure.DecodeHookFunc{ + // default hook functions used by viper + mapstructure.StringToTimeDurationHookFunc(), + StringToSliceHookFunc(","), + }, + } + for _, opt := range opts { + opt(config) + } + return mapstructure.ComposeDecodeHookFunc(config.hookFuncs...) +} + +func WithAdditionalDecodeHooks(funcs ...mapstructure.DecodeHookFunc) DecodeHookOption { + return func(c *decodeHookConfig) { + c.hookFuncs = append(c.hookFuncs, funcs...) + } +} + +// FileUploadDecodeHook returns a mapstructure.DecodeHookFunc that automatically +// converts string values to []byte for file upload fields, supporting: +// 1. File paths (reads file content) +// 2. Data URLs of JSON with base64 encoding (data:application/json;base64,) +// 3. Raw base64 content +// 4. Raw unencoded content. +func FileUploadDecodeHook(readFromPath bool) mapstructure.DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + // Only apply to string -> []byte conversions + if f.Kind() != reflect.String || t.Kind() != reflect.Slice || t.Elem().Kind() != reflect.Uint8 { + return data, nil + } + + str, ok := data.(string) + if !ok { + return data, nil + } + + if readFromPath { + return getFileContentFromPath(str) + } + + return parseFileContent(str) + } +} + +// getFileContentFromPath returns the file content from a path. +func getFileContentFromPath(path string) ([]byte, error) { + if path == "" { + // don't error if the path is empty, leave that to the field validation rules + return []byte{}, nil + } + + // Check if the file exists + fileInfo, err := os.Stat(path) + if err != nil { + return nil, fmt.Errorf("cannot access file: %w", err) + } + + // Check file size limit (2MB) + maxFileSize := 2 * 1024 * 1024 + if fileInfo.Size() > int64(maxFileSize) { + return nil, fmt.Errorf("file too large: %d bytes exceeds limit of %d bytes", fileInfo.Size(), maxFileSize) + } + + // Read the file + content, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("error reading file: %w", err) + } + return content, nil +} + +// parseFileContent returns the file upload content from a string field value. 
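For context on how a hook like FileUploadDecodeHook takes effect, the sketch below wires a string-to-[]byte decode hook into mapstructure in the same way; the config struct, the "credentials" field name, and the base64-only hook body are illustrative assumptions rather than the SDK's actual wiring.

package main

import (
	"encoding/base64"
	"fmt"
	"reflect"

	"github.com/mitchellh/mapstructure"
)

// config is a hypothetical target struct; "credentials" stands in for any
// file-upload style field that should arrive as raw bytes.
type config struct {
	Credentials []byte `mapstructure:"credentials"`
}

// base64ToBytesHook mirrors the base64 branch of parseFileContent: it only
// fires for string sources decoded into []byte targets.
func base64ToBytesHook(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
	if f.Kind() != reflect.String || t.Kind() != reflect.Slice || t.Elem().Kind() != reflect.Uint8 {
		return data, nil
	}
	return base64.StdEncoding.DecodeString(data.(string))
}

func main() {
	var out config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: base64ToBytesHook,
		Result:     &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"credentials": "aGVsbG8="}); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out.Credentials) // hello
}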
+func parseFileContent(data string) ([]byte, error) { + if data == "" { + // don't error if the data is empty, leave that to the field validation rules + return []byte{}, nil + } + + // Check if it's a data URL first + if strings.HasPrefix(data, "data:") { + return parseJSONBase64DataURL(data) + } + + // Check if it's a base64 encoded string + if decoded, err := base64.StdEncoding.DecodeString(data); err == nil { + return decoded, nil + } + + // Return the content as-is + return []byte(data), nil +} + +// parseJSONBase64DataURL parses a data URL and returns the decoded content. +// Errors if the data is not MIME type application/json and base64 encoded. +func parseJSONBase64DataURL(dataURL string) ([]byte, error) { + parsedURL, err := url.Parse(dataURL) + if err != nil { + return nil, fmt.Errorf("invalid data URL: %w", err) + } + + if parsedURL.Scheme != "data" { + return nil, fmt.Errorf("expected data URL scheme, got: %s", parsedURL.Scheme) + } + + // Split the data URL into media type and data + parts := strings.SplitN(parsedURL.Opaque, ",", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid data URL format: missing comma separator") + } + mediaType := parts[0] + data := parts[1] + + // Check if it's base64 encoded and MIME type application/json + if !strings.HasSuffix(mediaType, ";base64") { + return nil, fmt.Errorf("expected base64 data, got: %s", mediaType) + } + if !strings.HasPrefix(mediaType, "application/json") { + return nil, fmt.Errorf("expected MIME type application/json, got: %s", mediaType) + } + + decoded, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return nil, fmt.Errorf("failed to decode base64 data: %w", err) + } + return decoded, nil +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +// Note: this differs from mapstructure.StringToSliceHookFunc only in that it +// skips cases when the target type is []uint8 (ie []byte). 
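parseJSONBase64DataURL accepts only payloads of the exact shape data:application/json;base64,<payload>. The sketch below builds such a URL and round-trips it with the standard library; it mirrors the media-type and comma-split checks rather than calling the unexported SDK helper, and the service-account JSON is an invented example value.

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	payload := []byte(`{"type":"service_account","project_id":"example"}`)

	// Build a data URL in the only shape the parser accepts:
	// application/json media type with ;base64 encoding.
	dataURL := "data:application/json;base64," + base64.StdEncoding.EncodeToString(payload)

	// Round-trip it the same way: split media type from payload on the
	// first comma, check prefix/suffix, then decode.
	rest := strings.TrimPrefix(dataURL, "data:")
	parts := strings.SplitN(rest, ",", 2)
	mediaType, data := parts[0], parts[1]
	if !strings.HasPrefix(mediaType, "application/json") || !strings.HasSuffix(mediaType, ";base64") {
		panic("unexpected media type: " + mediaType)
	}
	decoded, err := base64.StdEncoding.DecodeString(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded))
}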
+func StringToSliceHookFunc(sep string) mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Slice || t.Elem().Kind() == reflect.Uint8 { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go index ef8f093e..bc995d11 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go @@ -35,6 +35,10 @@ var DefaultRelationships = []SchemaFieldRelationship{ []SchemaField{externalResourceEntitlementIdFilter}, []SchemaField{externalResourceC1ZField}, ), + FieldsDependentOn( + []SchemaField{skipGrants}, + []SchemaField{targetedSyncResourceIDs}, + ), } func EnsureDefaultRelationships(original []SchemaFieldRelationship) []SchemaFieldRelationship { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go index 8e92b832..5805b9f0 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go @@ -59,6 +59,11 @@ var ( WithDescription("The start time of the event feed to read events from"), WithPersistent(true), WithExportTarget(ExportTargetNone)) + eventFeedCursorField = StringField("event-feed-cursor", + WithHidden(true), + WithDescription("The cursor to use for resuming the event feed from a specific point"), + WithPersistent(true), + WithExportTarget(ExportTargetNone)) fileField = StringField("file", WithShortHand("f"), WithDefaultValue("sync.c1z"), WithDescription("The path to the c1z file to sync with"), WithPersistent(true), WithExportTarget(ExportTargetNone)) grantEntitlementField = StringField("grant-entitlement", WithHidden(true), WithDescription("The id of the entitlement to grant to the supplied principal"), @@ -90,6 +95,25 @@ var ( WithPersistent(true), WithExportTarget(ExportTargetNone), ) + + authMethod = StringField( + "auth-method", + WithDescription(""), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) + + skipGrants = BoolField("skip-grants", + WithDescription("This must be set to skip syncing of grants only (entitlements will still be synced)"), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + WithHidden(true), + ) + + syncResourceTypeIDs = StringSliceField("sync-resource-types", + WithDescription("The resource type IDs to sync"), + WithPersistent(true), + WithExportTarget(ExportTargetNone)) diffSyncsField = BoolField( "diff-syncs", WithDescription("Create a new partial SyncID from a base and applied sync."), @@ -148,6 +172,52 @@ var ( WithPersistent(true), WithExportTarget(ExportTargetNone), ) + invokeActionResourceTypeField = StringField("invoke-action-resource-type", + WithHidden(true), + WithDescription("The resource type ID for resource-scoped actions"), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) + + listActionSchemasField = BoolField("list-action-schemas", + WithHidden(true), + WithDescription("List available action schemas"), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) + listActionSchemasResourceTypeField = StringField("list-action-schemas-resource-type", + 
WithHidden(true), + WithDescription("Filter action schemas by resource type ID"), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) + + listResourceActionsField = StringField("list-resource-actions", + WithDescription("The resource type ID to list actions for"), + WithHidden(true), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) + + invokeResourceActionField = StringField("invoke-resource-action", + WithDescription("The name of the action to invoke"), + WithHidden(true), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) + invokeResourceActionTypeField = StringField("invoke-resource-action-resource-type", + WithDescription("The resource type of the action to invoke"), + WithHidden(true), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) + invokeResourceActionArgsField = StringField("invoke-resource-action-args", + WithHidden(true), + WithDescription("JSON-formatted object of map keys and values like '{ 'key': 'value' }'"), + WithDefaultValue("{}"), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) otelCollectorEndpoint = StringField(OtelCollectorEndpointFieldName, WithDescription("The endpoint of the OpenTelemetry collector to send observability data to (used for both tracing and logging if specific endpoints are not provided)"), @@ -207,6 +277,13 @@ var ( WithRequired(true), WithDescription("The expected audience claim in the JWT (optional)"), WithExportTarget(ExportTargetNone)) + + ServerSessionStoreMaximumSizeField = IntField("session-store-maximum-size", + WithDescription("The maximum size of the local in-memory session store cache in bytes."), + WithDefaultValue(1024*1024*15), + WithExportTarget(ExportTargetOps), + WithHidden(true), + WithPersistent(true)) ) func LambdaServerFields() []SchemaField { @@ -243,6 +320,7 @@ var DefaultFields = []SchemaField{ eventFeedField, eventFeedIdField, eventFeedStartAtField, + eventFeedCursorField, fileField, grantEntitlementField, grantPrincipalField, @@ -257,7 +335,9 @@ var DefaultFields = []SchemaField{ logLevelDebugExpiresAtField, skipFullSync, targetedSyncResourceIDs, + syncResourceTypeIDs, skipEntitlementsAndGrants, + skipGrants, externalResourceC1ZField, externalResourceEntitlementIdFilter, diffSyncsField, @@ -269,6 +349,14 @@ var DefaultFields = []SchemaField{ compactSyncsField, invokeActionField, invokeActionArgsField, + invokeActionResourceTypeField, + listActionSchemasField, + listActionSchemasResourceTypeField, + listResourceActionsField, + invokeResourceActionField, + invokeResourceActionTypeField, + invokeResourceActionArgsField, + ServerSessionStoreMaximumSizeField, otelCollectorEndpoint, otelCollectorEndpointTLSCertPath, @@ -276,6 +364,8 @@ var DefaultFields = []SchemaField{ otelCollectorEndpointTlSInsecure, otelTracingDisabled, otelLoggingDisabled, + + authMethod, } func IsFieldAmongDefaultList(f SchemaField) bool { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/field_group.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/field_group.go new file mode 100644 index 00000000..884c23e1 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/field_group.go @@ -0,0 +1,26 @@ +package field + +type SchemaFieldGroup struct { + Name string + DisplayName string + HelpText string + Fields []SchemaField + Default bool +} + +func WithFieldGroups(fieldGroups []SchemaFieldGroup) configOption { + return func(c Configuration) Configuration { + c.FieldGroups = fieldGroups + + return c + } +} + +func (i *SchemaFieldGroup) FieldMap() 
map[string]SchemaField { + fieldMap := make(map[string]SchemaField, len(i.Fields)) + for _, f := range i.Fields { + fieldMap[f.FieldName] = f + } + + return fieldMap +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/field_options.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/field_options.go index 874c7535..90288881 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/field_options.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/field_options.go @@ -21,22 +21,22 @@ func WithRequired(required bool) fieldOption { if o.Rules.i == nil { o.Rules.i = &v1_conf.Int64Rules{} } - o.Rules.i.IsRequired = required + o.Rules.i.SetIsRequired(required) case StringVariant: if o.Rules.s == nil { o.Rules.s = &v1_conf.StringRules{} } - o.Rules.s.IsRequired = required + o.Rules.s.SetIsRequired(required) case StringSliceVariant: if o.Rules.ss == nil { o.Rules.ss = &v1_conf.RepeatedStringRules{} } - o.Rules.ss.IsRequired = required + o.Rules.ss.SetIsRequired(required) case StringMapVariant: if o.Rules.sm == nil { o.Rules.sm = &v1_conf.StringMapRules{} } - o.Rules.sm.IsRequired = required + o.Rules.sm.SetIsRequired(required) default: panic(fmt.Sprintf("field %s has unsupported type %s", o.FieldName, o.Variant)) } @@ -236,11 +236,11 @@ func NewStringMapBuilder(rules *v1_conf.StringMapRules) *StringMapRuler { } func (r *StringMapRuler) WithRequired(required bool) *StringMapRuler { - r.rules.IsRequired = required + r.rules.SetIsRequired(required) return r } func (r *StringMapRuler) WithValidateEmpty(validateEmpty bool) *StringMapRuler { - r.rules.ValidateEmpty = validateEmpty + r.rules.SetValidateEmpty(validateEmpty) return r } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go index 5aca646d..a43363e2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go @@ -74,6 +74,9 @@ type SchemaField struct { ConnectorConfig connectorConfig WasReExported bool + + // Groups + FieldGroups []SchemaFieldGroup } type SchemaTypes interface { @@ -213,6 +216,27 @@ func StringField(name string, optional ...fieldOption) SchemaField { return field } +func FileUploadField(name string, bonusStrings []string, optional ...fieldOption) SchemaField { + field := SchemaField{ + FieldName: name, + Variant: StringVariant, + DefaultValue: "", + ExportTarget: ExportTargetGUI, + Rules: FieldRule{}, + SyncerConfig: syncerConfig{}, + ConnectorConfig: connectorConfig{ + FieldType: FileUpload, + BonusStrings: bonusStrings, + }, + } + + for _, o := range optional { + field = o(field) + } + + return field +} + func IntField(name string, optional ...fieldOption) SchemaField { field := SchemaField{ FieldName: name, @@ -274,7 +298,7 @@ func SelectField(name string, options []string, optional ...fieldOption) SchemaF DefaultValue: "", ExportTarget: ExportTargetGUI, Rules: FieldRule{ - s: &v1_conf.StringRules{In: options}, + s: v1_conf.StringRules_builder{In: options}.Build(), }, SyncerConfig: syncerConfig{}, ConnectorConfig: connectorConfig{FieldType: Text}, @@ -286,3 +310,21 @@ func SelectField(name string, options []string, optional ...fieldOption) SchemaF return field } + +func Oauth2Field(name string, optional ...fieldOption) SchemaField { + field := SchemaField{ + FieldName: name, + Variant: StringVariant, + DefaultValue: "", + ExportTarget: ExportTargetGUI, + Rules: FieldRule{}, + SyncerConfig: syncerConfig{}, + ConnectorConfig: 
connectorConfig{FieldType: OAuth2}, + } + + for _, o := range optional { + field = o(field) + } + + return field +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/marshal.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/marshal.go index a143d98a..029e97f8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/marshal.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/marshal.go @@ -36,7 +36,9 @@ func (c *Configuration) Marshal() ([]byte, error) { } func (c Configuration) marshal() (*v1_conf.Configuration, error) { - conf := &v1_conf.Configuration{ + var err error + + conf := v1_conf.Configuration_builder{ DisplayName: c.DisplayName, HelpUrl: c.HelpUrl, IconUrl: c.IconUrl, @@ -44,147 +46,208 @@ func (c Configuration) marshal() (*v1_conf.Configuration, error) { IsDirectory: c.IsDirectory, SupportsExternalResources: c.SupportsExternalResources, RequiresExternalConnector: c.RequiresExternalConnector, + }.Build() + + // Fields + conf.Fields, conf.Constraints, err = mapFieldsAndConstraints(c.Fields, c.Constraints) + if err != nil { + return nil, fmt.Errorf("failed to convert fields and constraints to v1: %w", err) + } + + fieldGroups := make([]*v1_conf.FieldGroup, 0, len(c.FieldGroups)) + for _, group := range c.FieldGroups { + fieldGroups = append(fieldGroups, fieldGroupToV1(group)) } + conf.SetFieldGroups(fieldGroups) + + return conf, nil +} + +func fieldGroupToV1(fg SchemaFieldGroup) *v1_conf.FieldGroup { + fieldGroupV1 := v1_conf.FieldGroup_builder{ + Name: fg.Name, + DisplayName: fg.DisplayName, + HelpText: fg.HelpText, + Default: fg.Default, + }.Build() + + fieldGroupV1.SetFields(make([]string, 0, len(fg.Fields))) + for _, f := range fg.Fields { + fieldGroupV1.SetFields(append(fieldGroupV1.GetFields(), f.FieldName)) + } + + return fieldGroupV1 +} + +func mapFieldsAndConstraints(fields []SchemaField, constraints []SchemaFieldRelationship) ([]*v1_conf.Field, []*v1_conf.Constraint, error) { + resultFields := make([]*v1_conf.Field, 0, len(fields)) + resultConstraints := make([]*v1_conf.Constraint, 0, len(constraints)) ignore := make(map[string]struct{}) - for _, f := range c.Fields { + for _, f := range fields { if f.ExportTarget != ExportTargetGUI && f.ExportTarget != ExportTargetOps { ignore[f.FieldName] = struct{}{} continue } - field := v1_conf.Field{ - Name: f.FieldName, - DisplayName: f.ConnectorConfig.DisplayName, - Description: f.Description, - Placeholder: f.ConnectorConfig.Placeholder, - IsRequired: f.Required, - IsOps: f.ExportTarget == ExportTargetOps, - IsSecret: f.Secret, - } - - switch f.Variant { - case IntVariant: - intField := &v1_conf.IntField{Rules: f.Rules.i} - d, err := GetDefaultValue[int](f) - if err != nil { - return nil, err - } - if d != nil { - intField.DefaultValue = int64(*d) - } - field.Field = &v1_conf.Field_IntField{IntField: intField} + fieldv1, err := schemaFieldToV1(f) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert field '%s' to v1: %w", f.FieldName, err) + } - case BoolVariant: - boolField := &v1_conf.BoolField{Rules: f.Rules.b} - d, err := GetDefaultValue[bool](f) - if err != nil { - return nil, err - } - if d != nil { - boolField.DefaultValue = *d - } - field.Field = &v1_conf.Field_BoolField{BoolField: boolField} - case StringSliceVariant: - stringSliceField := &v1_conf.StringSliceField{Rules: f.Rules.ss} - d, err := GetDefaultValue[[]string](f) - if err != nil { - return nil, err - } - if d != nil { - stringSliceField.DefaultValue = *d - } - field.Field = 
&v1_conf.Field_StringSliceField{StringSliceField: stringSliceField} - case StringMapVariant: - stringMapField := &v1_conf.StringMapField{Rules: f.Rules.sm} - d, err := GetDefaultValue[map[string]any](f) - if err != nil { - return nil, err - } - if d != nil { - // Convert map[string]any to map[string]*anypb.Any - anyMap := make(map[string]*anypb.Any) - for k, v := range *d { - // Convert the value to a structpb.Value - value, err := structpb.NewValue(v) - if err != nil { - return nil, fmt.Errorf("failed to convert map value to structpb.Value: %w", err) - } - anyValue, err := anypb.New(value) - if err != nil { - return nil, fmt.Errorf("failed to convert structpb.Value to Any: %w", err) - } - anyMap[k] = anyValue - } - stringMapField.DefaultValue = anyMap - } - field.Field = &v1_conf.Field_StringMapField{StringMapField: stringMapField} - case StringVariant: - stringField := &v1_conf.StringField{Rules: f.Rules.s} - d, err := GetDefaultValue[string](f) - if err != nil { - return nil, err - } - if d != nil { - stringField.DefaultValue = *d - } + resultFields = append(resultFields, fieldv1) + } - switch f.ConnectorConfig.FieldType { - case Text: - stringField.Type = v1_conf.StringFieldType_STRING_FIELD_TYPE_TEXT_UNSPECIFIED - case Randomize: - stringField.Type = v1_conf.StringFieldType_STRING_FIELD_TYPE_RANDOM - case OAuth2: - stringField.Type = v1_conf.StringFieldType_STRING_FIELD_TYPE_OAUTH2 - case ConnectorDerivedOptions: - stringField.Type = v1_conf.StringFieldType_STRING_FIELD_TYPE_CONNECTOR_DERIVED_OPTIONS - case FileUpload: - stringField.Type = v1_conf.StringFieldType_STRING_FIELD_TYPE_FILE_UPLOAD - stringField.AllowedExtensions = f.ConnectorConfig.BonusStrings - default: - return nil, fmt.Errorf("invalid field type: '%s'", f.ConnectorConfig.FieldType) - } + for _, rel := range constraints { + constraint, err := constraintToV1(rel, ignore) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert constraint to v1: %w", err) + } - field.Field = &v1_conf.Field_StringField{StringField: stringField} + if constraint == nil { + continue } - conf.Fields = append(conf.Fields, &field) + + resultConstraints = append(resultConstraints, constraint) } - for _, rel := range c.Constraints { - constraint := v1_conf.Constraint{} + return resultFields, resultConstraints, nil +} - contraintForIgnoredField := false - for _, f := range rel.Fields { - if _, ok := ignore[f.FieldName]; ok { - contraintForIgnoredField = true - break - } - constraint.FieldNames = append(constraint.FieldNames, f.FieldName) +func constraintToV1(rel SchemaFieldRelationship, ignore map[string]struct{}) (*v1_conf.Constraint, error) { + constraint := &v1_conf.Constraint{} + + constraintForIgnoredField := false + for _, f := range rel.Fields { + if _, ok := ignore[f.FieldName]; ok { + constraintForIgnoredField = true + break } - if contraintForIgnoredField { - continue + constraint.SetFieldNames(append(constraint.GetFieldNames(), f.FieldName)) + } + if constraintForIgnoredField { + return nil, nil + } + + for _, f := range rel.ExpectedFields { + if _, ok := ignore[f.FieldName]; ok { + constraintForIgnoredField = true + break } + constraint.SetSecondaryFieldNames(append(constraint.GetSecondaryFieldNames(), f.FieldName)) + } - for _, f := range rel.ExpectedFields { - if _, ok := ignore[f.FieldName]; ok { - contraintForIgnoredField = true - break - } - constraint.SecondaryFieldNames = append(constraint.SecondaryFieldNames, f.FieldName) + if constraintForIgnoredField { + return nil, nil + } + + kind, ok := 
RelationshipToConstraintKind[rel.Kind] + if !ok { + return nil, fmt.Errorf("invalid constraint kind: %d", rel.Kind) + } + constraint.SetKind(kind) + + return constraint, nil +} + +func schemaFieldToV1(f SchemaField) (*v1_conf.Field, error) { + field := v1_conf.Field_builder{ + Name: f.FieldName, + DisplayName: f.ConnectorConfig.DisplayName, + Description: f.Description, + Placeholder: f.ConnectorConfig.Placeholder, + IsRequired: f.Required, + IsOps: f.ExportTarget == ExportTargetOps, + IsSecret: f.Secret, + }.Build() + + switch f.Variant { + case IntVariant: + intField := v1_conf.IntField_builder{Rules: f.Rules.i}.Build() + d, err := GetDefaultValue[int](f) + if err != nil { + return nil, err + } + if d != nil { + intField.SetDefaultValue(int64(*d)) } - if contraintForIgnoredField { - continue + field.SetIntField(proto.ValueOrDefault(intField)) + + case BoolVariant: + boolField := v1_conf.BoolField_builder{Rules: f.Rules.b}.Build() + d, err := GetDefaultValue[bool](f) + if err != nil { + return nil, err + } + if d != nil { + boolField.SetDefaultValue(*d) + } + field.SetBoolField(proto.ValueOrDefault(boolField)) + case StringSliceVariant: + stringSliceField := v1_conf.StringSliceField_builder{Rules: f.Rules.ss}.Build() + d, err := GetDefaultValue[[]string](f) + if err != nil { + return nil, err + } + if d != nil { + stringSliceField.SetDefaultValue(*d) + } + field.SetStringSliceField(proto.ValueOrDefault(stringSliceField)) + case StringMapVariant: + stringMapField := v1_conf.StringMapField_builder{Rules: f.Rules.sm}.Build() + d, err := GetDefaultValue[map[string]any](f) + if err != nil { + return nil, err + } + if d != nil { + // Convert map[string]any to map[string]*anypb.Any + anyMap := make(map[string]*anypb.Any) + for k, v := range *d { + // Convert the value to a structpb.Value + value, err := structpb.NewValue(v) + if err != nil { + return nil, fmt.Errorf("failed to convert map value to structpb.Value: %w", err) + } + anyValue, err := anypb.New(value) + if err != nil { + return nil, fmt.Errorf("failed to convert structpb.Value to Any: %w", err) + } + anyMap[k] = anyValue + } + stringMapField.SetDefaultValue(anyMap) + } + field.SetStringMapField(proto.ValueOrDefault(stringMapField)) + case StringVariant: + stringField := v1_conf.StringField_builder{Rules: f.Rules.s}.Build() + d, err := GetDefaultValue[string](f) + if err != nil { + return nil, err + } + if d != nil { + stringField.SetDefaultValue(*d) } - kind, ok := RelationshipToConstraintKind[rel.Kind] - if !ok { - return nil, fmt.Errorf("invalid constraint kind: %d", rel.Kind) + switch f.ConnectorConfig.FieldType { + case Text: + stringField.SetType(v1_conf.StringFieldType_STRING_FIELD_TYPE_TEXT_UNSPECIFIED) + case Randomize: + stringField.SetType(v1_conf.StringFieldType_STRING_FIELD_TYPE_RANDOM) + case OAuth2: + stringField.SetType(v1_conf.StringFieldType_STRING_FIELD_TYPE_OAUTH2) + case ConnectorDerivedOptions: + stringField.SetType(v1_conf.StringFieldType_STRING_FIELD_TYPE_CONNECTOR_DERIVED_OPTIONS) + case FileUpload: + stringField.SetType(v1_conf.StringFieldType_STRING_FIELD_TYPE_FILE_UPLOAD) + stringField.SetAllowedExtensions(f.ConnectorConfig.BonusStrings) + default: + return nil, fmt.Errorf("invalid field type: '%s'", f.ConnectorConfig.FieldType) } - constraint.Kind = kind - conf.Constraints = append(conf.Constraints, &constraint) + field.SetStringField(proto.ValueOrDefault(stringField)) + default: + return nil, fmt.Errorf("invalid variant: '%s'", f.Variant) } - return conf, nil + return field, nil } diff --git 
a/vendor/github.com/conductorone/baton-sdk/pkg/field/rule_builders.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/rule_builders.go index d9f006a4..4e7afef7 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/rule_builders.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/rule_builders.go @@ -16,42 +16,42 @@ func NewIntBuilder(rules *v1_conf.Int64Rules) *IntRuler { } func (b *IntRuler) Eq(value int64) *IntRuler { - b.rules.Eq = &value + b.rules.SetEq(value) return b } func (b *IntRuler) Gt(value int64) *IntRuler { - b.rules.Gt = &value + b.rules.SetGt(value) return b } func (b *IntRuler) Lt(value int64) *IntRuler { - b.rules.Lt = &value + b.rules.SetLt(value) return b } func (b *IntRuler) Lte(value int64) *IntRuler { - b.rules.Lte = &value + b.rules.SetLte(value) return b } func (b *IntRuler) Gte(value int64) *IntRuler { - b.rules.Gte = &value + b.rules.SetGte(value) return b } func (b *IntRuler) In(values []int64) *IntRuler { - b.rules.In = values + b.rules.SetIn(values) return b } func (b *IntRuler) NotIn(values []int64) *IntRuler { - b.rules.NotIn = values + b.rules.SetNotIn(values) return b } func (b *IntRuler) ValidateEmpty(value bool) *IntRuler { - b.rules.ValidateEmpty = value + b.rules.SetValidateEmpty(value) return b } @@ -64,7 +64,7 @@ func NewBoolBuilder(rules *v1_conf.BoolRules) *BoolRuler { } func (b *BoolRuler) Eq(v bool) *BoolRuler { - b.rules.Eq = &v + b.rules.SetEq(v) return b } @@ -77,22 +77,22 @@ func NewStringBuilder(rules *v1_conf.StringRules) *StringRuler { } func (b *StringRuler) Eq(value string) *StringRuler { - b.rules.Eq = &value + b.rules.SetEq(value) return b } func (b *StringRuler) Len(value uint64) *StringRuler { - b.rules.Len = &value + b.rules.SetLen(value) return b } func (b *StringRuler) MinLen(value uint64) *StringRuler { - b.rules.MinLen = &value + b.rules.SetMinLen(value) return b } func (b *StringRuler) MaxLen(value uint64) *StringRuler { - b.rules.MaxLen = &value + b.rules.SetMaxLen(value) return b } @@ -101,101 +101,101 @@ func (b *StringRuler) Pattern(value string) *StringRuler { if err != nil { panic(fmt.Errorf("invalid regex: %w", err)) } - b.rules.Pattern = &value + b.rules.SetPattern(value) return b } func (b *StringRuler) Prefix(value string) *StringRuler { - b.rules.Prefix = &value + b.rules.SetPrefix(value) return b } func (b *StringRuler) Suffix(value string) *StringRuler { - b.rules.Suffix = &value + b.rules.SetSuffix(value) return b } func (b *StringRuler) Contains(value string) *StringRuler { - b.rules.Contains = &value + b.rules.SetContains(value) return b } func (b *StringRuler) NotContains(value string) *StringRuler { - b.rules.NotContains = &value + b.rules.SetNotContains(value) return b } func (b *StringRuler) In(values []string) *StringRuler { - b.rules.In = values + b.rules.SetIn(values) return b } func (b *StringRuler) NotIn(values []string) *StringRuler { - b.rules.NotIn = values + b.rules.SetNotIn(values) return b } func (b *StringRuler) IsEmail() *StringRuler { - if b.rules.WellKnown != 0 { + if b.rules.GetWellKnown() != 0 { panic("well known rules are already set") } - b.rules.WellKnown = v1_conf.WellKnownString_WELL_KNOWN_STRING_EMAIL + b.rules.SetWellKnown(v1_conf.WellKnownString_WELL_KNOWN_STRING_EMAIL) return b } func (b *StringRuler) IsHostname() *StringRuler { - if b.rules.WellKnown != 0 { + if b.rules.GetWellKnown() != 0 { panic("well known rules are already set") } - b.rules.WellKnown = v1_conf.WellKnownString_WELL_KNOWN_STRING_HOSTNAME + 
b.rules.SetWellKnown(v1_conf.WellKnownString_WELL_KNOWN_STRING_HOSTNAME) return b } func (b *StringRuler) IsIP() *StringRuler { - if b.rules.WellKnown != 0 { + if b.rules.GetWellKnown() != 0 { panic("well known rules are already set") } - b.rules.WellKnown = v1_conf.WellKnownString_WELL_KNOWN_STRING_IP + b.rules.SetWellKnown(v1_conf.WellKnownString_WELL_KNOWN_STRING_IP) return b } func (b *StringRuler) IsIpv4() *StringRuler { - if b.rules.WellKnown != 0 { + if b.rules.GetWellKnown() != 0 { panic("well known rules are already set") } - b.rules.WellKnown = v1_conf.WellKnownString_WELL_KNOWN_STRING_IPV4 + b.rules.SetWellKnown(v1_conf.WellKnownString_WELL_KNOWN_STRING_IPV4) return b } func (b *StringRuler) IsIpv6() *StringRuler { - if b.rules.WellKnown != 0 { + if b.rules.GetWellKnown() != 0 { panic("well known rules are already set") } - b.rules.WellKnown = v1_conf.WellKnownString_WELL_KNOWN_STRING_IPV6 + b.rules.SetWellKnown(v1_conf.WellKnownString_WELL_KNOWN_STRING_IPV6) return b } func (b *StringRuler) IsURI() *StringRuler { - if b.rules.WellKnown != 0 { + if b.rules.GetWellKnown() != 0 { panic("well known rules are already set") } - b.rules.WellKnown = v1_conf.WellKnownString_WELL_KNOWN_STRING_URI + b.rules.SetWellKnown(v1_conf.WellKnownString_WELL_KNOWN_STRING_URI) return b } func (b *StringRuler) IsAddress() *StringRuler { - if b.rules.WellKnown != 0 { + if b.rules.GetWellKnown() != 0 { panic("well known rules are already set") } - b.rules.WellKnown = v1_conf.WellKnownString_WELL_KNOWN_STRING_ADDRESS + b.rules.SetWellKnown(v1_conf.WellKnownString_WELL_KNOWN_STRING_ADDRESS) return b } func (b *StringRuler) IsUUID() *StringRuler { - if b.rules.WellKnown != 0 { + if b.rules.GetWellKnown() != 0 { panic("well known rules are already set") } - b.rules.WellKnown = v1_conf.WellKnownString_WELL_KNOWN_STRING_UUID + b.rules.SetWellKnown(v1_conf.WellKnownString_WELL_KNOWN_STRING_UUID) return b } @@ -205,32 +205,32 @@ type StringSliceRuler struct { } func NewRepeatedStringBuilder(rules *v1_conf.RepeatedStringRules) *StringSliceRuler { - itemRules := rules.ItemRules + itemRules := rules.GetItemRules() if itemRules == nil { itemRules = &v1_conf.StringRules{} - rules.ItemRules = itemRules + rules.SetItemRules(itemRules) } stringer := NewStringBuilder(itemRules) return &StringSliceRuler{rules: rules, stringer: stringer} } func (b *StringSliceRuler) MinItems(value uint64) *StringSliceRuler { - b.rules.MinItems = &value + b.rules.SetMinItems(value) return b } func (b *StringSliceRuler) MaxItems(value uint64) *StringSliceRuler { - b.rules.MaxItems = &value + b.rules.SetMaxItems(value) return b } func (b *StringSliceRuler) Unique(unique bool) *StringSliceRuler { - b.rules.Unique = unique + b.rules.SetUnique(unique) return b } func (b *StringSliceRuler) ValidateEmpty(value bool) *StringSliceRuler { - b.rules.ValidateEmpty = value + b.rules.SetValidateEmpty(value) return b } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/struct.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/struct.go index 6e614e0d..26c69581 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/struct.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/struct.go @@ -10,6 +10,7 @@ type Configuration struct { IsDirectory bool SupportsExternalResources bool RequiresExternalConnector bool + FieldGroups []SchemaFieldGroup } type configOption func(Configuration) Configuration @@ -89,3 +90,29 @@ func NewConfiguration(fields []SchemaField, opts ...configOption) Configuration return configuration } + +func 
(c *Configuration) FieldGroupFields(group string) map[string]SchemaField { + var fieldGroupMap map[string]SchemaField + + for _, fg := range c.FieldGroups { + if fg.Name == group { + fieldGroupMap = fg.FieldMap() + break + } + } + + if fieldGroupMap == nil { + for _, fg := range c.FieldGroups { + if fg.Default { + fieldGroupMap = fg.FieldMap() + break + } + } + } + + if fieldGroupMap == nil && len(c.FieldGroups) >= 1 { + fieldGroupMap = c.FieldGroups[0].FieldMap() + } + + return fieldGroupMap +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go index aeebb3e6..9ec1dff3 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go @@ -24,44 +24,44 @@ func ValidateIntRules(r *v1_conf.Int64Rules, vInt int, name string) error { return nil } v := int64(vInt) - if r.IsRequired && v == 0 { + if r.GetIsRequired() && v == 0 { return fmt.Errorf("field %s of type int is marked as required but it has a zero-value", name) } - if !r.ValidateEmpty && v == 0 { + if !r.GetValidateEmpty() && v == 0 { return nil } - if r.Eq != nil && *r.Eq != v { - return fmt.Errorf("field %s: expected %v but got %v", name, *r.Eq, v) + if r.HasEq() && r.GetEq() != v { + return fmt.Errorf("field %s: expected %v but got %v", name, r.GetEq(), v) } - if r.Lt != nil && v >= *r.Lt { - return fmt.Errorf("field %s: value must be less than %d but got %d", name, *r.Lt, v) + if r.HasLt() && v >= r.GetLt() { + return fmt.Errorf("field %s: value must be less than %d but got %d", name, r.GetLt(), v) } - if r.Lte != nil && v > *r.Lte { - return fmt.Errorf("field %s: value must be less than or equal to %d but got %d", name, *r.Lte, v) + if r.HasLte() && v > r.GetLte() { + return fmt.Errorf("field %s: value must be less than or equal to %d but got %d", name, r.GetLte(), v) } - if r.Gt != nil && v <= *r.Gt { - return fmt.Errorf("field %s: value must be greater than %d but got %d", name, *r.Gt, v) + if r.HasGt() && v <= r.GetGt() { + return fmt.Errorf("field %s: value must be greater than %d but got %d", name, r.GetGt(), v) } - if r.Gte != nil && v < *r.Gte { - return fmt.Errorf("field %s: value must be greater than or equal to %d but got %d", name, *r.Gte, v) + if r.HasGte() && v < r.GetGte() { + return fmt.Errorf("field %s: value must be greater than or equal to %d but got %d", name, r.GetGte(), v) } - if r.In != nil { + if r.GetIn() != nil { found := false - for _, val := range r.In { + for _, val := range r.GetIn() { if v == val { found = true break } } if !found { - return fmt.Errorf("field %s: value must be one of %v but got %d", name, r.In, v) + return fmt.Errorf("field %s: value must be one of %v but got %d", name, r.GetIn(), v) } } - if r.NotIn != nil { - for _, val := range r.NotIn { + if r.GetNotIn() != nil { + for _, val := range r.GetNotIn() { if v == val { - return fmt.Errorf("field %s: value must not be one of %v but got %d", name, r.NotIn, v) + return fmt.Errorf("field %s: value must not be one of %v but got %d", name, r.GetNotIn(), v) } } } @@ -72,8 +72,8 @@ func ValidateBoolRules(r *v1_conf.BoolRules, v bool, name string) error { if r == nil { return nil } - if r.Eq != nil && *r.Eq != v { - return fmt.Errorf("expected %v but got %v", *r.Eq, v) + if r.HasEq() && r.GetEq() != v { + return fmt.Errorf("expected %v but got %v", r.GetEq(), v) } return nil } @@ -134,28 +134,28 @@ func ValidateStringRules(r *v1_conf.StringRules, v string, name string) 
error { return nil } - if r.IsRequired && v == "" { + if r.GetIsRequired() && v == "" { return fmt.Errorf("field %s of type string is marked as required but it has a zero-value", name) } - if !r.ValidateEmpty && v == "" { + if !r.GetValidateEmpty() && v == "" { return nil } - if r.Eq != nil && *r.Eq != v { - return fmt.Errorf("field %s: expected '%v' but got '%v'", name, *r.Eq, v) + if r.HasEq() && r.GetEq() != v { + return fmt.Errorf("field %s: expected '%v' but got '%v'", name, r.GetEq(), v) } - if r.Len != nil && uint64(len(v)) != *r.Len { - return fmt.Errorf("field %s: value must be exactly %d characters long but got %d", name, *r.Len, len(v)) + if r.HasLen() && uint64(len(v)) != r.GetLen() { + return fmt.Errorf("field %s: value must be exactly %d characters long but got %d", name, r.GetLen(), len(v)) } - if r.MinLen != nil && uint64(len(v)) < *r.MinLen { - return fmt.Errorf("field %s: value must be at least %d characters long but got %d", name, *r.MinLen, len(v)) + if r.HasMinLen() && uint64(len(v)) < r.GetMinLen() { + return fmt.Errorf("field %s: value must be at least %d characters long but got %d", name, r.GetMinLen(), len(v)) } - if r.MaxLen != nil && uint64(len(v)) > *r.MaxLen { - return fmt.Errorf("field %s: value must be at most %d characters long but got %d", name, *r.MaxLen, len(v)) + if r.HasMaxLen() && uint64(len(v)) > r.GetMaxLen() { + return fmt.Errorf("field %s: value must be at most %d characters long but got %d", name, r.GetMaxLen(), len(v)) } - if r.Pattern != nil { - pattern, err := regexp.CompilePOSIX(*r.Pattern) + if r.HasPattern() { + pattern, err := regexp.CompilePOSIX(r.GetPattern()) if err != nil { return fmt.Errorf("field %s: invalid pattern: %w", name, err) } @@ -163,39 +163,39 @@ func ValidateStringRules(r *v1_conf.StringRules, v string, name string) error { return fmt.Errorf("field %s: value must match pattern %s but got '%s'", name, pattern.String(), v) } } - if r.Prefix != nil && !strings.HasPrefix(v, *r.Prefix) { - return fmt.Errorf("field %s: value must have prefix '%s' but got '%s'", name, *r.Prefix, v) + if r.HasPrefix() && !strings.HasPrefix(v, r.GetPrefix()) { + return fmt.Errorf("field %s: value must have prefix '%s' but got '%s'", name, r.GetPrefix(), v) } - if r.Suffix != nil && !strings.HasSuffix(v, *r.Suffix) { - return fmt.Errorf("field %s: value must have suffix '%s' but got '%s'", name, *r.Suffix, v) + if r.HasSuffix() && !strings.HasSuffix(v, r.GetSuffix()) { + return fmt.Errorf("field %s: value must have suffix '%s' but got '%s'", name, r.GetSuffix(), v) } - if r.Contains != nil && !strings.Contains(v, *r.Contains) { - return fmt.Errorf("field %s: value must contain '%s' but got '%s'", name, *r.Contains, v) + if r.HasContains() && !strings.Contains(v, r.GetContains()) { + return fmt.Errorf("field %s: value must contain '%s' but got '%s'", name, r.GetContains(), v) } - if r.In != nil { + if r.GetIn() != nil { found := false - for _, val := range r.In { + for _, val := range r.GetIn() { if v == val { found = true break } } if !found { - return fmt.Errorf("field %s: value must be one of %v but got '%s'", name, r.In, v) + return fmt.Errorf("field %s: value must be one of %v but got '%s'", name, r.GetIn(), v) } } - if r.NotIn != nil { - for _, val := range r.NotIn { + if r.GetNotIn() != nil { + for _, val := range r.GetNotIn() { if v == val { - return fmt.Errorf("field %s: value must not be one of %v but got '%s'", name, r.NotIn, v) + return fmt.Errorf("field %s: value must not be one of %v but got '%s'", name, r.GetNotIn(), v) } } } - if 
r.WellKnown == v1_conf.WellKnownString_WELL_KNOWN_STRING_UNSPECIFIED { + if r.GetWellKnown() == v1_conf.WellKnownString_WELL_KNOWN_STRING_UNSPECIFIED { return nil } - switch r.WellKnown { + switch r.GetWellKnown() { case v1_conf.WellKnownString_WELL_KNOWN_STRING_EMAIL: _, err := mail.ParseAddress(v) if err != nil { @@ -234,7 +234,7 @@ func ValidateStringRules(r *v1_conf.StringRules, v string, name string) error { } default: - return fmt.Errorf("field %s: unknown well-known validation rule: %T", name, r.WellKnown) + return fmt.Errorf("field %s: unknown well-known validation rule: %T", name, r.GetWellKnown()) } return nil @@ -256,21 +256,21 @@ func ValidateRepeatedStringRules(r *v1_conf.RepeatedStringRules, v []string, nam if r == nil { return nil } - if r.IsRequired && len(v) == 0 { + if r.GetIsRequired() && len(v) == 0 { return fmt.Errorf("field %s of type []string is marked as required but it has a zero-value", name) } - if !r.ValidateEmpty && len(v) == 0 { + if !r.GetValidateEmpty() && len(v) == 0 { return nil } - if r.MinItems != nil && uint64(len(v)) < *r.MinItems { - return fmt.Errorf("field %s: value must have at least %d items but got %d", name, *r.MinItems, len(v)) + if r.HasMinItems() && uint64(len(v)) < r.GetMinItems() { + return fmt.Errorf("field %s: value must have at least %d items but got %d", name, r.GetMinItems(), len(v)) } - if r.MaxItems != nil && uint64(len(v)) > *r.MaxItems { - return fmt.Errorf("field %s: value must have at most %d items but got %d", name, *r.MaxItems, len(v)) + if r.HasMaxItems() && uint64(len(v)) > r.GetMaxItems() { + return fmt.Errorf("field %s: value must have at most %d items but got %d", name, r.GetMaxItems(), len(v)) } - if r.Unique { + if r.GetUnique() { uniqueValues := make(map[string]struct{}) for _, item := range v { if _, exists := uniqueValues[item]; exists { @@ -279,12 +279,12 @@ func ValidateRepeatedStringRules(r *v1_conf.RepeatedStringRules, v []string, nam uniqueValues[item] = struct{}{} } } - if r.ItemRules == nil { + if !r.HasItemRules() { return nil } for i, item := range v { - if err := ValidateStringRules(r.ItemRules, item, strconv.Itoa(i)); err != nil { + if err := ValidateStringRules(r.GetItemRules(), item, strconv.Itoa(i)); err != nil { return fmt.Errorf("field %s invalid item at %w", name, err) } } @@ -295,11 +295,11 @@ func ValidateStringMapRules(r *v1_conf.StringMapRules, v map[string]any, name st if r == nil { return nil } - if r.IsRequired && len(v) == 0 { + if r.GetIsRequired() && len(v) == 0 { return fmt.Errorf("field %s of type map[string]any is marked as required but it has a zero-value", name) } - if !r.ValidateEmpty && len(v) == 0 { + if !r.GetValidateEmpty() && len(v) == 0 { return nil } @@ -328,16 +328,43 @@ type Configurable interface { GetStringMap(key string) map[string]any } +type validateOptions struct { + authGroup string +} + +type Option func(*validateOptions) + +func WithAuthMethod(authMethod string) Option { + return func(o *validateOptions) { + o.authGroup = authMethod + } +} + // Validate perform validation of field requirement and constraints // relationships after the configuration is read. 
// We don't check the following: // - if sets of fields are mutually exclusive and required // together at the same time -func Validate(c Configuration, v Configurable) error { +func Validate(c Configuration, v Configurable, opts ...Option) error { + var validateOpts validateOptions + + for _, opt := range opts { + opt(&validateOpts) + } + present := make(map[string]int) validationErrors := &ErrConfigurationMissingFields{} + fieldGroupMap := c.FieldGroupFields(validateOpts.authGroup) + for _, f := range c.Fields { + if fieldGroupMap != nil { + if _, ok := fieldGroupMap[f.FieldName]; !ok { + // skip fields not in the selected auth method group + continue + } + } + // Note: the viper methods are actually casting // internal strings into the desired type. var isPresent bool diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/config/sts.go b/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/config/sts.go index d52644b7..1ff870f9 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/config/sts.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/config/sts.go @@ -55,17 +55,17 @@ func createSigv4STSGetCallerIdentityRequest(ctx context.Context, cfg *aws.Config for signedHeaderKey, signedHeaderValues := range req.Header { v := make([]string, len(signedHeaderValues)) copy(v, signedHeaderValues) - signedHeader := &pb_connector_manager.SignedHeader{ + signedHeader := pb_connector_manager.SignedHeader_builder{ Key: signedHeaderKey, Value: v, - } + }.Build() signedHeaders = append(signedHeaders, signedHeader) } - return &pb_connector_manager.Sigv4SignedRequestSTSGetCallerIdentity{ + return pb_connector_manager.Sigv4SignedRequestSTSGetCallerIdentity_builder{ Method: method, Endpoint: endpoint, Headers: signedHeaders, Body: []byte(body), - }, nil + }.Build(), nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/server.go b/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/server.go index 494fda49..4fe82bd0 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/server.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/server.go @@ -63,12 +63,12 @@ func (r *TransportStream) Response() (*Response, error) { } return &Response{ - msg: &pbtransport.Response{ + msg: pbtransport.Response_builder{ Resp: anyResp, Status: anyStatus, Headers: headers, Trailers: trailers, - }, + }.Build(), }, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/transport.go b/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/transport.go index caabb8ec..880c0e27 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/transport.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/transport.go @@ -21,9 +21,9 @@ type Request struct { } /* -UnmarshalJSON unmarshals the JSON into a Request of course, +UnmarshalJSON unmarshals the JSON into a Request, discarding any unknown fields. -filtering out any annotations that are not known to the global registry +It also filters out any annotations that are not known to the global registry which happens frequently for new features and would otherwise require rolling every lambda function. @@ -32,7 +32,10 @@ so the performance impact is negligible. 
*/ func (f *Request) UnmarshalJSON(b []byte) error { f.msg = &pbtransport.Request{} - err := protojson.Unmarshal(b, f.msg) + unmarshalOptions := protojson.UnmarshalOptions{ + DiscardUnknown: true, + } + err := unmarshalOptions.Unmarshal(b, f.msg) if err == nil { return nil } @@ -92,7 +95,7 @@ func (f *Request) UnmarshalJSON(b []byte) error { return errors.Join(originalErr, err) } - err = protojson.Unmarshal(filteredJSON, f.msg) + err = unmarshalOptions.Unmarshal(filteredJSON, f.msg) if err != nil { return errors.Join(originalErr, err) } @@ -135,11 +138,11 @@ func NewRequest(method string, req proto.Message, headers metadata.MD) (*Request return nil, status.Errorf(codes.Internal, "error marshalling headers: %v", err) } return &Request{ - msg: &pbtransport.Request{ + msg: pbtransport.Request_builder{ Method: method, Req: reqAny, Headers: reqHdrs, - }, + }.Build(), }, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/util.go b/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/util.go index f0112657..9754a2d4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/util.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/lambda/grpc/util.go @@ -145,11 +145,11 @@ func ErrorResponse(err error) *Response { panic(fmt.Errorf("server: unable to serialize status: %w", err)) } return &Response{ - msg: &pbtransport.Response{ + msg: pbtransport.Response_builder{ Resp: nil, Status: anyst, Headers: nil, Trailers: nil, - }, + }.Build(), } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go b/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go index 6ff7e811..263f1bad 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go @@ -59,13 +59,11 @@ func makeCrypto(ctx context.Context) (*v2.CredentialOptions, []*v2.EncryptionCon return nil, nil, err } - opts := &v2.CredentialOptions{ - Options: &v2.CredentialOptions_RandomPassword_{ - RandomPassword: &v2.CredentialOptions_RandomPassword{ - Length: 20, - }, - }, - } + opts := v2.CredentialOptions_builder{ + RandomPassword: v2.CredentialOptions_RandomPassword_builder{ + Length: 20, + }.Build(), + }.Build() return opts, []*v2.EncryptionConfig{config}, nil } @@ -151,16 +149,16 @@ func (p *Provisioner) grant(ctx context.Context) error { return err } - entitlement, err := store.GetEntitlement(ctx, &reader_v2.EntitlementsReaderServiceGetEntitlementRequest{ + entitlement, err := store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ EntitlementId: p.grantEntitlementID, - }) + }.Build()) if err != nil { return err } - entitlementResource, err := store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ + entitlementResource, err := store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ ResourceId: entitlement.GetEntitlement().GetResource().GetId(), - }) + }.Build()) if err != nil { return err } @@ -170,30 +168,30 @@ func (p *Provisioner) grant(ctx context.Context) error { return errors.New("cannot grant entitlement on external resource") } - principal, err := store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ - ResourceId: &v2.ResourceId{ + principal, err := store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ + ResourceId: v2.ResourceId_builder{ Resource: p.grantPrincipalID, ResourceType: p.grantPrincipalType, - }, - }) + 
}.Build(), + }.Build()) if err != nil { return err } - resource := &v2.Resource{ - Id: principal.Resource.Id, - DisplayName: principal.Resource.DisplayName, - Annotations: principal.Resource.Annotations, - Description: principal.Resource.Description, - ExternalId: principal.Resource.ExternalId, + resource := v2.Resource_builder{ + Id: principal.GetResource().GetId(), + DisplayName: principal.GetResource().GetDisplayName(), + Annotations: principal.GetResource().GetAnnotations(), + Description: principal.GetResource().GetDescription(), + ExternalId: principal.GetResource().GetExternalId(), // Omit parent resource ID so that behavior is the same as ConductorOne's provisioning mode ParentResourceId: nil, - } + }.Build() - _, err = p.connector.Grant(ctx, &v2.GrantManagerServiceGrantRequest{ - Entitlement: entitlement.Entitlement, + _, err = p.connector.Grant(ctx, v2.GrantManagerServiceGrantRequest_builder{ + Entitlement: entitlement.GetEntitlement(), Principal: resource, - }) + }.Build()) if err != nil { return err } @@ -210,30 +208,30 @@ func (p *Provisioner) revoke(ctx context.Context) error { return err } - grant, err := store.GetGrant(ctx, &reader_v2.GrantsReaderServiceGetGrantRequest{ + grant, err := store.GetGrant(ctx, reader_v2.GrantsReaderServiceGetGrantRequest_builder{ GrantId: p.revokeGrantID, - }) + }.Build()) if err != nil { return err } - entitlement, err := store.GetEntitlement(ctx, &reader_v2.EntitlementsReaderServiceGetEntitlementRequest{ - EntitlementId: grant.Grant.Entitlement.Id, - }) + entitlement, err := store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ + EntitlementId: grant.GetGrant().GetEntitlement().GetId(), + }.Build()) if err != nil { return err } - principal, err := store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ - ResourceId: grant.Grant.Principal.Id, - }) + principal, err := store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ + ResourceId: grant.GetGrant().GetPrincipal().GetId(), + }.Build()) if err != nil { return err } - entitlementResource, err := store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ + entitlementResource, err := store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ ResourceId: entitlement.GetEntitlement().GetResource().GetId(), - }) + }.Build()) if err != nil { return err } @@ -243,24 +241,24 @@ func (p *Provisioner) revoke(ctx context.Context) error { return errors.New("cannot revoke grant on external resource") } - resource := &v2.Resource{ - Id: principal.Resource.Id, - DisplayName: principal.Resource.DisplayName, - Annotations: principal.Resource.Annotations, - Description: principal.Resource.Description, - ExternalId: principal.Resource.ExternalId, + resource := v2.Resource_builder{ + Id: principal.GetResource().GetId(), + DisplayName: principal.GetResource().GetDisplayName(), + Annotations: principal.GetResource().GetAnnotations(), + Description: principal.GetResource().GetDescription(), + ExternalId: principal.GetResource().GetExternalId(), // Omit parent resource ID so that behavior is the same as ConductorOne's provisioning mode ParentResourceId: nil, - } + }.Build() - _, err = p.connector.Revoke(ctx, &v2.GrantManagerServiceRevokeRequest{ - Grant: &v2.Grant{ - Id: grant.Grant.Id, - Entitlement: entitlement.Entitlement, + _, err = p.connector.Revoke(ctx, v2.GrantManagerServiceRevokeRequest_builder{ + Grant: v2.Grant_builder{ + Id: grant.GetGrant().GetId(), + Entitlement: 
entitlement.GetEntitlement(), Principal: resource, - Annotations: grant.Grant.Annotations, - }, - }) + Annotations: grant.GetGrant().GetAnnotations(), + }.Build(), + }.Build()) if err != nil { return err } @@ -275,10 +273,10 @@ func (p *Provisioner) createAccount(ctx context.Context) error { l := ctxzap.Extract(ctx) var emails []*v2.AccountInfo_Email if p.createAccountEmail != "" { - emails = append(emails, &v2.AccountInfo_Email{ + emails = append(emails, v2.AccountInfo_Email_builder{ Address: p.createAccountEmail, IsPrimary: true, - }) + }.Build()) } opts, config, err := makeCrypto(ctx) @@ -286,15 +284,15 @@ func (p *Provisioner) createAccount(ctx context.Context) error { return err } - _, err = p.connector.CreateAccount(ctx, &v2.CreateAccountRequest{ - AccountInfo: &v2.AccountInfo{ + _, err = p.connector.CreateAccount(ctx, v2.CreateAccountRequest_builder{ + AccountInfo: v2.AccountInfo_builder{ Emails: emails, Login: p.createAccountLogin, Profile: p.createAccountProfile, - }, + }.Build(), CredentialOptions: opts, EncryptionConfigs: config, - }) + }.Build()) if err != nil { return err } @@ -308,12 +306,12 @@ func (p *Provisioner) deleteResource(ctx context.Context) error { ctx, span := tracer.Start(ctx, "Provisioner.deleteResource") defer span.End() - _, err := p.connector.DeleteResource(ctx, &v2.DeleteResourceRequest{ - ResourceId: &v2.ResourceId{ + _, err := p.connector.DeleteResource(ctx, v2.DeleteResourceRequest_builder{ + ResourceId: v2.ResourceId_builder{ Resource: p.deleteResourceID, ResourceType: p.deleteResourceType, - }, - }) + }.Build(), + }.Build()) if err != nil { return err } @@ -331,14 +329,14 @@ func (p *Provisioner) rotateCredentials(ctx context.Context) error { return err } - _, err = p.connector.RotateCredential(ctx, &v2.RotateCredentialRequest{ - ResourceId: &v2.ResourceId{ + _, err = p.connector.RotateCredential(ctx, v2.RotateCredentialRequest_builder{ + ResourceId: v2.ResourceId_builder{ Resource: p.rotateCredentialsId, ResourceType: p.rotateCredentialsType, - }, + }.Build(), CredentialOptions: opts, EncryptionConfigs: config, - }) + }.Build()) if err != nil { return err } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/grpc.go b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/grpc.go index e288b995..cd6351cb 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/grpc.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/grpc.go @@ -52,30 +52,34 @@ type hasResourceType interface { } func getRatelimitDescriptors(ctx context.Context, method string, in interface{}, descriptors ...*ratelimitV1.RateLimitDescriptors_Entry) *ratelimitV1.RateLimitDescriptors { - ret := &ratelimitV1.RateLimitDescriptors{ + ret := ratelimitV1.RateLimitDescriptors_builder{ Entries: descriptors, - } + }.Build() - ret.Entries = append(ret.Entries, &ratelimitV1.RateLimitDescriptors_Entry{ + ret.SetEntries(append(ret.GetEntries(), ratelimitV1.RateLimitDescriptors_Entry_builder{ Key: descriptorKeyConnectorMethod, Value: method, - }) + }.Build())) // ListEntitlements, ListGrants if req, ok := in.(hasResource); ok { - ret.Entries = append(ret.Entries, &ratelimitV1.RateLimitDescriptors_Entry{ - Key: descriptorKeyConnectorResourceType, - Value: req.GetResource().Id.ResourceType, - }) + if resourceType := req.GetResource().GetId().GetResourceType(); resourceType != "" { + ret.SetEntries(append(ret.GetEntries(), ratelimitV1.RateLimitDescriptors_Entry_builder{ + Key: descriptorKeyConnectorResourceType, + Value: resourceType, + }.Build())) + } return ret } - 
// ListResources + // ListResources, ListActionSchemas if req, ok := in.(hasResourceType); ok { - ret.Entries = append(ret.Entries, &ratelimitV1.RateLimitDescriptors_Entry{ - Key: descriptorKeyConnectorResourceType, - Value: req.GetResourceTypeId(), - }) + if resourceTypeID := req.GetResourceTypeId(); resourceTypeID != "" { + ret.SetEntries(append(ret.GetEntries(), ratelimitV1.RateLimitDescriptors_Entry_builder{ + Key: descriptorKeyConnectorResourceType, + Value: resourceTypeID, + }.Build())) + } return ret } @@ -102,19 +106,19 @@ func UnaryInterceptor(now func() time.Time, descriptors ...*ratelimitV1.RateLimi rlDescriptors := getRatelimitDescriptors(ctx, method, req, descriptors...) for { - rlReq := &ratelimitV1.DoRequest{ + rlReq := ratelimitV1.DoRequest_builder{ RequestToken: token, Service: connectorServiceKey, Descriptors: rlDescriptors, - } + }.Build() resp, err := rlClient.Do(ctx, rlReq) if err != nil { l.Error("ratelimit: error", zap.Error(err)) return status.Error(codes.Unknown, err.Error()) } - token = resp.RequestToken + token = resp.GetRequestToken() - switch resp.Description.Status { + switch resp.GetDescription().GetStatus() { case ratelimitV1.RateLimitDescription_STATUS_OK, ratelimitV1.RateLimitDescription_STATUS_EMPTY: l.Debug("ratelimit ok - calling method", zap.String("method", method)) err = invoker(ctx, method, req, reply, cc, opts...) @@ -122,7 +126,7 @@ func UnaryInterceptor(now func() time.Time, descriptors ...*ratelimitV1.RateLimi rlErr := reportRatelimit( ctx, rlClient, - rlReq.RequestToken, + rlReq.GetRequestToken(), ratelimitV1.RateLimitDescription_STATUS_ERROR, rlDescriptors, nil, @@ -137,7 +141,7 @@ func UnaryInterceptor(now func() time.Time, descriptors ...*ratelimitV1.RateLimi if reply != nil { if resp, ok := req.(hasAnnos); ok { - err = reportRatelimit(ctx, rlClient, rlReq.RequestToken, ratelimitV1.RateLimitDescription_STATUS_OK, rlDescriptors, resp.GetAnnotations()) + err = reportRatelimit(ctx, rlClient, rlReq.GetRequestToken(), ratelimitV1.RateLimitDescription_STATUS_OK, rlDescriptors, resp.GetAnnotations()) if err != nil { l.Error("ratelimit: error reporting rate limit", zap.Error(err)) return nil // Explicitly not failing the request as it has already been run successfully. 
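Editorial note: the hunks above migrate the vendored SDK from struct literals and direct field access to the protobuf opaque API, where messages are constructed through generated `_builder` types and read through nil-safe getters. The following is a minimal sketch of that pattern using only the builders and getters shown in the diff; the import path for the generated ratelimit package is an assumption (the diff only shows it vendored under baton-sdk with the alias ratelimitV1).

package main

import (
	"fmt"

	// Assumed import path for the generated ratelimit protos.
	ratelimitV1 "github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1"
)

func main() {
	// Construct messages through the generated builders instead of &DoRequest{...}.
	req := ratelimitV1.DoRequest_builder{
		RequestToken: "token-123", // illustrative token
		Service:      "connector",
	}.Build()

	// Getters are nil-safe: GetDescription() on a response without a description
	// returns nil, and GetStatus() on a nil message returns the zero enum value
	// instead of panicking on a nil pointer dereference.
	resp := ratelimitV1.DoResponse_builder{
		RequestToken: req.GetRequestToken(),
	}.Build()

	fmt.Println(resp.GetRequestToken(), resp.GetDescription().GetStatus())
}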
@@ -148,7 +152,7 @@ func UnaryInterceptor(now func() time.Time, descriptors ...*ratelimitV1.RateLimi return nil case ratelimitV1.RateLimitDescription_STATUS_OVERLIMIT: - resetAt := resp.Description.ResetAt.AsTime() + resetAt := resp.GetDescription().GetResetAt().AsTime() d, ok := wait(start, now().UTC(), resetAt) if !ok { l.Error("ratelimit: timeout") @@ -184,21 +188,21 @@ func reportRatelimit( l := ctxzap.Extract(ctx) annos := annotations.Annotations(anys) - rlAnnotation := &ratelimitV1.RateLimitDescription{ + rlAnnotation := ratelimitV1.RateLimitDescription_builder{ Status: status, - } + }.Build() _, err := annos.Pick(rlAnnotation) if err != nil { return err } - _, err = rlClient.Report(ctx, &ratelimitV1.ReportRequest{ + _, err = rlClient.Report(ctx, ratelimitV1.ReportRequest_builder{ RequestToken: token, Description: rlAnnotation, Descriptors: descriptors, Service: "connector", - }) + }.Build()) if err != nil { l.Error("ratelimit: report failed", zap.Error(err)) return err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/http.go b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/http.go index 2e6e3a41..a693ea5e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/http.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/http.go @@ -121,10 +121,10 @@ func ExtractRateLimitData(statusCode int, header *http.Header) (*v2.RateLimitDes resetAt = time.Now().Add(time.Second * 60) } - return &v2.RateLimitDescription{ + return v2.RateLimitDescription_builder{ Status: rlstatus, Limit: limit, Remaining: remaining, ResetAt: timestamppb.New(resetAt), - }, nil + }.Build(), nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/mem_ratelimiter.go b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/mem_ratelimiter.go index ec27d7f2..5e182ac4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/mem_ratelimiter.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/mem_ratelimiter.go @@ -21,22 +21,22 @@ type MemRateLimiter struct { // TODO func (m *MemRateLimiter) Do(ctx context.Context, req *ratelimitV1.DoRequest) (*ratelimitV1.DoResponse, error) { if m.limiter == nil { - return &ratelimitV1.DoResponse{ - RequestToken: req.RequestToken, - Description: &ratelimitV1.RateLimitDescription{ + return ratelimitV1.DoResponse_builder{ + RequestToken: req.GetRequestToken(), + Description: ratelimitV1.RateLimitDescription_builder{ Status: ratelimitV1.RateLimitDescription_STATUS_EMPTY, - }, - }, nil + }.Build(), + }.Build(), nil } m.limiter.Take() - return &ratelimitV1.DoResponse{ - RequestToken: req.RequestToken, - Description: &ratelimitV1.RateLimitDescription{ + return ratelimitV1.DoResponse_builder{ + RequestToken: req.GetRequestToken(), + Description: ratelimitV1.RateLimitDescription_builder{ Status: ratelimitV1.RateLimitDescription_STATUS_EMPTY, - }, - }, nil + }.Build(), + }.Build(), nil } // Report updates the rate limiter with relevant information. 
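Editorial note: for HTTP-based connectors, the same description message is produced by ratelimit.ExtractRateLimitData, whose result is now read through getters as well. A hedged usage sketch follows; the helper's signature is taken from the http.go hunk above, while the surrounding HTTP call and endpoint are illustrative only.

package main

import (
	"log"
	"net/http"

	"github.com/conductorone/baton-sdk/pkg/ratelimit"
)

func main() {
	resp, err := http.Get("https://api.example.com/resource") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// ExtractRateLimitData maps the status code and common rate-limit headers
	// (limit, remaining, reset) into a *v2.RateLimitDescription.
	rl, err := ratelimit.ExtractRateLimitData(resp.StatusCode, &resp.Header)
	if err != nil {
		log.Fatal(err)
	}

	// Read the result through the nil-safe getters used throughout this diff.
	log.Println(rl.GetStatus(), rl.GetRemaining(), rl.GetResetAt().AsTime())
}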
@@ -53,20 +53,20 @@ func (m *MemRateLimiter) Report(ctx context.Context, req *ratelimitV1.ReportRequ } desc := req.GetDescription() - if desc.ResetAt == nil { + if !desc.HasResetAt() { return &ratelimitV1.ReportResponse{}, nil } - if desc.Remaining == 0 { + if desc.GetRemaining() == 0 { return &ratelimitV1.ReportResponse{}, nil } - resetAt := desc.ResetAt.AsTime().UTC() + resetAt := desc.GetResetAt().AsTime().UTC() windowDuration := resetAt.Sub(m.now()) if windowDuration > 5*time.Minute { windowDuration = 5 * time.Minute } - remaining := int64(m.usePercent * float64(desc.Remaining)) + remaining := int64(m.usePercent * float64(desc.GetRemaining())) if remaining < 1 { remaining = 1 } @@ -75,7 +75,7 @@ func (m *MemRateLimiter) Report(ctx context.Context, req *ratelimitV1.ReportRequ ctxzap.Extract(ctx).Debug( "updating rate limiter", zap.Int64("calculated_remaining", remaining), - zap.Int64("remaining", desc.Remaining), + zap.Int64("remaining", desc.GetRemaining()), zap.Int64("rate", limiterSize), zap.Time("reset_at", resetAt), ) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/noop_ratelimiter.go b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/noop_ratelimiter.go index 1a6f3432..5734443e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/noop_ratelimiter.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/noop_ratelimiter.go @@ -9,12 +9,12 @@ import ( type NoOpRateLimiter struct{} func (r *NoOpRateLimiter) Do(ctx context.Context, req *v1.DoRequest) (*v1.DoResponse, error) { - return &v1.DoResponse{ - RequestToken: req.RequestToken, - Description: &v1.RateLimitDescription{ + return v1.DoResponse_builder{ + RequestToken: req.GetRequestToken(), + Description: v1.RateLimitDescription_builder{ Status: v1.RateLimitDescription_STATUS_EMPTY, - }, - }, nil + }.Build(), + }.Build(), nil } func (r *NoOpRateLimiter) Report(ctx context.Context, req *v1.ReportRequest) (*v1.ReportResponse, error) { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/ratelimit.go b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/ratelimit.go index a580a8de..e94f41bb 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/ratelimit.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/ratelimit/ratelimit.go @@ -28,11 +28,11 @@ func NewLimiter(ctx context.Context, now func() time.Time, cfg *ratelimitV1.Rate } if c := cfg.GetSlidingMem(); c != nil { - return NewSlidingMemoryRateLimiter(ctx, now, c.UsePercent), nil + return NewSlidingMemoryRateLimiter(ctx, now, c.GetUsePercent()), nil } if c := cfg.GetFixedMem(); c != nil { - return NewFixedMemoryRateLimiter(ctx, now, c.Rate, c.Period.AsDuration()), nil + return NewFixedMemoryRateLimiter(ctx, now, c.GetRate(), c.GetPeriod().AsDuration()), nil } if c := cfg.GetExternal(); c != nil { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/retry/retry.go b/vendor/github.com/conductorone/baton-sdk/pkg/retry/retry.go index ed9fea89..b9335c9b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/retry/retry.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/retry/retry.go @@ -77,11 +77,11 @@ func (r *Retryer) ShouldWaitAndRetry(ctx context.Context, err error) bool { details := st.Details() for _, detail := range details { if rlData, ok := detail.(*v2.RateLimitDescription); ok { - waitResetAt := time.Until(rlData.ResetAt.AsTime()) + waitResetAt := time.Until(rlData.GetResetAt().AsTime()) if waitResetAt <= 0 { continue } - duration := time.Duration(rlData.Limit) + duration := 
time.Duration(rlData.GetLimit()) if duration <= 0 { continue } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/empty_connector.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/empty_connector.go index 2667bbab..4126ad1b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/empty_connector.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/empty_connector.go @@ -22,16 +22,16 @@ func (n *emptyConnector) ListResourceTypes( request *v2.ResourceTypesServiceListResourceTypesRequest, opts ...grpc.CallOption, ) (*v2.ResourceTypesServiceListResourceTypesResponse, error) { - return &v2.ResourceTypesServiceListResourceTypesResponse{ + return v2.ResourceTypesServiceListResourceTypesResponse_builder{ List: []*v2.ResourceType{}, - }, nil + }.Build(), nil } // ListResources returns a list of resources. func (n *emptyConnector) ListResources(ctx context.Context, request *v2.ResourcesServiceListResourcesRequest, opts ...grpc.CallOption) (*v2.ResourcesServiceListResourcesResponse, error) { - return &v2.ResourcesServiceListResourcesResponse{ + return v2.ResourcesServiceListResourcesResponse_builder{ List: []*v2.Resource{}, - }, nil + }.Build(), nil } func (n *emptyConnector) GetResource( @@ -48,16 +48,26 @@ func (n *emptyConnector) ListEntitlements( request *v2.EntitlementsServiceListEntitlementsRequest, opts ...grpc.CallOption, ) (*v2.EntitlementsServiceListEntitlementsResponse, error) { - return &v2.EntitlementsServiceListEntitlementsResponse{ + return v2.EntitlementsServiceListEntitlementsResponse_builder{ List: []*v2.Entitlement{}, - }, nil + }.Build(), nil +} + +func (n *emptyConnector) ListStaticEntitlements( + ctx context.Context, + request *v2.EntitlementsServiceListStaticEntitlementsRequest, + opts ...grpc.CallOption, +) (*v2.EntitlementsServiceListStaticEntitlementsResponse, error) { + return v2.EntitlementsServiceListStaticEntitlementsResponse_builder{ + List: []*v2.Entitlement{}, + }.Build(), nil } // ListGrants returns a list of grants. func (n *emptyConnector) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest, opts ...grpc.CallOption) (*v2.GrantsServiceListGrantsResponse, error) { - return &v2.GrantsServiceListGrantsResponse{ + return v2.GrantsServiceListGrantsResponse_builder{ List: []*v2.Grant{}, - }, nil + }.Build(), nil } func (n *emptyConnector) Grant(ctx context.Context, request *v2.GrantManagerServiceGrantRequest, opts ...grpc.CallOption) (*v2.GrantManagerServiceGrantResponse, error) { @@ -70,7 +80,7 @@ func (n *emptyConnector) Revoke(ctx context.Context, request *v2.GrantManagerSer // GetMetadata returns a connector metadata. func (n *emptyConnector) GetMetadata(ctx context.Context, request *v2.ConnectorServiceGetMetadataRequest, opts ...grpc.CallOption) (*v2.ConnectorServiceGetMetadataResponse, error) { - return &v2.ConnectorServiceGetMetadataResponse{Metadata: &v2.ConnectorMetadata{}}, nil + return v2.ConnectorServiceGetMetadataResponse_builder{Metadata: &v2.ConnectorMetadata{}}.Build(), nil } // Validate is called by the connector framework to validate the correct response. 
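Editorial note: the emptyConnector stubs above follow the same convention, assembling responses with builders even when they only carry empty slices. A small sketch of that shape, assuming GetList is the generated accessor for the List field (the accessor itself is not shown in the hunk):

package main

import (
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

func main() {
	// An "empty but present" list response, mirroring the emptyConnector stubs above.
	resp := v2.ResourceTypesServiceListResourceTypesResponse_builder{
		List: []*v2.ResourceType{},
	}.Build()

	// GetList is assumed to be the generated accessor for the List field;
	// it returns the empty (non-nil) slice set by the builder.
	fmt.Println(len(resp.GetList())) // 0
}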
@@ -83,9 +93,9 @@ func (n *emptyConnector) BulkCreateTickets(ctx context.Context, request *v2.Tick } func (n *emptyConnector) BulkGetTickets(ctx context.Context, request *v2.TicketsServiceBulkGetTicketsRequest, opts ...grpc.CallOption) (*v2.TicketsServiceBulkGetTicketsResponse, error) { - return &v2.TicketsServiceBulkGetTicketsResponse{ + return v2.TicketsServiceBulkGetTicketsResponse_builder{ Tickets: []*v2.TicketsServiceGetTicketResponse{}, - }, nil + }.Build(), nil } func (n *emptyConnector) CreateTicket(ctx context.Context, request *v2.TicketsServiceCreateTicketRequest, opts ...grpc.CallOption) (*v2.TicketsServiceCreateTicketResponse, error) { @@ -97,9 +107,9 @@ func (n *emptyConnector) GetTicket(ctx context.Context, request *v2.TicketsServi } func (n *emptyConnector) ListTicketSchemas(ctx context.Context, request *v2.TicketsServiceListTicketSchemasRequest, opts ...grpc.CallOption) (*v2.TicketsServiceListTicketSchemasResponse, error) { - return &v2.TicketsServiceListTicketSchemasResponse{ + return v2.TicketsServiceListTicketSchemasResponse_builder{ List: []*v2.TicketSchema{}, - }, nil + }.Build(), nil } func (n *emptyConnector) GetTicketSchema(ctx context.Context, request *v2.TicketsServiceGetTicketSchemaRequest, opts ...grpc.CallOption) (*v2.TicketsServiceGetTicketSchemaResponse, error) { @@ -143,21 +153,21 @@ func (n *emptyConnector) InvokeAction(ctx context.Context, request *v2.InvokeAct } func (n *emptyConnector) ListActionSchemas(ctx context.Context, request *v2.ListActionSchemasRequest, opts ...grpc.CallOption) (*v2.ListActionSchemasResponse, error) { - return &v2.ListActionSchemasResponse{ + return v2.ListActionSchemasResponse_builder{ Schemas: []*v2.BatonActionSchema{}, - }, nil + }.Build(), nil } func (n *emptyConnector) ListEvents(ctx context.Context, request *v2.ListEventsRequest, opts ...grpc.CallOption) (*v2.ListEventsResponse, error) { - return &v2.ListEventsResponse{ + return v2.ListEventsResponse_builder{ Events: []*v2.Event{}, - }, nil + }.Build(), nil } func (n *emptyConnector) ListEventFeeds(ctx context.Context, request *v2.ListEventFeedsRequest, opts ...grpc.CallOption) (*v2.ListEventFeedsResponse, error) { - return &v2.ListEventFeedsResponse{ + return v2.ListEventFeedsResponse_builder{ List: []*v2.EventFeedMetadata{}, - }, nil + }.Build(), nil } // NewEmptyConnector returns a new emptyConnector. diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go index c8f67d9b..5f7db3e1 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go @@ -1,3 +1,3 @@ package sdk -const Version = "v0.4.1" +const Version = "v0.6.8" diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/json.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/json.go deleted file mode 100644 index 285bbac3..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/session/json.go +++ /dev/null @@ -1,136 +0,0 @@ -package session - -import ( - "context" - "encoding/json" - - "github.com/conductorone/baton-sdk/pkg/types" -) - -func GetManyJSON[T any](ctx context.Context, keys []string, opt ...types.SessionCacheOption) (map[string]T, error) { - cache, err := GetSession(ctx) - if err != nil { - return nil, err - } - - // Get the raw bytes from cache - rawMap, err := cache.GetMany(ctx, keys, opt...) 
- if err != nil { - return nil, err - } - result := make(map[string]T) - // Unmarshal each item to the generic type - for key, bytes := range rawMap { - var item T - err = json.Unmarshal(bytes, &item) - if err != nil { - return nil, err - } - result[key] = item - } - - return result, nil -} - -func SetManyJSON[T any](ctx context.Context, items map[string]T, opt ...types.SessionCacheOption) error { - cache, err := GetSession(ctx) - if err != nil { - return err - } - - // Marshal each item to JSON bytes - bytesMap := make(map[string][]byte) - for key, item := range items { - bytes, err := json.Marshal(item) - if err != nil { - return err - } - bytesMap[key] = bytes - } - - // Store in cache - return cache.SetMany(ctx, bytesMap, opt...) -} - -func GetJSON[T any](ctx context.Context, key string, opt ...types.SessionCacheOption) (T, bool, error) { - var zero T - cache, err := GetSession(ctx) - if err != nil { - return zero, false, err - } - - // Get the raw bytes from cache - bytes, found, err := cache.Get(ctx, key, opt...) - if err != nil || !found { - return zero, found, err - } - - // Unmarshal to the generic type - var item T - err = json.Unmarshal(bytes, &item) - if err != nil { - return zero, false, err - } - - return item, true, nil -} - -func SetJSON[T any](ctx context.Context, key string, item T, opt ...types.SessionCacheOption) error { - cache, err := GetSession(ctx) - if err != nil { - return err - } - - // Marshal the item to JSON bytes - bytes, err := json.Marshal(item) - if err != nil { - return err - } - - // Store in cache - return cache.Set(ctx, key, bytes, opt...) -} - -func DeleteJSON(ctx context.Context, key string, opt ...types.SessionCacheOption) error { - cache, err := GetSession(ctx) - if err != nil { - return err - } - - return cache.Delete(ctx, key, opt...) -} - -func ClearJSON(ctx context.Context, opt ...types.SessionCacheOption) error { - cache, err := GetSession(ctx) - if err != nil { - return err - } - - return cache.Clear(ctx, opt...) -} - -func GetAllJSON[T any](ctx context.Context, opt ...types.SessionCacheOption) (map[string]T, error) { - cache, err := GetSession(ctx) - if err != nil { - return nil, err - } - - // Get all raw bytes from cache - rawMap, err := cache.GetAll(ctx, opt...) - if err != nil { - return nil, err - } - - result := make(map[string]T) - // Unmarshal each item to the generic type - for key, bytes := range rawMap { - var item T - err = json.Unmarshal(bytes, &item) - if err != nil { - return nil, err - } - result[key] = item - } - - return result, nil -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/json_session.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/json_session.go new file mode 100644 index 00000000..bbd97158 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/session/json_session.go @@ -0,0 +1,118 @@ +package session + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/conductorone/baton-sdk/pkg/types/sessions" +) + +// See GRPC validation rules for eg GetManyRequest. + +func GetManyJSON[T any](ctx context.Context, ss sessions.SessionStore, keys []string, opt ...sessions.SessionStoreOption) (map[string]T, error) { + allBytes, err := UnrollGetMany[[]byte](ctx, ss, keys, opt...) 
+ if err != nil { + return nil, err + } + + result := make(map[string]T) + for key, bytes := range allBytes { + var item T + err = json.Unmarshal(bytes, &item) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal item for key %s: %w", key, err) + } + result[key] = item + } + + return result, nil +} + +func SetManyJSON[T any](ctx context.Context, ss sessions.SessionStore, items map[string]T, opt ...sessions.SessionStoreOption) error { + // Lazy iterator that marshals items on demand, yielding (item, error) pairs + sizedItems := func(yield func(SizedItem[[]byte], error) bool) { + for key, item := range items { + bytes, err := json.Marshal(item) + if err != nil { + yield(SizedItem[[]byte]{}, fmt.Errorf("failed to marshal item for key %s: %w", key, err)) + return + } + if !yield(SizedItem[[]byte]{ + Key: key, + Value: bytes, + Size: len(key) + len(bytes) + 20, + }, nil) { + return + } + } + } + + return UnrollSetMany(ctx, ss, sizedItems, opt...) +} + +func GetJSON[T any](ctx context.Context, ss sessions.SessionStore, key string, opt ...sessions.SessionStoreOption) (T, bool, error) { + var zero T + + // Get the raw bytes from cache + bytes, found, err := ss.Get(ctx, key, opt...) + if err != nil || !found { + return zero, found, err + } + + // Unmarshal to the generic type + var item T + err = json.Unmarshal(bytes, &item) + if err != nil { + return zero, false, err + } + + return item, true, nil +} + +func SetJSON[T any](ctx context.Context, ss sessions.SessionStore, key string, item T, opt ...sessions.SessionStoreOption) error { + // Marshal the item to JSON bytes + bytes, err := json.Marshal(item) + if err != nil { + return err + } + + // Store in cache + return ss.Set(ctx, key, bytes, opt...) +} + +func DeleteJSON(ctx context.Context, ss sessions.SessionStore, key string, opt ...sessions.SessionStoreOption) error { + return ss.Delete(ctx, key, opt...) +} + +func ClearJSON(ctx context.Context, ss sessions.SessionStore, opt ...sessions.SessionStoreOption) error { + return ss.Clear(ctx, opt...) +} + +func GetAllJSON[T any](ctx context.Context, ss sessions.SessionStore, opt ...sessions.SessionStoreOption) (map[string]T, error) { + result := make(map[string]T) + pageToken := "" + for { + rawMap, nextPageToken, err := ss.GetAll(ctx, pageToken, opt...) + if err != nil { + return nil, err + } + for key, bytes := range rawMap { + var item T + err = json.Unmarshal(bytes, &item) + if err != nil { + return nil, err + } + result[key] = item + } + if nextPageToken == "" { + break + } + if pageToken == nextPageToken { + return nil, fmt.Errorf("page token is the same as the next page token") + } + pageToken = nextPageToken + } + + return result, nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/memory.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/memory.go deleted file mode 100644 index 6aa5a4b5..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/session/memory.go +++ /dev/null @@ -1,220 +0,0 @@ -package session - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - "github.com/conductorone/baton-sdk/pkg/types" -) - -// MemorySessionCache implements SessionCache interface using an in-memory store with TTL. -type MemorySessionCache struct { - cache map[string]map[string][]byte // syncID -> key -> value - mu sync.RWMutex - defaultTTL time.Duration -} - -// NewMemorySessionCache creates a new in-memory session cache with default TTL of 1 hour. 
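Editorial note: the new json_session.go helpers above wrap any sessions.SessionStore with JSON (de)serialization. The sketch below exercises the SetJSON/GetJSON signatures added in that file; the store value, key, sync ID, and prefix are illustrative, and sessions.WithSyncID / sessions.WithPrefix are the option helpers used elsewhere in this diff.

package sessionexample

import (
	"context"
	"fmt"

	"github.com/conductorone/baton-sdk/pkg/session"
	"github.com/conductorone/baton-sdk/pkg/types/sessions"
)

type teamState struct {
	ID   int64  `json:"id"`
	Slug string `json:"slug"`
}

// roundTrip stores and reloads a typed value through the JSON helpers.
// ss can be any sessions.SessionStore implementation (for example the
// gRPC-backed client or the otter-backed memory cache added in this diff);
// wiring a concrete store is out of scope for this sketch.
func roundTrip(ctx context.Context, ss sessions.SessionStore) error {
	opts := []sessions.SessionStoreOption{
		sessions.WithSyncID("sync-123"), // illustrative sync ID
		sessions.WithPrefix("teams"),    // illustrative key prefix
	}

	if err := session.SetJSON(ctx, ss, "team:42", teamState{ID: 42, Slug: "platform"}, opts...); err != nil {
		return err
	}

	got, found, err := session.GetJSON[teamState](ctx, ss, "team:42", opts...)
	if err != nil {
		return err
	}
	fmt.Println(found, got.Slug)
	return nil
}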
-func NewMemorySessionCache(ctx context.Context, opt ...types.SessionCacheConstructorOption) (types.SessionCache, error) { - return NewMemorySessionCacheWithTTL(ctx, time.Hour, opt...) -} - -// NewMemorySessionCacheWithTTL creates a new in-memory session cache with custom TTL. -func NewMemorySessionCacheWithTTL(ctx context.Context, ttl time.Duration, opt ...types.SessionCacheConstructorOption) (types.SessionCache, error) { - // Apply constructor options - for _, option := range opt { - var err error - ctx, err = option(ctx) - if err != nil { - return nil, err - } - } - - return &MemorySessionCache{ - cache: make(map[string]map[string][]byte), - defaultTTL: ttl, - }, nil -} - -// Get retrieves a value from the cache by key. -func (m *MemorySessionCache) Get(ctx context.Context, key string, opt ...types.SessionCacheOption) ([]byte, bool, error) { - bag, err := applyOptions(ctx, opt...) - if err != nil { - return nil, false, err - } - - if bag.Prefix != "" { - key = bag.Prefix + KeyPrefixDelimiter + key - } - - m.mu.RLock() - defer m.mu.RUnlock() - - syncCache, ok := m.cache[bag.SyncID] - if !ok { - return nil, false, nil - } - - value, found := syncCache[key] - if !found { - return nil, false, nil - } - dst := make([]byte, len(value)) // allocate destination - _ = copy(dst, value) - return dst, true, nil -} - -// Set stores a value in the cache with the given key. -func (m *MemorySessionCache) Set(ctx context.Context, key string, value []byte, opt ...types.SessionCacheOption) error { - bag, err := applyOptions(ctx, opt...) - if err != nil { - return err - } - - if bag.Prefix != "" { - key = bag.Prefix + KeyPrefixDelimiter + key - } - - m.mu.Lock() - defer m.mu.Unlock() - - // Get or create the sync cache - syncCache, ok := m.cache[bag.SyncID] - if !ok { - syncCache = make(map[string][]byte) - m.cache[bag.SyncID] = syncCache - } - - syncCache[key] = value - return nil -} - -// Delete removes a value from the cache by key. -func (m *MemorySessionCache) Delete(ctx context.Context, key string, opt ...types.SessionCacheOption) error { - bag, err := applyOptions(ctx, opt...) - if err != nil { - return err - } - m.mu.Lock() - defer m.mu.Unlock() - - if bag.Prefix != "" { - key = bag.Prefix + KeyPrefixDelimiter + key - } - - syncCache, ok := m.cache[bag.SyncID] - if ok { - delete(syncCache, key) - } - return nil -} - -// Clear removes all values from the cache. -func (m *MemorySessionCache) Clear(ctx context.Context, opt ...types.SessionCacheOption) error { - bag, err := applyOptions(ctx, opt...) - if err != nil { - return err - } - - m.mu.Lock() - defer m.mu.Unlock() - - delete(m.cache, bag.SyncID) - return nil -} - -// GetAll returns all key-value pairs. -func (m *MemorySessionCache) GetAll(ctx context.Context, opt ...types.SessionCacheOption) (map[string][]byte, error) { - bag, err := applyOptions(ctx, opt...) - if err != nil { - return nil, err - } - - if bag.Prefix != "" { - return nil, fmt.Errorf("prefix is not supported for GetAll in memory session cache") - } - - m.mu.RLock() - defer m.mu.RUnlock() - - syncCache, ok := m.cache[bag.SyncID] - if !ok { - return map[string][]byte{}, nil - } - - result := make(map[string][]byte) - for key, value := range syncCache { - dst := make([]byte, len(value)) // allocate destination - _ = copy(dst, value) - result[key] = dst - } - return result, nil -} - -// GetMany retrieves multiple values from the cache by keys. 
-func (m *MemorySessionCache) GetMany(ctx context.Context, keys []string, opt ...types.SessionCacheOption) (map[string][]byte, error) { - bag, err := applyOptions(ctx, opt...) - if err != nil { - return nil, err - } - - m.mu.RLock() - defer m.mu.RUnlock() - - syncCache, ok := m.cache[bag.SyncID] - if !ok { - return map[string][]byte{}, nil - } - - result := make(map[string][]byte) - for _, key := range keys { - if value, found := syncCache[key]; found { - k := strings.TrimPrefix(key, bag.Prefix+KeyPrefixDelimiter) - dst := make([]byte, len(value)) // allocate destination - _ = copy(dst, value) - result[k] = dst - } - } - - return result, nil -} - -// SetMany stores multiple values in the cache. -func (m *MemorySessionCache) SetMany(ctx context.Context, values map[string][]byte, opt ...types.SessionCacheOption) error { - // Apply options to get syncID - bag, err := applyOptions(ctx, opt...) - if err != nil { - return err - } - - m.mu.Lock() - defer m.mu.Unlock() - - // Get or create the sync cache - syncCache, ok := m.cache[bag.SyncID] - if !ok { - syncCache = make(map[string][]byte) - m.cache[bag.SyncID] = syncCache - } - - for key, value := range values { - if bag.Prefix != "" { - key = bag.Prefix + KeyPrefixDelimiter + key - } - syncCache[key] = value - } - - return nil -} - -// Close performs any necessary cleanup when the cache is no longer needed. -func (m *MemorySessionCache) Close(ctx context.Context) error { - m.mu.Lock() - defer m.mu.Unlock() - // Clear all data - m.cache = make(map[string]map[string][]byte) - return nil -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/memory_cache.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/memory_cache.go new file mode 100644 index 00000000..b5e70c5a --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/session/memory_cache.go @@ -0,0 +1,218 @@ +package session + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/conductorone/baton-sdk/pkg/types/sessions" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "github.com/maypok86/otter/v2" + "go.uber.org/zap" +) + +var _ sessions.SessionStore = (*MemorySessionCache)(nil) + +func NewMemorySessionCache(otterOptions *otter.Options[string, []byte], ss sessions.SessionStore) (*MemorySessionCache, error) { + cache, err := otter.New(otterOptions) + if err != nil { + return nil, err + } + return &MemorySessionCache{cache: cache, ss: ss}, nil +} + +type MemorySessionCache struct { + cache *otter.Cache[string, []byte] + ss sessions.SessionStore +} + +// The cache is potentially used across syncs. +// Cross sync isolation is achieved by using the syncID in the cache key. 
+func cacheKey(bag *sessions.SessionStoreBag, key string) string { + return fmt.Sprintf("%s/%s/%s", bag.SyncID, bag.Prefix, key) +} + +func cacheKeys(bag *sessions.SessionStoreBag, keys []string) []string { + newKeys := make([]string, len(keys)) + prefix := fmt.Sprintf("%s/%s/", bag.SyncID, bag.Prefix) + for i, key := range keys { + newKeys[i] = fmt.Sprintf("%s%s", prefix, key) + } + return newKeys +} + +func stripPrefix(bag *sessions.SessionStoreBag, key string) string { + prefix := fmt.Sprintf("%s/%s/", bag.SyncID, bag.Prefix) + return strings.TrimPrefix(key, prefix) +} + +func stripPrefixes(bag *sessions.SessionStoreBag, keys []string) []string { + prefix := fmt.Sprintf("%s/%s/", bag.SyncID, bag.Prefix) + newKeys := make([]string, len(keys)) + for i, key := range keys { + newKeys[i] = strings.TrimPrefix(key, prefix) + } + return newKeys +} + +func (m *MemorySessionCache) Clear(ctx context.Context, opt ...sessions.SessionStoreOption) error { + l := ctxzap.Extract(ctx) + s := m.cache.Stats() + l.Info( + "MemorySessionCache Stats", + zap.Uint64("hits", s.Hits), + zap.Uint64("misses", s.Misses), + zap.Int("estimatedEntries", m.cache.EstimatedSize()), + zap.Uint64("weightedSize", m.cache.WeightedSize()), + ) + + bag, err := applyOptions(ctx, opt...) + if err != nil { + return err + } + err = m.ss.Clear(ctx, opt...) + if err != nil { + return err + } + prefix := fmt.Sprintf("%s/", bag.SyncID) + if bag.Prefix != "" { + prefix = cacheKey(bag, "") + } + + var keysToInvalidate []string + for key := range m.cache.Keys() { + if strings.HasPrefix(key, prefix) { + keysToInvalidate = append(keysToInvalidate, key) + } + } + for _, key := range keysToInvalidate { + _, _ = m.cache.Invalidate(key) + } + + return nil +} + +func (m *MemorySessionCache) Delete(ctx context.Context, key string, opt ...sessions.SessionStoreOption) error { + bag, err := applyOptions(ctx, opt...) + if err != nil { + return err + } + + err = m.ss.Delete(ctx, key, opt...) + if err != nil { + return err + } + _, _ = m.cache.Invalidate(cacheKey(bag, key)) + return nil +} + +type CacheItem struct { + Value []byte +} + +func (m *MemorySessionCache) Get(ctx context.Context, key string, opt ...sessions.SessionStoreOption) ([]byte, bool, error) { + bag, err := applyOptions(ctx, opt...) + if err != nil { + return nil, false, err + } + + v, err := m.cache.Get(ctx, cacheKey(bag, key), otter.LoaderFunc[string, []byte](func(ctx context.Context, _ string) ([]byte, error) { + v, found, err := m.ss.Get(ctx, key, opt...) + if err != nil { + return nil, err + } + if !found { + return nil, otter.ErrNotFound + } + return v, nil + })) + if errors.Is(err, otter.ErrNotFound) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + return v, true, nil +} + +// GetAll always calls the backing store and caches the results. +func (m *MemorySessionCache) GetAll(ctx context.Context, pageToken string, opt ...sessions.SessionStoreOption) (map[string][]byte, string, error) { + bag, err := applyOptions(ctx, opt...) + if err != nil { + return nil, "", err + } + values, nextPageToken, err := m.ss.GetAll(ctx, pageToken, opt...) + if err != nil { + return nil, "", err + } + for key, value := range values { + _, _ = m.cache.Set(cacheKey(bag, key), value) + } + + return values, nextPageToken, nil +} + +func (m *MemorySessionCache) GetMany(ctx context.Context, keys []string, opt ...sessions.SessionStoreOption) (map[string][]byte, []string, error) { + bag, err := applyOptions(ctx, opt...) 
+ if err != nil { + return nil, nil, err + } + values, err := m.cache.BulkGet(ctx, cacheKeys(bag, keys), otter.BulkLoaderFunc[string, []byte](func(ctx context.Context, cacheKeys []string) (map[string][]byte, error) { + backingValues, unprocessedKeys, err := m.ss.GetMany(ctx, stripPrefixes(bag, cacheKeys), opt...) + if err != nil { + return nil, err + } + if len(unprocessedKeys) > 0 { + return nil, fmt.Errorf("get many returned unprocessed keys") + } + cacheKeyValues := make(map[string][]byte, len(backingValues)) + for k, v := range backingValues { + cacheKeyValues[cacheKey(bag, k)] = v + } + + return cacheKeyValues, nil + })) + + if err != nil { + return nil, nil, err + } + unprefixedValues := make(map[string][]byte) + for k, v := range values { + // NOTE(kans): GetMany returns nil values for missing keys, so we need to filter them out. + // We do not allow nil values in the session store. + if v == nil { + continue + } + unprefixedValues[stripPrefix(bag, k)] = v + } + return unprefixedValues, nil, nil +} + +func (m *MemorySessionCache) Set(ctx context.Context, key string, value []byte, opt ...sessions.SessionStoreOption) error { + bag, err := applyOptions(ctx, opt...) + if err != nil { + return err + } + err = m.ss.Set(ctx, key, value, opt...) + if err != nil { + return err + } + _, _ = m.cache.Set(cacheKey(bag, key), value) + return nil +} + +func (m *MemorySessionCache) SetMany(ctx context.Context, values map[string][]byte, opt ...sessions.SessionStoreOption) error { + bag, err := applyOptions(ctx, opt...) + if err != nil { + return err + } + err = m.ss.SetMany(ctx, values, opt...) + if err != nil { + return err + } + for key, value := range values { + _, _ = m.cache.Set(cacheKey(bag, key), value) + } + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/noop_session.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/noop_session.go new file mode 100644 index 00000000..901c96bc --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/session/noop_session.go @@ -0,0 +1,52 @@ +package session + +import ( + "context" + "fmt" + + "github.com/conductorone/baton-sdk/pkg/types/sessions" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" +) + +var _ sessions.SessionStore = (*NoOpSessionStore)(nil) + +// Don't panic in dev (ideally). +type NoOpSessionStore struct{} + +var ErrSessionStoreDisabled = fmt.Errorf("session store is disabled by connector author. 
It must be explicitly enabled via RunConnector WithSessionStoreEnabled()") + +func (n *NoOpSessionStore) logAndError(ctx context.Context, operation string) error { + l := ctxzap.Extract(ctx) + l.Warn("NoOpSessionStore operation ignored", zap.String("operation", operation)) + return fmt.Errorf("%w: operation %s is not supported", ErrSessionStoreDisabled, operation) +} + +func (n *NoOpSessionStore) Get(ctx context.Context, key string, opt ...sessions.SessionStoreOption) ([]byte, bool, error) { + return nil, false, n.logAndError(ctx, "Get") +} + +func (n *NoOpSessionStore) GetMany(ctx context.Context, keys []string, opt ...sessions.SessionStoreOption) (map[string][]byte, []string, error) { + return nil, nil, n.logAndError(ctx, "GetMany") +} + +func (n *NoOpSessionStore) Set(ctx context.Context, key string, value []byte, opt ...sessions.SessionStoreOption) error { + return n.logAndError(ctx, "Set") +} + +func (n *NoOpSessionStore) SetMany(ctx context.Context, values map[string][]byte, opt ...sessions.SessionStoreOption) error { + return n.logAndError(ctx, "SetMany") +} + +func (n *NoOpSessionStore) Delete(ctx context.Context, key string, opt ...sessions.SessionStoreOption) error { + return n.logAndError(ctx, "Delete") +} + +func (n *NoOpSessionStore) Clear(ctx context.Context, opt ...sessions.SessionStoreOption) error { + // NOTE: we call this unconditionally for cleanup, so don't throw. + return nil +} + +func (n *NoOpSessionStore) GetAll(ctx context.Context, pageToken string, opt ...sessions.SessionStoreOption) (map[string][]byte, string, error) { + return nil, "", n.logAndError(ctx, "GetAll") +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/session.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/session.go index 92ee1bfc..e480ebb9 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/session/session.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/session/session.go @@ -3,61 +3,135 @@ package session import ( "context" "fmt" + "iter" + "maps" - "github.com/conductorone/baton-sdk/pkg/types" + "github.com/conductorone/baton-sdk/pkg/types/sessions" ) -// KeyPrefixDelimiter is the delimiter used to separate prefixes from keys in the session cache. -const KeyPrefixDelimiter = "::" - -// GetSession retrieves the session cache instance from the context. -// Returns an error if no session cache is found in the context. -func GetSession(ctx context.Context) (types.SessionCache, error) { - if sessionCache, ok := ctx.Value(types.SessionCacheKey{}).(types.SessionCache); ok { - return sessionCache, nil +func Chunk[T any](items []T, chunkSize int) iter.Seq[[]T] { + return func(yield func([]T) bool) { + for i := 0; i < len(items); i += chunkSize { + end := min(i+chunkSize, len(items)) + if !yield(items[i:end]) { + return + } + } } - return nil, fmt.Errorf("no session cache found in context") } -func WithSyncID(syncID string) types.SessionCacheOption { - return func(ctx context.Context, bag *types.SessionCacheBag) error { - bag.SyncID = syncID - return nil +type GetManyable[T any] interface { + GetMany(ctx context.Context, keys []string, opt ...sessions.SessionStoreOption) (map[string]T, []string, error) +} + +func UnrollGetMany[T any](ctx context.Context, ss GetManyable[T], keys []string, opt ...sessions.SessionStoreOption) (map[string]T, error) { + all := make(map[string]T) + if len(keys) == 0 { + return all, nil } + + // TODO(Kans): parallelize this? 
+ for keyChunk := range Chunk(keys, sessions.MaxKeysPerRequest) { + // For each chunk, unroll any unprocessed keys until all are processed + remainingKeys := keyChunk + for { + some, unprocessedKeys, err := ss.GetMany(ctx, remainingKeys, opt...) + if err != nil { + return nil, err + } + + // Accumulate results + maps.Copy(all, some) + + // If no unprocessed keys, we're done with this chunk + if len(unprocessedKeys) == 0 { + break + } + + // Check for infinite loop: if unprocessed keys haven't been reduced, something is wrong + if len(unprocessedKeys) == len(remainingKeys) { + return nil, fmt.Errorf("unprocessed keys not reduced: %d unprocessed out of %d requested", len(unprocessedKeys), len(remainingKeys)) + } + + // Continue with unprocessed keys + remainingKeys = unprocessedKeys + } + } + return all, nil +} + +type SetManyable[T any] interface { + SetMany(ctx context.Context, values map[string]T, opt ...sessions.SessionStoreOption) error +} + +// SizedItem represents a key-value pair with its size in bytes. +type SizedItem[T any] struct { + Key string + Value T + Size int // size in bytes of key + value } -func WithPrefix(prefix string) types.SessionCacheOption { - return func(ctx context.Context, bag *types.SessionCacheBag) error { - bag.Prefix = prefix +// UnrollSetMany takes an iterator of sized items and batches them into SetMany calls, +// respecting both MaxKeysPerRequest and MaxSessionStoreSizeLimit. +// The iterator yields (item, error) pairs; iteration stops on the first error. +func UnrollSetMany[T any](ctx context.Context, ss SetManyable[T], items iter.Seq2[SizedItem[T], error], opt ...sessions.SessionStoreOption) error { + currentChunk := make(map[string]T) + currentSize := 0 + + flush := func() error { + if len(currentChunk) == 0 { + return nil + } + err := ss.SetMany(ctx, currentChunk, opt...) + if err != nil { + return err + } + currentChunk = make(map[string]T) + currentSize = 0 return nil } -} -// GetSyncIDFromContext retrieves the sync ID from the context, returning empty string if not found. -func GetSyncIDFromContext(ctx context.Context) string { - if syncID, ok := ctx.Value(types.SyncIDKey{}).(string); ok { - return syncID + for item, err := range items { + if err != nil { + return err + } + + // Flush if adding this item would exceed either limit + if len(currentChunk) >= sessions.MaxKeysPerRequest || (currentSize+item.Size >= sessions.MaxSessionStoreSizeLimit && len(currentChunk) > 0) { + if err := flush(); err != nil { + return err + } + } + + currentChunk[item.Key] = item.Value + currentSize += item.Size } - return "" + + return flush() } -// applyOptions applies session cache options and returns a configured bag. -func applyOptions(ctx context.Context, opt ...types.SessionCacheOption) (*types.SessionCacheBag, error) { - bag := &types.SessionCacheBag{} +type GetAllable[T any] interface { + GetAll(ctx context.Context, pageToken string, opt ...sessions.SessionStoreOption) (map[string]T, string, error) +} - for _, option := range opt { - err := option(ctx, bag) +func UnrollGetAll[T any](ctx context.Context, ss GetAllable[T], opt ...sessions.SessionStoreOption) (map[string]T, error) { + pageToken := "" + all := make(map[string]T) + for { + // TODO(Kans): parallelize this? + some, nextPageToken, err := ss.GetAll(ctx, pageToken, opt...) 
if err != nil { return nil, err } - } + maps.Copy(all, some) + if nextPageToken == "" { + break + } + if pageToken == nextPageToken { + return nil, fmt.Errorf("page token is the same as the next page token: %s", pageToken) + } - if bag.SyncID == "" { - bag.SyncID = GetSyncIDFromContext(ctx) + pageToken = nextPageToken } - if bag.SyncID == "" { - return nil, fmt.Errorf("no syncID set on context or in options") - } - - return bag, nil + return all, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/grpc_session.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/session_client.go similarity index 55% rename from vendor/github.com/conductorone/baton-sdk/pkg/session/grpc_session.go rename to vendor/github.com/conductorone/baton-sdk/pkg/session/session_client.go index 70b8a868..f75d920a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/session/grpc_session.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/session/session_client.go @@ -1,5 +1,3 @@ -//go:build baton_lambda_support - package session import ( @@ -7,15 +5,14 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io" "net" "os" - "strings" + "slices" "time" v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" "github.com/conductorone/baton-sdk/pkg/sdk" - "github.com/conductorone/baton-sdk/pkg/types" + "github.com/conductorone/baton-sdk/pkg/types/sessions" dpop_grpc "github.com/conductorone/dpop/integrations/dpop_grpc" "github.com/conductorone/dpop/pkg/dpop" "github.com/go-jose/go-jose/v4" @@ -24,18 +21,35 @@ import ( "google.golang.org/grpc/credentials" ) -// No longer needed since we're reusing existing credentials +var _ sessions.SessionStore = (*GRPCSessionStoreClient)(nil) -// GRPCSessionCache implements SessionCache interface using gRPC calls to BatonSessionService. -type GRPCSessionCache struct { +type GRPCSessionStoreClient struct { client v1.BatonSessionServiceClient } +// applyOptions applies session cache options and returns a configured bag. +func applyOptions(ctx context.Context, opt ...sessions.SessionStoreOption) (*sessions.SessionStoreBag, error) { + bag := &sessions.SessionStoreBag{} + + for _, option := range opt { + err := option(ctx, bag) + if err != nil { + return nil, err + } + } + + if bag.SyncID == "" { + return nil, fmt.Errorf("no syncID set in options") + } + + return bag, nil +} + // NewGRPCSessionClient creates a new gRPC session service client using existing DPoP credentials. // It reuses an existing access token and DPoP key instead of performing a new authentication round. // It reads the session service address from the BATON_SESSION_SERVICE_ADDR environment variable, // defaulting to "localhost:50051" if not set. 
-func NewGRPCSessionClient(ctx context.Context, accessToken string, dpopKey *jose.JSONWebKey, opt ...types.SessionCacheConstructorOption) (v1.BatonSessionServiceClient, error) { +func NewGRPCSessionClient(ctx context.Context, accessToken string, dpopKey *jose.JSONWebKey, opt ...sessions.SessionStoreConstructorOption) (v1.BatonSessionServiceClient, error) { // Apply constructor options for _, option := range opt { var err error @@ -44,7 +58,6 @@ func NewGRPCSessionClient(ctx context.Context, accessToken string, dpopKey *jose return nil, err } } - // Get the session service address from environment variable addr := os.Getenv("BATON_SESSION_SERVICE_ADDR") if addr == "" { @@ -108,7 +121,6 @@ func NewGRPCSessionClient(ctx context.Context, accessToken string, dpopKey *jose return v1.NewBatonSessionServiceClient(conn), nil } -// staticTokenSource implements oauth2.TokenSource to return a static access token type staticTokenSource struct { accessToken string } @@ -122,8 +134,8 @@ func (s *staticTokenSource) Token() (*oauth2.Token, error) { // These functions are no longer needed since we're reusing existing credentials -// NewGRPCSessionCache creates a new gRPC session cache instance. -func NewGRPCSessionCache(ctx context.Context, client v1.BatonSessionServiceClient, opt ...types.SessionCacheConstructorOption) (types.SessionCache, error) { +// NewGRPCSessionStore creates a new gRPC session cache instance. +func NewGRPCSessionStore(ctx context.Context, client v1.BatonSessionServiceClient, opt ...sessions.SessionStoreConstructorOption) (sessions.SessionStore, error) { // Apply constructor options for _, option := range opt { var err error @@ -133,100 +145,72 @@ func NewGRPCSessionCache(ctx context.Context, client v1.BatonSessionServiceClien } } - return &GRPCSessionCache{ + return &GRPCSessionStoreClient{ client: client, }, nil } // Get retrieves a value from the cache by key. -func (g *GRPCSessionCache) Get(ctx context.Context, key string, opt ...types.SessionCacheOption) ([]byte, bool, error) { +func (g *GRPCSessionStoreClient) Get(ctx context.Context, key string, opt ...sessions.SessionStoreOption) ([]byte, bool, error) { bag, err := applyOptions(ctx, opt...) if err != nil { return nil, false, err } - if bag.Prefix != "" { - key = bag.Prefix + KeyPrefixDelimiter + key - } - - req := &v1.GetRequest{ + req := v1.GetRequest_builder{ SyncId: bag.SyncID, Key: key, - } + Prefix: bag.Prefix, + }.Build() resp, err := g.client.Get(ctx, req) if err != nil { return nil, false, fmt.Errorf("failed to get value from gRPC session cache: %w", err) } - if resp == nil { - return nil, false, nil - } - - return resp.Value, true, nil + return resp.GetValue(), resp.GetFound(), nil } // GetMany retrieves multiple values from the cache by keys. -func (g *GRPCSessionCache) GetMany(ctx context.Context, keys []string, opt ...types.SessionCacheOption) (map[string][]byte, error) { +func (g *GRPCSessionStoreClient) GetMany(ctx context.Context, keys []string, opt ...sessions.SessionStoreOption) (map[string][]byte, []string, error) { bag, err := applyOptions(ctx, opt...) 
if err != nil { - return nil, err + return nil, nil, err } - // Apply prefix to keys if specified - prefixedKeys := make([]string, len(keys)) - for i, key := range keys { - if bag.Prefix != "" { - prefixedKeys[i] = bag.Prefix + KeyPrefixDelimiter + key - } else { - prefixedKeys[i] = key - } - } + slices.Sort(keys) + keys = slices.Compact(keys) - req := &v1.GetManyRequest{ + resp, err := g.client.GetMany(ctx, v1.GetManyRequest_builder{ SyncId: bag.SyncID, - Keys: prefixedKeys, - } - - stream, err := g.client.GetMany(ctx, req) + Keys: keys, + Prefix: bag.Prefix, + }.Build()) if err != nil { - return nil, fmt.Errorf("failed to get many values from gRPC session cache: %w", err) + return nil, nil, fmt.Errorf("failed to get many values from gRPC session cache: %w", err) } - result := make(map[string][]byte) - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return nil, fmt.Errorf("failed to get many values from gRPC session cache: %w", err) - } - if bag.Prefix != "" { - resp.Key = strings.TrimPrefix(resp.Key, bag.Prefix+KeyPrefixDelimiter) - } - result[resp.Key] = resp.Value + results := make(map[string][]byte, len(resp.Items)) + for _, item := range resp.Items { + results[item.Key] = item.Value } - return result, nil + return results, resp.UnprocessedKeys, nil } // Set stores a value in the cache with the given key. -func (g *GRPCSessionCache) Set(ctx context.Context, key string, value []byte, opt ...types.SessionCacheOption) error { +func (g *GRPCSessionStoreClient) Set(ctx context.Context, key string, value []byte, opt ...sessions.SessionStoreOption) error { bag, err := applyOptions(ctx, opt...) if err != nil { return err } - if bag.Prefix != "" { - key = bag.Prefix + KeyPrefixDelimiter + key - } - - req := &v1.SetRequest{ + req := v1.SetRequest_builder{ SyncId: bag.SyncID, Key: key, Value: value, - } + Prefix: bag.Prefix, + }.Build() _, err = g.client.Set(ctx, req) if err != nil { @@ -237,28 +221,17 @@ func (g *GRPCSessionCache) Set(ctx context.Context, key string, value []byte, op } // SetMany stores multiple values in the cache. -func (g *GRPCSessionCache) SetMany(ctx context.Context, values map[string][]byte, opt ...types.SessionCacheOption) error { +func (g *GRPCSessionStoreClient) SetMany(ctx context.Context, values map[string][]byte, opt ...sessions.SessionStoreOption) error { bag, err := applyOptions(ctx, opt...) if err != nil { return err } - // Apply prefix to keys if specified - prefixedValues := make(map[string][]byte) - for key, value := range values { - if bag.Prefix != "" { - prefixedValues[bag.Prefix+KeyPrefixDelimiter+key] = value - } else { - prefixedValues[key] = value - } - } - - req := &v1.SetManyRequest{ + _, err = g.client.SetMany(ctx, v1.SetManyRequest_builder{ + Values: values, SyncId: bag.SyncID, - Values: prefixedValues, - } - - _, err = g.client.SetMany(ctx, req) + Prefix: bag.Prefix, + }.Build()) if err != nil { return fmt.Errorf("failed to set many values in gRPC session cache: %w", err) } @@ -267,20 +240,17 @@ func (g *GRPCSessionCache) SetMany(ctx context.Context, values map[string][]byte } // Delete removes a value from the cache by key. -func (g *GRPCSessionCache) Delete(ctx context.Context, key string, opt ...types.SessionCacheOption) error { +func (g *GRPCSessionStoreClient) Delete(ctx context.Context, key string, opt ...sessions.SessionStoreOption) error { bag, err := applyOptions(ctx, opt...) 
if err != nil { return err } - if bag.Prefix != "" { - key = bag.Prefix + KeyPrefixDelimiter + key - } - - req := &v1.DeleteRequest{ + req := v1.DeleteRequest_builder{ SyncId: bag.SyncID, Key: key, - } + Prefix: bag.Prefix, + }.Build() _, err = g.client.Delete(ctx, req) if err != nil { @@ -291,15 +261,16 @@ func (g *GRPCSessionCache) Delete(ctx context.Context, key string, opt ...types. } // Clear removes all values from the cache. -func (g *GRPCSessionCache) Clear(ctx context.Context, opt ...types.SessionCacheOption) error { +func (g *GRPCSessionStoreClient) Clear(ctx context.Context, opt ...sessions.SessionStoreOption) error { bag, err := applyOptions(ctx, opt...) if err != nil { return err } - req := &v1.ClearRequest{ + req := v1.ClearRequest_builder{ SyncId: bag.SyncID, - } + Prefix: bag.Prefix, + }.Build() _, err = g.client.Clear(ctx, req) if err != nil { @@ -309,65 +280,29 @@ func (g *GRPCSessionCache) Clear(ctx context.Context, opt ...types.SessionCacheO return nil } -// GetAll returns all key-value pairs. -// Note: The gRPC service doesn't have a GetAll method, so we'll need to implement this -// by getting all keys first and then using GetMany. This is a limitation of the current -// gRPC service definition. -func (g *GRPCSessionCache) GetAll(ctx context.Context, opt ...types.SessionCacheOption) (map[string][]byte, error) { +func (g *GRPCSessionStoreClient) GetAll(ctx context.Context, pageToken string, opt ...sessions.SessionStoreOption) (map[string][]byte, string, error) { bag, err := applyOptions(ctx, opt...) if err != nil { - return nil, err - } - - if bag.Prefix != "" { - return nil, fmt.Errorf("prefix is not supported for GetAll in gRPC session cache") + return nil, "", err } result := make(map[string][]byte) - pageToken := "" - for { - req := &v1.GetAllRequest{ - SyncId: bag.SyncID, - PageToken: pageToken, - } + req := v1.GetAllRequest_builder{ + SyncId: bag.SyncID, + PageToken: pageToken, + Prefix: bag.Prefix, + }.Build() - stream, err := g.client.GetAll(ctx, req) - if err != nil { - return nil, fmt.Errorf("failed to get all values from gRPC session cache: %w", err) - } - - nextToken := "" - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return nil, fmt.Errorf("failed to get all values from gRPC session cache: %w", err) - } - - if resp.NextPageToken != "" { - nextToken = resp.NextPageToken - } - - key := resp.Key - if key != "" { - result[key] = resp.Value - } - } - - if nextToken == "" { - break - } - pageToken = nextToken + resp, err := g.client.GetAll(ctx, req) + if err != nil { + return nil, "", fmt.Errorf("failed to get all values from gRPC session cache: %w", err) } - return result, nil -} + // Add items from this page to the result + for _, item := range resp.Items { + result[item.Key] = item.Value + } -// Close performs any necessary cleanup when the cache is no longer needed. 
-func (g *GRPCSessionCache) Close(ctx context.Context) error { - // No cleanup needed for gRPC client - return nil + return result, resp.PageToken, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/session_server.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/session_server.go new file mode 100644 index 00000000..622ec1ee --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/session/session_server.go @@ -0,0 +1,215 @@ +package session + +import ( + "context" + "fmt" + "log" + "net" + + v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" + "github.com/conductorone/baton-sdk/pkg/types/sessions" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "google.golang.org/grpc" +) + +var _ v1.BatonSessionServiceServer = (*GRPCSessionServer)(nil) + +type GRPCSessionServer struct { + // v1.UnimplementedBatonSessionServiceServer + store sessions.SessionStore +} + +func NewGRPCSessionServer() *GRPCSessionServer { + return &GRPCSessionServer{} +} + +type SetSessionStore interface { + SetSessionStore(ctx context.Context, store sessions.SessionStore) +} + +func (s *GRPCSessionServer) SetSessionStore(ctx context.Context, store sessions.SessionStore) { + s.store = store +} + +func (s *GRPCSessionServer) Validate() error { + if s.store == nil { + return fmt.Errorf("session store is not set") + } + + return nil +} + +func (s *GRPCSessionServer) Get(ctx context.Context, req *v1.GetRequest) (*v1.GetResponse, error) { + if err := s.Validate(); err != nil { + return nil, err + } + + value, found, err := s.store.Get(ctx, req.GetKey(), sessions.WithSyncID(req.GetSyncId()), sessions.WithPrefix(req.GetPrefix())) + if err != nil { + return nil, fmt.Errorf("failed to get value from cache: %w", err) + } + + return v1.GetResponse_builder{ + Value: value, + Found: found, + }.Build(), nil +} + +func (s *GRPCSessionServer) GetMany(ctx context.Context, req *v1.GetManyRequest) (*v1.GetManyResponse, error) { + if err := s.Validate(); err != nil { + return nil, err + } + + values, unprocessedKeys, err := s.store.GetMany( + ctx, + req.GetKeys(), + sessions.WithSyncID(req.GetSyncId()), + sessions.WithPrefix(req.GetPrefix()), + ) + if err != nil { + return nil, fmt.Errorf("failed to get many values from cache: %w", err) + } + + // Convert the map to items array + items := make([]*v1.GetManyItem, 0, len(values)) + for key, value := range values { + items = append(items, v1.GetManyItem_builder{ + Key: key, + Value: value, + }.Build()) + } + + return v1.GetManyResponse_builder{ + Items: items, + UnprocessedKeys: unprocessedKeys, + }.Build(), nil +} + +func (s *GRPCSessionServer) Set(ctx context.Context, req *v1.SetRequest) (*v1.SetResponse, error) { + if err := s.Validate(); err != nil { + return nil, err + } + + err := s.store.Set(ctx, req.GetKey(), req.GetValue(), sessions.WithSyncID(req.GetSyncId()), sessions.WithPrefix(req.GetPrefix())) + if err != nil { + return nil, fmt.Errorf("failed to set value in cache: %w", err) + } + + return &v1.SetResponse{}, nil +} + +func (s *GRPCSessionServer) SetMany(ctx context.Context, req *v1.SetManyRequest) (*v1.SetManyResponse, error) { + if err := s.Validate(); err != nil { + return nil, err + } + + err := s.store.SetMany(ctx, req.GetValues(), sessions.WithSyncID(req.GetSyncId()), sessions.WithPrefix(req.GetPrefix())) + if err != nil { + return nil, fmt.Errorf("failed to set many values in cache: %w", err) + } + + return &v1.SetManyResponse{}, nil +} + +func (s *GRPCSessionServer) Delete(ctx context.Context, req 
*v1.DeleteRequest) (*v1.DeleteResponse, error) { + if err := s.Validate(); err != nil { + return nil, err + } + + err := s.store.Delete(ctx, req.GetKey(), sessions.WithSyncID(req.GetSyncId()), sessions.WithPrefix(req.GetPrefix())) + if err != nil { + return nil, fmt.Errorf("failed to delete value from cache: %w", err) + } + + return &v1.DeleteResponse{}, nil +} + +func (s *GRPCSessionServer) DeleteMany(ctx context.Context, req *v1.DeleteManyRequest) (*v1.DeleteManyResponse, error) { + if err := s.Validate(); err != nil { + return nil, err + } + + for _, key := range req.GetKeys() { + err := s.store.Delete( + ctx, + key, + sessions.WithSyncID(req.GetSyncId()), + sessions.WithPrefix(req.GetPrefix()), + ) + if err != nil { + return nil, fmt.Errorf("failed to delete value for key %s: %w", key, err) + } + } + + return &v1.DeleteManyResponse{}, nil +} + +func (s *GRPCSessionServer) Clear(ctx context.Context, req *v1.ClearRequest) (*v1.ClearResponse, error) { + if s.store == nil { + // we sometimes clean up the session store after the connector is done + ctxzap.Extract(ctx).Warn("session store is not set") + return &v1.ClearResponse{}, nil + } + + err := s.store.Clear(ctx, sessions.WithSyncID(req.GetSyncId()), sessions.WithPrefix(req.GetPrefix())) + if err != nil { + return nil, fmt.Errorf("failed to clear cache: %w", err) + } + + return &v1.ClearResponse{}, nil +} + +func (s *GRPCSessionServer) GetAll(ctx context.Context, req *v1.GetAllRequest) (*v1.GetAllResponse, error) { + if err := s.Validate(); err != nil { + return nil, err + } + + values, nextPageToken, err := s.store.GetAll( + ctx, + req.PageToken, + sessions.WithSyncID(req.GetSyncId()), + sessions.WithPrefix(req.GetPrefix()), + ) + if err != nil { + return nil, fmt.Errorf("failed to get all values from cache: %w", err) + } + + // Convert the map to items array + items := make([]*v1.GetAllItem, 0, len(values)) + for key, value := range values { + items = append(items, v1.GetAllItem_builder{ + Key: key, + Value: value, + }.Build()) + } + + return v1.GetAllResponse_builder{ + Items: items, + PageToken: nextPageToken, + }.Build(), nil +} + +func StartGRPCSessionServerWithOptions(ctx context.Context, listener net.Listener, sessionServer v1.BatonSessionServiceServer, opts ...grpc.ServerOption) error { + // Create the gRPC server with custom options + server := grpc.NewServer(opts...) 
+ + // Create and register the session service + v1.RegisterBatonSessionServiceServer(server, sessionServer) + + defer listener.Close() + + // Start serving + go func() { + if err := server.Serve(listener); err != nil { + log.Printf("gRPC session server failed: %v", err) + } + }() + + // Wait for context cancellation + <-ctx.Done() + + // Graceful shutdown + server.GracefulStop() + + return nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/session/typed_session.go b/vendor/github.com/conductorone/baton-sdk/pkg/session/typed_session.go new file mode 100644 index 00000000..8933b555 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/session/typed_session.go @@ -0,0 +1,156 @@ +package session + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/conductorone/baton-sdk/pkg/types/sessions" +) + +type Codec[T any] interface { + Encode(value T) ([]byte, error) + Decode(data []byte) (T, error) +} + +type TypedSessionCache[T any] struct { + cache sessions.SessionStore + codec Codec[T] +} + +func NewTypedSessionCache[T any](cache sessions.SessionStore, codec Codec[T]) *TypedSessionCache[T] { + return &TypedSessionCache[T]{ + cache: cache, + codec: codec, + } +} + +func (t *TypedSessionCache[T]) Get(ctx context.Context, key string, opt ...sessions.SessionStoreOption) (T, bool, error) { + var zero T + data, found, err := t.cache.Get(ctx, key, opt...) + if err != nil { + return zero, false, err + } + if !found { + return zero, false, nil + } + + value, err := t.codec.Decode(data) + if err != nil { + return zero, false, fmt.Errorf("failed to decode value: %w", err) + } + + return value, true, nil +} + +func (t *TypedSessionCache[T]) Set(ctx context.Context, key string, value T, opt ...sessions.SessionStoreOption) error { + data, err := t.codec.Encode(value) + if err != nil { + return fmt.Errorf("failed to encode value: %w", err) + } + + return t.cache.Set(ctx, key, data, opt...) +} + +func (t *TypedSessionCache[T]) GetMany(ctx context.Context, keys []string, opt ...sessions.SessionStoreOption) (map[string]T, []string, error) { + dataMap, unprocessedKeys, err := t.cache.GetMany(ctx, keys, opt...) + if err != nil { + return nil, nil, err + } + + result := make(map[string]T) + for key, data := range dataMap { + value, err := t.codec.Decode(data) + if err != nil { + return nil, nil, fmt.Errorf("failed to decode value for key %s: %w", key, err) + } + result[key] = value + } + + return result, unprocessedKeys, nil +} + +func (t *TypedSessionCache[T]) SetMany(ctx context.Context, values map[string]T, opt ...sessions.SessionStoreOption) error { + dataMap := make(map[string][]byte) + for key, value := range values { + data, err := t.codec.Encode(value) + if err != nil { + return fmt.Errorf("failed to encode value for key %s: %w", key, err) + } + dataMap[key] = data + } + + return t.cache.SetMany(ctx, dataMap, opt...) +} + +func (t *TypedSessionCache[T]) Delete(ctx context.Context, key string, opt ...sessions.SessionStoreOption) error { + return t.cache.Delete(ctx, key, opt...) +} + +func (t *TypedSessionCache[T]) Clear(ctx context.Context, opt ...sessions.SessionStoreOption) error { + return t.cache.Clear(ctx, opt...) +} + +func (t *TypedSessionCache[T]) GetAll(ctx context.Context, pageToken string, opt ...sessions.SessionStoreOption) (map[string]T, string, error) { + dataMap, pageToken, err := t.cache.GetAll(ctx, pageToken, opt...) 
+ if err != nil { + return nil, "", err + } + + result := make(map[string]T) + for key, data := range dataMap { + value, err := t.codec.Decode(data) + if err != nil { + return nil, "", fmt.Errorf("failed to decode value for key %s: %w", key, err) + } + result[key] = value + } + + return result, pageToken, nil +} + +type JSONCodec[T any] struct{} + +func (j *JSONCodec[T]) Encode(value T) ([]byte, error) { + return json.Marshal(value) +} + +func (j *JSONCodec[T]) Decode(data []byte) (T, error) { + var value T + err := json.Unmarshal(data, &value) + return value, err +} + +type StringCodec struct{} + +func (s *StringCodec) Encode(value string) ([]byte, error) { + return []byte(value), nil +} + +func (s *StringCodec) Decode(data []byte) (string, error) { + return string(data), nil +} + +type IntCodec struct{} + +func (i *IntCodec) Encode(value int) ([]byte, error) { + return []byte(fmt.Sprintf("%d", value)), nil +} + +func (i *IntCodec) Decode(data []byte) (int, error) { + var value int + _, err := fmt.Sscanf(string(data), "%d", &value) + return value, err +} + +func NewJSONSessionCache[T any](cache sessions.SessionStore) *TypedSessionCache[T] { + return NewTypedSessionCache(cache, &JSONCodec[T]{}) +} + +func NewStringSessionCache(cache sessions.SessionStore) *TypedSessionCache[string] { + return NewTypedSessionCache(cache, &StringCodec{}) +} + +func NewIntSessionCache(cache sessions.SessionStore) *TypedSessionCache[int] { + return NewTypedSessionCache(cache, &IntCodec{}) +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/client_wrapper.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/client_wrapper.go deleted file mode 100644 index bdfdfd46..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/client_wrapper.go +++ /dev/null @@ -1,223 +0,0 @@ -package sync - -import ( - "context" - "reflect" - - connectorV2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" - "github.com/conductorone/baton-sdk/pkg/annotations" - "github.com/conductorone/baton-sdk/pkg/types" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/anypb" -) - -// syncIDClientWrapper wraps a ConnectorClient to add syncID to annotations in requests, -// to be used by the Session Manager. -type syncIDClientWrapper struct { - types.ConnectorClient // Embed the original client - syncID string -} - -// ensure syncIDClientWrapper implements types.ConnectorClient. -var _ types.ConnectorClient = (*syncIDClientWrapper)(nil) - -// requestWithAnnotations is an interface that all request types implement. -type requestWithAnnotations interface { - GetAnnotations() []*anypb.Any -} - -// addSyncIDToRequest adds syncID to the annotations of a request. 
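// editor's note: illustrative sketch only, not part of the vendored patch.
// A minimal use of the TypedSessionCache / JSONCodec defined in
// typed_session.go above, assuming a sessions.SessionStore obtained
// elsewhere; "teamCursor" and "saveAndLoadCursor" are hypothetical names
// introduced here purely for illustration.
package example

import (
	"context"

	"github.com/conductorone/baton-sdk/pkg/session"
	"github.com/conductorone/baton-sdk/pkg/types/sessions"
)

// teamCursor is a hypothetical JSON-serializable value to round-trip through the cache.
type teamCursor struct {
	Page int    `json:"page"`
	ETag string `json:"etag"`
}

// saveAndLoadCursor stores a cursor scoped to a sync ID and reads it back,
// letting JSONCodec handle the []byte encoding that SessionStore requires.
func saveAndLoadCursor(ctx context.Context, store sessions.SessionStore, syncID string) (teamCursor, bool, error) {
	typed := session.NewJSONSessionCache[teamCursor](store)

	if err := typed.Set(ctx, "team-cursor", teamCursor{Page: 2, ETag: "abc"}, sessions.WithSyncID(syncID)); err != nil {
		return teamCursor{}, false, err
	}
	return typed.Get(ctx, "team-cursor", sessions.WithSyncID(syncID))
}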
-func (w *syncIDClientWrapper) addSyncIDToRequest(req requestWithAnnotations) { - if req == nil || w.syncID == "" { - return - } - - // Get current annotations - currentAnnotations := req.GetAnnotations() - if currentAnnotations == nil { - currentAnnotations = []*anypb.Any{} - } - - // Create ActiveSync annotation - activeSync := &connectorV2.ActiveSync{ - Id: w.syncID, - } - - // Add the ActiveSync annotation - annos := annotations.Annotations(currentAnnotations) - annos.Update(activeSync) - - // Use reflection to set the Annotations field since the interface only provides a getter - reqValue := reflect.ValueOf(req) - if reqValue.Kind() == reflect.Ptr { - reqValue = reqValue.Elem() - } - - annotationsField := reqValue.FieldByName("Annotations") - if annotationsField.IsValid() { - annotationsField.Set(reflect.ValueOf([]*anypb.Any(annos))) - } -} - -// Only override methods that have requests with annotations. -func (w *syncIDClientWrapper) ListResourceTypes( - ctx context.Context, - in *connectorV2.ResourceTypesServiceListResourceTypesRequest, - opts ...grpc.CallOption, -) (*connectorV2.ResourceTypesServiceListResourceTypesResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.ListResourceTypes(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) ListResources( - ctx context.Context, - in *connectorV2.ResourcesServiceListResourcesRequest, - opts ...grpc.CallOption, -) (*connectorV2.ResourcesServiceListResourcesResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.ListResources(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) GetResource( - ctx context.Context, - in *connectorV2.ResourceGetterServiceGetResourceRequest, - opts ...grpc.CallOption, -) (*connectorV2.ResourceGetterServiceGetResourceResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.GetResource(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) ListEntitlements( - ctx context.Context, - in *connectorV2.EntitlementsServiceListEntitlementsRequest, - opts ...grpc.CallOption, -) (*connectorV2.EntitlementsServiceListEntitlementsResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.ListEntitlements(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) ListGrants( - ctx context.Context, - in *connectorV2.GrantsServiceListGrantsRequest, - opts ...grpc.CallOption, -) (*connectorV2.GrantsServiceListGrantsResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.ListGrants(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) Grant( - ctx context.Context, - in *connectorV2.GrantManagerServiceGrantRequest, - opts ...grpc.CallOption, -) (*connectorV2.GrantManagerServiceGrantResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.Grant(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) Revoke( - ctx context.Context, - in *connectorV2.GrantManagerServiceRevokeRequest, - opts ...grpc.CallOption, -) (*connectorV2.GrantManagerServiceRevokeResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.Revoke(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) ListEventFeeds( - ctx context.Context, - in *connectorV2.ListEventFeedsRequest, - opts ...grpc.CallOption, -) (*connectorV2.ListEventFeedsResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.ListEventFeeds(ctx, in, opts...) 
-} - -func (w *syncIDClientWrapper) ListEvents( - ctx context.Context, - in *connectorV2.ListEventsRequest, - opts ...grpc.CallOption, -) (*connectorV2.ListEventsResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.ListEvents(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) ListActionSchemas( - ctx context.Context, - in *connectorV2.ListActionSchemasRequest, - opts ...grpc.CallOption, -) (*connectorV2.ListActionSchemasResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.ListActionSchemas(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) GetActionSchema( - ctx context.Context, - in *connectorV2.GetActionSchemaRequest, - opts ...grpc.CallOption, -) (*connectorV2.GetActionSchemaResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.GetActionSchema(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) InvokeAction( - ctx context.Context, - in *connectorV2.InvokeActionRequest, - opts ...grpc.CallOption, -) (*connectorV2.InvokeActionResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.InvokeAction(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) GetActionStatus( - ctx context.Context, - in *connectorV2.GetActionStatusRequest, - opts ...grpc.CallOption, -) (*connectorV2.GetActionStatusResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.GetActionStatus(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) CreateTicket( - ctx context.Context, - in *connectorV2.TicketsServiceCreateTicketRequest, - opts ...grpc.CallOption, -) (*connectorV2.TicketsServiceCreateTicketResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.CreateTicket(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) GetTicket( - ctx context.Context, - in *connectorV2.TicketsServiceGetTicketRequest, - opts ...grpc.CallOption, -) (*connectorV2.TicketsServiceGetTicketResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.GetTicket(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) GetTicketSchema( - ctx context.Context, - in *connectorV2.TicketsServiceGetTicketSchemaRequest, - opts ...grpc.CallOption, -) (*connectorV2.TicketsServiceGetTicketSchemaResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.GetTicketSchema(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) ListTicketSchemas( - ctx context.Context, - in *connectorV2.TicketsServiceListTicketSchemasRequest, - opts ...grpc.CallOption, -) (*connectorV2.TicketsServiceListTicketSchemasResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.ListTicketSchemas(ctx, in, opts...) -} - -func (w *syncIDClientWrapper) Cleanup( - ctx context.Context, - in *connectorV2.ConnectorServiceCleanupRequest, - opts ...grpc.CallOption, -) (*connectorV2.ConnectorServiceCleanupResponse, error) { - w.addSyncIDToRequest(in) - return w.ConnectorClient.Cleanup(ctx, in, opts...) 
-} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/expander.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/expander.go new file mode 100644 index 00000000..45199e62 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/expander.go @@ -0,0 +1,328 @@ +package expand + +import ( + "context" + "database/sql" + "errors" + "fmt" + "os" + "strconv" + + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" +) + +const defaultMaxDepth int64 = 20 + +var maxDepth, _ = strconv.ParseInt(os.Getenv("BATON_GRAPH_EXPAND_MAX_DEPTH"), 10, 64) + +// ErrMaxDepthExceeded is returned when the expansion graph exceeds the maximum allowed depth. +var ErrMaxDepthExceeded = errors.New("max depth exceeded") + +// ExpanderStore defines the minimal store interface needed for grant expansion. +// This interface can be implemented by the connectorstore or by a mock for testing. +type ExpanderStore interface { + GetEntitlement(ctx context.Context, req *reader_v2.EntitlementsReaderServiceGetEntitlementRequest) (*reader_v2.EntitlementsReaderServiceGetEntitlementResponse, error) + ListGrantsForEntitlement(ctx context.Context, req *reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest) (*reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse, error) + PutGrants(ctx context.Context, grants ...*v2.Grant) error +} + +// Expander handles the grant expansion algorithm. +// It can be used standalone for testing or called from the syncer. +type Expander struct { + store ExpanderStore + graph *EntitlementGraph +} + +// NewExpander creates a new Expander with the given store and graph. +func NewExpander(store ExpanderStore, graph *EntitlementGraph) *Expander { + return &Expander{ + store: store, + graph: graph, + } +} + +// Graph returns the entitlement graph. +func (e *Expander) Graph() *EntitlementGraph { + return e.graph +} + +// Run executes the complete expansion algorithm until the graph is fully expanded. +// This is useful for testing where you want to run the entire expansion in one call. +func (e *Expander) Run(ctx context.Context) error { + for { + err := e.RunSingleStep(ctx) + if err != nil { + return err + } + if e.IsDone(ctx) { + return nil + } + } +} + +// RunSingleStep executes one step of the expansion algorithm. +// Returns true when the graph is fully expanded, false if more work is needed. +// This matches the syncer's step-by-step execution model. +func (e *Expander) RunSingleStep(ctx context.Context) error { + l := ctxzap.Extract(ctx) + l = l.With(zap.Int("depth", e.graph.Depth)) + l.Debug("expander: starting step") + + // Process current action if any + if len(e.graph.Actions) > 0 { + action := e.graph.Actions[0] + nextPageToken, err := e.runAction(ctx, action) + if err != nil { + l.Error("expander: error running graph action", zap.Error(err), zap.Any("action", action)) + _ = e.graph.DeleteEdge(ctx, action.SourceEntitlementID, action.DescendantEntitlementID) + if errors.Is(err, sql.ErrNoRows) { + // Skip action and delete the edge that caused the error. 
+ e.graph.Actions = e.graph.Actions[1:] + return nil + } + return err + } + + if nextPageToken != "" { + // More pages to process + action.PageToken = nextPageToken + } else { + // Action is complete - mark edge expanded and remove from queue + e.graph.MarkEdgeExpanded(action.SourceEntitlementID, action.DescendantEntitlementID) + e.graph.Actions = e.graph.Actions[1:] + } + } + + // If there are still actions remaining, continue processing + if len(e.graph.Actions) > 0 { + return nil + } + + // Check max depth + depth := maxDepth + if depth == 0 { + depth = defaultMaxDepth + } + + if int64(e.graph.Depth) > depth { + l.Error("expander: exceeded max depth", zap.Int64("max_depth", depth)) + return fmt.Errorf("expander: %w (%d)", ErrMaxDepthExceeded, depth) + } + + // Generate new actions from expandable entitlements + for sourceEntitlementID := range e.graph.GetExpandableEntitlements(ctx) { + for descendantEntitlementID, grantInfo := range e.graph.GetExpandableDescendantEntitlements(ctx, sourceEntitlementID) { + e.graph.Actions = append(e.graph.Actions, &EntitlementGraphAction{ + SourceEntitlementID: sourceEntitlementID, + DescendantEntitlementID: descendantEntitlementID, + PageToken: "", + Shallow: grantInfo.IsShallow, + ResourceTypeIDs: grantInfo.ResourceTypeIDs, + }) + } + } + + e.graph.Depth++ + l.Debug("expander: graph is not expanded, incrementing depth") + return nil +} + +func (e *Expander) IsDone(ctx context.Context) bool { + return e.graph.IsExpanded() +} + +// runAction processes a single action and returns the next page token. +// If the returned page token is empty, the action is complete. +func (e *Expander) runAction(ctx context.Context, action *EntitlementGraphAction) (string, error) { + l := ctxzap.Extract(ctx) + l = l.With( + zap.Int("depth", e.graph.Depth), + zap.String("source_entitlement_id", action.SourceEntitlementID), + zap.String("descendant_entitlement_id", action.DescendantEntitlementID), + ) + + // Fetch source and descendant entitlement + sourceEntitlement, err := e.store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ + EntitlementId: action.SourceEntitlementID, + }.Build()) + if err != nil { + l.Error("runAction: error fetching source entitlement", zap.Error(err)) + return "", fmt.Errorf("runAction: error fetching source entitlement: %w", err) + } + + descendantEntitlement, err := e.store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ + EntitlementId: action.DescendantEntitlementID, + }.Build()) + if err != nil { + l.Error("runAction: error fetching descendant entitlement", zap.Error(err)) + return "", fmt.Errorf("runAction: error fetching descendant entitlement: %w", err) + } + + // Fetch a page of source grants + sourceGrants, err := e.store.ListGrantsForEntitlement(ctx, reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest_builder{ + Entitlement: sourceEntitlement.GetEntitlement(), + PageToken: action.PageToken, + PrincipalResourceTypeIds: action.ResourceTypeIDs, + }.Build()) + if err != nil { + l.Error("runAction: error fetching source grants", zap.Error(err)) + return "", fmt.Errorf("runAction: error fetching source grants: %w", err) + } + + var newGrants = make([]*v2.Grant, 0) + for _, sourceGrant := range sourceGrants.GetList() { + // If this is a shallow action, then we only want to expand grants that have no sources + // which indicates that it was directly assigned. 
+ if action.Shallow { + sourcesMap := sourceGrant.GetSources().GetSources() + // If we have no sources, this is a direct grant + foundDirectGrant := len(sourcesMap) == 0 + // If the source grant has sources, then we need to see if any of them are the source entitlement itself + if sourcesMap[action.SourceEntitlementID] != nil { + foundDirectGrant = true + } + + // This is not a direct grant, so skip it since we are a shallow action + if !foundDirectGrant { + continue + } + } + + // Unroll all grants for the principal on the descendant entitlement. + pageToken := "" + for { + req := reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest_builder{ + Entitlement: descendantEntitlement.GetEntitlement(), + PrincipalId: sourceGrant.GetPrincipal().GetId(), + PageToken: pageToken, + Annotations: nil, + }.Build() + + resp, err := e.store.ListGrantsForEntitlement(ctx, req) + if err != nil { + l.Error("runAction: error fetching descendant grants", zap.Error(err)) + return "", fmt.Errorf("runAction: error fetching descendant grants: %w", err) + } + descendantGrants := resp.GetList() + + // If we have no grants for the principal in the descendant entitlement, make one. + if pageToken == "" && resp.GetNextPageToken() == "" && len(descendantGrants) == 0 { + descendantGrant, err := newExpandedGrant(descendantEntitlement.GetEntitlement(), sourceGrant.GetPrincipal(), action.SourceEntitlementID) + if err != nil { + l.Error("runAction: error creating new grant", zap.Error(err)) + return "", fmt.Errorf("runAction: error creating new grant: %w", err) + } + newGrants = append(newGrants, descendantGrant) + newGrants, err = PutGrantsInChunks(ctx, e.store, newGrants, 10000) + if err != nil { + l.Error("runAction: error updating descendant grants", zap.Error(err)) + return "", fmt.Errorf("runAction: error updating descendant grants: %w", err) + } + break + } + + // Add the source entitlement as a source to all descendant grants. + grantsToUpdate := make([]*v2.Grant, 0) + for _, descendantGrant := range descendantGrants { + sourcesMap := descendantGrant.GetSources().GetSources() + if sourcesMap == nil { + sourcesMap = make(map[string]*v2.GrantSources_GrantSource) + } + + updated := false + + if len(sourcesMap) == 0 { + // If we are already granted this entitlement, make sure to add ourselves as a source. + sourcesMap[action.DescendantEntitlementID] = &v2.GrantSources_GrantSource{} + updated = true + } + // Include the source grant as a source. + if sourcesMap[action.SourceEntitlementID] == nil { + sourcesMap[action.SourceEntitlementID] = &v2.GrantSources_GrantSource{} + updated = true + } + + if updated { + sources := v2.GrantSources_builder{Sources: sourcesMap}.Build() + descendantGrant.SetSources(sources) + grantsToUpdate = append(grantsToUpdate, descendantGrant) + } + } + newGrants = append(newGrants, grantsToUpdate...) 
+ + newGrants, err = PutGrantsInChunks(ctx, e.store, newGrants, 10000) + if err != nil { + l.Error("runAction: error updating descendant grants", zap.Error(err)) + return "", fmt.Errorf("runAction: error updating descendant grants: %w", err) + } + + pageToken = resp.GetNextPageToken() + if pageToken == "" { + break + } + } + } + + _, err = PutGrantsInChunks(ctx, e.store, newGrants, 0) + if err != nil { + l.Error("runAction: error updating descendant grants", zap.Error(err)) + return "", fmt.Errorf("runAction: error updating descendant grants: %w", err) + } + + return sourceGrants.GetNextPageToken(), nil +} + +// PutGrantsInChunks accumulates grants until the buffer exceeds minChunkSize, +// then writes all grants to the store at once. +func PutGrantsInChunks(ctx context.Context, store ExpanderStore, grants []*v2.Grant, minChunkSize int) ([]*v2.Grant, error) { + if len(grants) < minChunkSize { + return grants, nil + } + + err := store.PutGrants(ctx, grants...) + if err != nil { + return nil, fmt.Errorf("PutGrantsInChunks: error putting grants: %w", err) + } + + return make([]*v2.Grant, 0), nil +} + +// newExpandedGrant creates a new grant for a principal on a descendant entitlement. +func newExpandedGrant(descEntitlement *v2.Entitlement, principal *v2.Resource, sourceEntitlementID string) (*v2.Grant, error) { + enResource := descEntitlement.GetResource() + if enResource == nil { + return nil, fmt.Errorf("newExpandedGrant: entitlement has no resource") + } + + if principal == nil { + return nil, fmt.Errorf("newExpandedGrant: principal is nil") + } + + // Add immutable annotation since this function is only called if no direct grant exists + var annos annotations.Annotations + annos.Update(&v2.GrantImmutable{}) + + var sources *v2.GrantSources + if sourceEntitlementID != "" { + sources = &v2.GrantSources{ + Sources: map[string]*v2.GrantSources_GrantSource{ + sourceEntitlementID: {}, + }, + } + } + + grant := v2.Grant_builder{ + Id: fmt.Sprintf("%s:%s:%s", descEntitlement.GetId(), principal.GetId().GetResourceType(), principal.GetId().GetResource()), + Entitlement: descEntitlement, + Principal: principal, + Sources: sources, + Annotations: annos, + }.Build() + + return grant, nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/graph.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/graph.go index ef7d9850..4dce1baf 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/graph.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/expand/graph.go @@ -2,6 +2,7 @@ package expand import ( "context" + "iter" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" "github.com/conductorone/baton-sdk/pkg/sync/expand/scc" @@ -138,9 +139,9 @@ func (g *EntitlementGraph) GetDescendantEntitlements(entitlementID string) map[s if destinations, ok := g.SourcesToDestinations[node.Id]; ok { for destinationID, edgeID := range destinations { if destination, ok := g.Nodes[destinationID]; ok { - for _, entitlementID := range destination.EntitlementIDs { + for _, e := range destination.EntitlementIDs { if edge, ok := g.Edges[edgeID]; ok { - entitlementsToEdges[entitlementID] = &edge + entitlementsToEdges[e] = &edge } } } @@ -149,6 +150,31 @@ func (g *EntitlementGraph) GetDescendantEntitlements(entitlementID string) map[s return entitlementsToEdges } +func (g *EntitlementGraph) GetExpandableDescendantEntitlements(ctx context.Context, entitlementID string) iter.Seq2[string, *Edge] { + return func(yield func(string, *Edge) bool) { + node := 
g.GetNode(entitlementID) + if node == nil { + return + } + if destinations, ok := g.SourcesToDestinations[node.Id]; ok { + for destinationID, edgeID := range destinations { + if destination, ok := g.Nodes[destinationID]; ok { + for _, e := range destination.EntitlementIDs { + if edge, ok := g.Edges[edgeID]; ok { + if edge.IsExpanded { + continue + } + if !yield(e, &edge) { + return + } + } + } + } + } + } + } +} + func (g *EntitlementGraph) HasEntitlement(entitlementID string) bool { return g.GetNode(entitlementID) != nil } @@ -156,7 +182,7 @@ func (g *EntitlementGraph) HasEntitlement(entitlementID string) bool { // AddEntitlement - add an entitlement's ID as an unconnected node in the graph. func (g *EntitlementGraph) AddEntitlement(entitlement *v2.Entitlement) { // If the entitlement is already in the graph, fail silently. - found := g.GetNode(entitlement.Id) + found := g.GetNode(entitlement.GetId()) if found != nil { return } @@ -168,12 +194,12 @@ func (g *EntitlementGraph) AddEntitlement(entitlement *v2.Entitlement) { // Create a new node. node := Node{ Id: g.NextNodeID, - EntitlementIDs: []string{entitlement.Id}, + EntitlementIDs: []string{entitlement.GetId()}, } // Add the node to the data structures. g.Nodes[node.Id] = node - g.EntitlementsToNodes[entitlement.Id] = node.Id + g.EntitlementsToNodes[entitlement.GetId()] = node.Id } // GetEntitlements returns a combined list of _all_ entitlements from all nodes. @@ -185,6 +211,28 @@ func (g *EntitlementGraph) GetEntitlements() []string { return entitlements } +func (g *EntitlementGraph) GetExpandableEntitlements(ctx context.Context) iter.Seq[string] { + return func(yield func(string) bool) { + l := ctxzap.Extract(ctx) + for _, node := range g.Nodes { + for _, entitlementID := range node.EntitlementIDs { + // We've already expanded this entitlement, so skip it. + if g.IsEntitlementExpanded(entitlementID) { + continue + } + // We have ancestors who have not been expanded yet, so we can't expand ourselves. + if g.HasUnexpandedAncestors(entitlementID) { + l.Debug("expandGrantsForEntitlements: skipping source entitlement because it has unexpanded ancestors", zap.String("source_entitlement_id", entitlementID)) + continue + } + if !yield(entitlementID) { + return + } + } + } + } +} + // MarkEdgeExpanded given source and destination entitlements, mark the edge // between them as "expanded". func (g *EntitlementGraph) MarkEdgeExpanded(sourceEntitlementID string, descendantEntitlementID string) { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go index 2cc70186..81eeace7 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go @@ -19,6 +19,7 @@ type State interface { ResourceTypeID(ctx context.Context) string ResourceID(ctx context.Context) string EntitlementGraph(ctx context.Context) *expand.EntitlementGraph + ClearEntitlementGraph(ctx context.Context) ParentResourceID(ctx context.Context) string ParentResourceTypeID(ctx context.Context) string PageToken(ctx context.Context) string @@ -33,6 +34,9 @@ type State interface { SetShouldFetchRelatedResources() ShouldSkipEntitlementsAndGrants() bool SetShouldSkipEntitlementsAndGrants() + ShouldSkipGrants() bool + SetShouldSkipGrants() + GetCompletedActionsCount() uint64 } // ActionOp represents a sync operation. 
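// editor's note: illustrative aside, not part of the vendored patch. The
// GetCompletedActionsCount() accessor added to the State interface above is
// consumed later in Sync(): the syncer now aborts only when warnings are both
// numerous (more than 10) and a large share (more than 10%) of the actions
// completed so far. The same check in isolation ("tooManyWarnings" is a
// hypothetical helper name):
package example

// tooManyWarnings reports whether a sync has accumulated enough warnings,
// relative to completed actions, that it should stop rather than continue.
func tooManyWarnings(warningCount int, completedActions uint64) bool {
	if warningCount <= 10 || completedActions == 0 {
		return false
	}
	return float64(warningCount)/float64(completedActions) > 0.1
}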
@@ -49,6 +53,8 @@ func (s ActionOp) String() string { return "list-resources" case SyncEntitlementsOp: return "list-entitlements" + case ListResourcesForEntitlementsOp: + return "list-resources-for-entitlements" case SyncGrantsOp: return "list-grants" case SyncExternalResourcesOp: @@ -59,6 +65,8 @@ func (s ActionOp) String() string { return "grant-expansion" case SyncTargetedResourceOp: return "targeted-resource-sync" + case SyncStaticEntitlementsOp: + return "list-static-entitlements" default: return "unknown" } @@ -102,11 +110,17 @@ func newActionOp(str string) ActionOp { return SyncExternalResourcesOp case SyncTargetedResourceOp.String(): return SyncTargetedResourceOp + case SyncStaticEntitlementsOp.String(): + return SyncStaticEntitlementsOp + case ListResourcesForEntitlementsOp.String(): + return ListResourcesForEntitlementsOp default: return UnknownOp } } +// Do not change the order of these constants, and only append new ones at the end. +// Otherwise resuming a sync started by an older version of baton-sdk will cause very strange behavior. const ( UnknownOp ActionOp = iota InitOp @@ -119,6 +133,7 @@ const ( SyncAssetsOp SyncGrantExpansionOp SyncTargetedResourceOp + SyncStaticEntitlementsOp ) // Action stores the current operation, page token, and optional fields for which resource is being worked with. @@ -141,6 +156,8 @@ type state struct { hasExternalResourceGrants bool shouldFetchRelatedResources bool shouldSkipEntitlementsAndGrants bool + shouldSkipGrants bool + completedActionsCount uint64 } // serializedToken is used to serialize the token to JSON. This separate object is used to avoid having exported fields @@ -153,6 +170,8 @@ type serializedToken struct { HasExternalResourceGrants bool `json:"has_external_resource_grants,omitempty"` ShouldFetchRelatedResources bool `json:"should_fetch_related_resources,omitempty"` ShouldSkipEntitlementsAndGrants bool `json:"should_skip_entitlements_and_grants,omitempty"` + ShouldSkipGrants bool `json:"should_skip_grants,omitempty"` + CompletedActionsCount uint64 `json:"completed_actions_count,omitempty"` } // push adds a new action to the stack. 
If there is no current state, the action is directly set to current, else @@ -180,6 +199,7 @@ func (st *state) pop() *Action { } ret := *st.currentAction + st.completedActionsCount++ if len(st.actions) > 0 { st.currentAction = &st.actions[len(st.actions)-1] @@ -221,13 +241,17 @@ func (st *state) Unmarshal(input string) error { st.actions = token.Actions st.currentAction = token.CurrentAction st.needsExpansion = token.NeedsExpansion + st.entitlementGraph = token.EntitlementGraph st.hasExternalResourceGrants = token.HasExternalResourceGrants st.shouldSkipEntitlementsAndGrants = token.ShouldSkipEntitlementsAndGrants + st.shouldSkipGrants = token.ShouldSkipGrants st.shouldFetchRelatedResources = token.ShouldFetchRelatedResources + st.completedActionsCount = token.CompletedActionsCount } else { st.actions = nil st.entitlementGraph = nil st.currentAction = &Action{Op: InitOp} + st.completedActionsCount = 0 } return nil @@ -246,6 +270,8 @@ func (st *state) Marshal() (string, error) { HasExternalResourceGrants: st.hasExternalResourceGrants, ShouldFetchRelatedResources: st.shouldFetchRelatedResources, ShouldSkipEntitlementsAndGrants: st.shouldSkipEntitlementsAndGrants, + ShouldSkipGrants: st.shouldSkipGrants, + CompletedActionsCount: st.completedActionsCount, }) if err != nil { return "", err @@ -314,6 +340,14 @@ func (st *state) SetShouldSkipEntitlementsAndGrants() { st.shouldSkipEntitlementsAndGrants = true } +func (st *state) ShouldSkipGrants() bool { + return st.shouldSkipGrants +} + +func (st *state) SetShouldSkipGrants() { + st.shouldSkipGrants = true +} + // PageToken returns the page token for the current action. func (st *state) PageToken(ctx context.Context) string { c := st.Current() @@ -356,6 +390,11 @@ func (st *state) EntitlementGraph(ctx context.Context) *expand.EntitlementGraph return st.entitlementGraph } +// ClearEntitlementGraph clears the entitlement graph. This is meant to make the final sync token less confusing. 
+func (st *state) ClearEntitlementGraph(ctx context.Context) { + st.entitlementGraph = nil +} + func (st *state) ParentResourceID(ctx context.Context) string { c := st.Current() if c == nil { @@ -373,3 +412,9 @@ func (st *state) ParentResourceTypeID(ctx context.Context) string { return c.ParentResourceTypeID } + +func (st *state) GetCompletedActionsCount() uint64 { + st.mtx.RLock() + defer st.mtx.RUnlock() + return st.completedActionsCount +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go index d258f0dd..110efa35 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go @@ -7,12 +7,14 @@ import ( "errors" "fmt" "io" + "iter" "os" "slices" "strconv" "strings" "time" + "github.com/Masterminds/semver/v3" "github.com/conductorone/baton-sdk/pkg/bid" "github.com/conductorone/baton-sdk/pkg/dotc1z" "github.com/conductorone/baton-sdk/pkg/retry" @@ -20,6 +22,7 @@ import ( "github.com/conductorone/baton-sdk/pkg/types/entitlement" batonGrant "github.com/conductorone/baton-sdk/pkg/types/grant" "github.com/conductorone/baton-sdk/pkg/types/resource" + "github.com/conductorone/baton-sdk/pkg/types/sessions" mapset "github.com/deckarep/golang-set/v2" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.opentelemetry.io/otel" @@ -40,9 +43,6 @@ import ( var tracer = otel.Tracer("baton-sdk/sync") -const defaultMaxDepth int64 = 20 - -var maxDepth, _ = strconv.ParseInt(os.Getenv("BATON_GRAPH_EXPAND_MAX_DEPTH"), 10, 64) var dontFixCycles, _ = strconv.ParseBool(os.Getenv("BATON_DONT_FIX_CYCLES")) var ErrSyncNotComplete = fmt.Errorf("sync exited without finishing") @@ -207,15 +207,19 @@ type syncer struct { skipFullSync bool lastCheckPointTime time.Time counts *ProgressCounts - targetedSyncResourceIDs []string + targetedSyncResources []*v2.Resource onlyExpandGrants bool dontExpandGrants bool syncID string skipEGForResourceType map[string]bool + skipEntitlementsForResourceType map[string]bool skipEntitlementsAndGrants bool + skipGrants bool resourceTypeTraits map[string][]v2.ResourceType_Trait syncType connectorstore.SyncType injectSyncIDAnnotation bool + setSessionStore sessions.SetSessionStore + syncResourceTypes []string } const minCheckpointInterval = 10 * time.Second @@ -270,9 +274,9 @@ func isWarning(ctx context.Context, err error) bool { func (s *syncer) startOrResumeSync(ctx context.Context) (string, bool, error) { // Sync resuming logic: // If we know our sync ID, set it as the current sync and return (resuming that sync). - // If targetedSyncResourceIDs is not set, find the most recent unfinished sync of our desired sync type & resume it (regardless of partial or full). + // If targetedSyncResources is not set, find the most recent unfinished sync of our desired sync type & resume it (regardless of partial or full). // If there are no unfinished syncs of our desired sync type, start a new sync. - // If targetedSyncResourceIDs is set, start a new partial sync. Use the most recent completed sync as the parent sync ID (if it exists). + // If targetedSyncResources is set, start a new partial sync. Use the most recent completed sync as the parent sync ID (if it exists). 
if s.syncID != "" { err := s.store.SetCurrentSync(ctx, s.syncID) @@ -285,7 +289,7 @@ func (s *syncer) startOrResumeSync(ctx context.Context) (string, bool, error) { var syncID string var newSync bool var err error - if len(s.targetedSyncResourceIDs) == 0 { + if len(s.targetedSyncResources) == 0 { syncID, newSync, err = s.store.StartOrResumeSync(ctx, s.syncType, "") if err != nil { return "", false, err @@ -294,16 +298,16 @@ func (s *syncer) startOrResumeSync(ctx context.Context) (string, bool, error) { } // Get most recent completed full sync if it exists - latestFullSyncResponse, err := s.store.GetLatestFinishedSync(ctx, &reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest{ + latestFullSyncResponse, err := s.store.GetLatestFinishedSync(ctx, reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest_builder{ SyncType: string(connectorstore.SyncTypeFull), - }) + }.Build()) if err != nil { return "", false, err } var latestFullSyncId string - latestFullSync := latestFullSyncResponse.Sync + latestFullSync := latestFullSyncResponse.GetSync() if latestFullSync != nil { - latestFullSyncId = latestFullSync.Id + latestFullSyncId = latestFullSync.GetId() } syncID, err = s.store.StartNewSync(ctx, connectorstore.SyncTypePartial, latestFullSyncId) if err != nil { @@ -314,6 +318,13 @@ func (s *syncer) startOrResumeSync(ctx context.Context) (string, bool, error) { return syncID, newSync, nil } +func (s *syncer) getActiveSyncID() string { + if s.injectSyncIDAnnotation { + return s.syncID + } + return "" +} + // Sync starts the syncing process. The sync process is driven by the action stack that is part of the state object. // For each page of data that is required to be fetched from the connector, a new action is pushed on to the stack. Once // an action is completed, it is popped off of the queue. Before processing each action, we checkpoint the state object @@ -341,18 +352,41 @@ func (s *syncer) Sync(ctx context.Context) error { if err != nil { return err } - _, err = s.connector.Validate(ctx, &v2.ConnectorServiceValidateRequest{}) + + resp, err := s.connector.Validate(ctx, &v2.ConnectorServiceValidateRequest{}) if err != nil { return err } + if resp.GetSdkVersion() != "" { + sdkVersion, err := semver.NewVersion(resp.GetSdkVersion()) + if err != nil { + l.Warn("error parsing sdk version", zap.String("sdk_version", resp.GetSdkVersion()), zap.Error(err)) + } else { + supportsActiveSyncId, err := semver.NewConstraint(">= 0.4.3") + if err != nil { + return fmt.Errorf("error parsing sdk version %s: %w", resp.GetSdkVersion(), err) + } + s.injectSyncIDAnnotation = supportsActiveSyncId.Check(sdkVersion) + } + } + + syncResourceTypeMap := make(map[string]bool) + if len(s.syncResourceTypes) > 0 { + for _, rt := range s.syncResourceTypes { + syncResourceTypeMap[rt] = true + } + } + // Validate any targeted resource IDs before starting a sync. 
targetedResources := []*v2.Resource{} - for _, resourceID := range s.targetedSyncResourceIDs { - r, err := bid.ParseResourceBid(resourceID) - if err != nil { - return fmt.Errorf("error parsing resource id %s: %w", resourceID, err) + for _, r := range s.targetedSyncResources { + if len(s.syncResourceTypes) > 0 { + if _, ok := syncResourceTypeMap[r.GetId().GetResourceType()]; !ok { + continue + } } + targetedResources = append(targetedResources, r) } @@ -368,19 +402,6 @@ func (s *syncer) Sync(ctx context.Context) error { l.Error("no syncID found after starting or resuming sync", zap.Error(err)) return err } - if s.injectSyncIDAnnotation { - if wrapper, ok := s.connector.(*syncIDClientWrapper); ok { - wrapper.syncID = syncID - } else { - l.Error("connector is not a syncIDClientWrapper") - return errors.New("connector is not a syncIDClientWrapper") - } - } - - // Add ActiveSync to context once after we have the syncID - if syncID != "" { - ctx = types.SetSyncIDInContext(ctx, syncID) - } span.SetAttributes(attribute.String("sync_id", syncID)) @@ -401,6 +422,39 @@ func (s *syncer) Sync(ctx context.Context) error { return err } s.state = state + if !newSync { + currentAction := s.state.Current() + currentActionOp := "" + currentActionPageToken := "" + currentActionResourceID := "" + currentActionResourceTypeID := "" + if currentAction != nil { + currentActionOp = currentAction.Op.String() + currentActionPageToken = currentAction.PageToken + currentActionResourceID = currentAction.ResourceID + currentActionResourceTypeID = currentAction.ResourceTypeID + } + entitlementGraph := s.state.EntitlementGraph(ctx) + l.Info("resumed previous sync", + zap.String("sync_id", syncID), + zap.String("sync_type", string(s.syncType)), + zap.String("current_action_op", currentActionOp), + zap.String("current_action_resource_id", currentActionResourceID), + zap.String("current_action_resource_type_id", currentActionResourceTypeID), + zap.String("current_action_page_token", currentActionPageToken), + zap.Bool("needs_expansion", s.state.NeedsExpansion()), + zap.Bool("has_external_resources_grants", s.state.HasExternalResourcesGrants()), + zap.Bool("should_fetch_related_resources", s.state.ShouldFetchRelatedResources()), + zap.Bool("should_skip_entitlements_and_grants", s.state.ShouldSkipEntitlementsAndGrants()), + zap.Bool("should_skip_grants", s.state.ShouldSkipGrants()), + zap.Bool("graph_loaded", entitlementGraph.Loaded), + zap.Bool("graph_has_no_cycles", entitlementGraph.HasNoCycles), + zap.Int("graph_depth", entitlementGraph.Depth), + zap.Int("graph_actions", len(entitlementGraph.Actions)), + zap.Int("graph_edges", len(entitlementGraph.Edges)), + zap.Int("graph_nodes", len(entitlementGraph.Nodes)), + ) + } retryer := retry.NewRetryer(ctx, retry.RetryConfig{ MaxAttempts: 0, @@ -415,17 +469,25 @@ func (s *syncer) Sync(ctx context.Context) error { return err } - // TODO: count actions divided by warnings and error if warning percentage is too high + // If we have more than 10 warnings and more than 10% of actions ended in a warning, exit the sync. if len(warnings) > 10 { - return fmt.Errorf("too many warnings, exiting sync. warnings: %v", warnings) + completedActionsCount := s.state.GetCompletedActionsCount() + if completedActionsCount > 0 && float64(len(warnings))/float64(completedActionsCount) > 0.1 { + return fmt.Errorf("too many warnings, exiting sync. 
warnings: %v completed actions: %d", warnings, completedActionsCount) + } } select { case <-runCtx.Done(): err = context.Cause(runCtx) switch { case errors.Is(err, context.DeadlineExceeded): - l.Debug("sync run duration has expired, exiting sync early", zap.String("sync_id", syncID)) - return ErrSyncNotComplete + l.Info("sync run duration has expired, exiting sync early", zap.String("sync_id", syncID)) + // It would be nice to remove this once we're more confident in the checkpointing logic. + checkpointErr := s.Checkpoint(ctx, true) + if checkpointErr != nil { + l.Error("error checkpointing before exiting sync", zap.Error(checkpointErr)) + } + return errors.Join(checkpointErr, ErrSyncNotComplete) default: l.Error("sync context cancelled", zap.String("sync_id", syncID), zap.Error(err)) return err @@ -442,6 +504,9 @@ func (s *syncer) Sync(ctx context.Context) error { if s.skipEntitlementsAndGrants { s.state.SetShouldSkipEntitlementsAndGrants() } + if s.skipGrants { + s.state.SetShouldSkipGrants() + } if len(targetedResources) > 0 { for _, r := range targetedResources { s.state.PushAction(ctx, Action{ @@ -479,8 +544,13 @@ func (s *syncer) Sync(ctx context.Context) error { continue } if !s.state.ShouldSkipEntitlementsAndGrants() { - s.state.PushAction(ctx, Action{Op: SyncGrantsOp}) + if !s.state.ShouldSkipGrants() { + s.state.PushAction(ctx, Action{Op: SyncGrantsOp}) + } + s.state.PushAction(ctx, Action{Op: SyncEntitlementsOp}) + + s.state.PushAction(ctx, Action{Op: SyncStaticEntitlementsOp}) } s.state.PushAction(ctx, Action{Op: SyncResourcesOp}) s.state.PushAction(ctx, Action{Op: SyncResourceTypesOp}) @@ -518,6 +588,18 @@ func (s *syncer) Sync(ctx context.Context) error { } continue + case SyncStaticEntitlementsOp: + err = s.SyncStaticEntitlements(ctx) + if isWarning(ctx, err) { + l.Warn("skipping sync static entitlements action", zap.Any("stateAction", stateAction), zap.Error(err)) + warnings = append(warnings, err) + s.state.FinishAction(ctx) + continue + } + if !retryer.ShouldWaitAndRetry(ctx, err) { + return err + } + continue case SyncEntitlementsOp: err = s.SyncEntitlements(ctx) if isWarning(ctx, err) { @@ -574,7 +656,9 @@ func (s *syncer) Sync(ctx context.Context) error { } } - // Force a checkpoint to clear sync_token. + // Force a checkpoint to clear completed actions & entitlement graph in sync_token. 
+ s.state.ClearEntitlementGraph(ctx) + err = s.Checkpoint(ctx, true) if err != nil { return err @@ -592,9 +676,9 @@ func (s *syncer) Sync(ctx context.Context) error { return err } - _, err = s.connector.Cleanup(ctx, &v2.ConnectorServiceCleanupRequest{ - Annotations: annotations.New(&v2.ActiveSync{Id: s.syncID}), - }) + _, err = s.connector.Cleanup(ctx, v2.ConnectorServiceCleanupRequest_builder{ + ActiveSyncId: s.getActiveSyncID(), + }.Build()) if err != nil { l.Error("error clearing connector caches", zap.Error(err)) } @@ -649,6 +733,29 @@ func (s *syncer) SkipSync(ctx context.Context) error { return nil } +func (s *syncer) listAllResourceTypes(ctx context.Context) iter.Seq2[[]*v2.ResourceType, error] { + return func(yield func([]*v2.ResourceType, error) bool) { + pageToken := "" + for { + resp, err := s.connector.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{PageToken: pageToken}.Build()) + if err != nil { + _ = yield(nil, err) + return + } + resourceTypes := resp.GetList() + if len(resourceTypes) > 0 { + if !yield(resourceTypes, err) { + return + } + } + pageToken = resp.GetNextPageToken() + if pageToken == "" { + return + } + } + } +} + // SyncResourceTypes calls the ListResourceType() connector endpoint and persists the results in to the datasource. func (s *syncer) SyncResourceTypes(ctx context.Context) error { ctx, span := tracer.Start(ctx, "syncer.SyncResourceTypes") @@ -666,29 +773,56 @@ func (s *syncer) SyncResourceTypes(ctx context.Context) error { return err } - resp, err := s.connector.ListResourceTypes(ctx, &v2.ResourceTypesServiceListResourceTypesRequest{ - PageToken: pageToken, - Annotations: annotations.New(&v2.ActiveSync{Id: s.syncID}), - }) + resp, err := s.connector.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{ + PageToken: pageToken, + ActiveSyncId: s.getActiveSyncID(), + }.Build()) if err != nil { return err } - err = s.store.PutResourceTypes(ctx, resp.List...) + var resourceTypes []*v2.ResourceType + if len(s.syncResourceTypes) > 0 { + syncResourceTypeMap := make(map[string]bool) + for _, rt := range s.syncResourceTypes { + syncResourceTypeMap[rt] = true + } + for _, rt := range resp.GetList() { + if shouldSync := syncResourceTypeMap[rt.GetId()]; shouldSync { + resourceTypes = append(resourceTypes, rt) + } + } + } else { + resourceTypes = resp.GetList() + } + + err = s.store.PutResourceTypes(ctx, resourceTypes...) 
if err != nil { return err } - s.counts.ResourceTypes += len(resp.List) - s.handleProgress(ctx, s.state.Current(), len(resp.List)) + s.counts.ResourceTypes += len(resourceTypes) + s.handleProgress(ctx, s.state.Current(), len(resourceTypes)) - if resp.NextPageToken == "" { + if resp.GetNextPageToken() == "" { s.counts.LogResourceTypesProgress(ctx) + + if len(s.syncResourceTypes) > 0 { + validResourceTypesResp, err := s.store.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{PageToken: pageToken}.Build()) + if err != nil { + return err + } + err = validateSyncResourceTypesFilter(s.syncResourceTypes, validResourceTypesResp.GetList()) + if err != nil { + return err + } + } + s.state.FinishAction(ctx) return nil } - err = s.state.NextPage(ctx, resp.NextPageToken) + err = s.state.NextPage(ctx, resp.GetNextPageToken()) if err != nil { return err } @@ -696,24 +830,46 @@ func (s *syncer) SyncResourceTypes(ctx context.Context) error { return nil } +func validateSyncResourceTypesFilter(resourceTypesFilter []string, validResourceTypes []*v2.ResourceType) error { + validResourceTypesMap := make(map[string]bool) + for _, rt := range validResourceTypes { + validResourceTypesMap[rt.GetId()] = true + } + for _, rt := range resourceTypesFilter { + if _, ok := validResourceTypesMap[rt]; !ok { + return fmt.Errorf("invalid resource type '%s' in filter", rt) + } + } + return nil +} + // getSubResources fetches the sub resource types from a resources' annotations. func (s *syncer) getSubResources(ctx context.Context, parent *v2.Resource) error { ctx, span := tracer.Start(ctx, "syncer.getSubResources") defer span.End() - for _, a := range parent.Annotations { + syncResourceTypeMap := make(map[string]bool) + for _, rt := range s.syncResourceTypes { + syncResourceTypeMap[rt] = true + } + + for _, a := range parent.GetAnnotations() { if a.MessageIs((*v2.ChildResourceType)(nil)) { crt := &v2.ChildResourceType{} err := a.UnmarshalTo(crt) if err != nil { return err } - + if len(s.syncResourceTypes) > 0 { + if shouldSync := syncResourceTypeMap[crt.GetResourceTypeId()]; !shouldSync { + continue + } + } childAction := Action{ Op: SyncResourcesOp, - ResourceTypeID: crt.ResourceTypeId, - ParentResourceID: parent.Id.Resource, - ParentResourceTypeID: parent.Id.ResourceType, + ResourceTypeID: crt.GetResourceTypeId(), + ParentResourceID: parent.GetId().GetResource(), + ParentResourceTypeID: parent.GetId().GetResourceType(), } s.state.PushAction(ctx, childAction) } @@ -727,14 +883,14 @@ func (s *syncer) getResourceFromConnector(ctx context.Context, resourceID *v2.Re defer span.End() resourceResp, err := s.connector.GetResource(ctx, - &v2.ResourceGetterServiceGetResourceRequest{ + v2.ResourceGetterServiceGetResourceRequest_builder{ ResourceId: resourceID, ParentResourceId: parentResourceID, - Annotations: annotations.New(&v2.ActiveSync{Id: s.syncID}), - }, + ActiveSyncId: s.getActiveSyncID(), + }.Build(), ) if err == nil { - return resourceResp.Resource, nil + return resourceResp.GetResource(), nil } l := ctxzap.Extract(ctx) if status.Code(err) == codes.NotFound { @@ -762,16 +918,16 @@ func (s *syncer) SyncTargetedResource(ctx context.Context) error { parentResourceTypeID := s.state.ParentResourceTypeID(ctx) var prID *v2.ResourceId if parentResourceID != "" && parentResourceTypeID != "" { - prID = &v2.ResourceId{ + prID = v2.ResourceId_builder{ ResourceType: parentResourceTypeID, Resource: parentResourceID, - } + }.Build() } - resource, err := s.getResourceFromConnector(ctx, &v2.ResourceId{ + 
resource, err := s.getResourceFromConnector(ctx, v2.ResourceId_builder{ ResourceType: resourceTypeID, Resource: resourceID, - }, prID) + }.Build(), prID) if err != nil { return err } @@ -803,7 +959,7 @@ func (s *syncer) SyncTargetedResource(ctx context.Context) error { }) } - shouldSkipEnts, err := s.shouldSkipEntitlementsAndGrants(ctx, resource) + shouldSkipEnts, err := s.shouldSkipEntitlements(ctx, resource) if err != nil { return err } @@ -838,13 +994,13 @@ func (s *syncer) SyncResources(ctx context.Context) error { s.handleInitialActionForStep(ctx, *s.state.Current()) } - resp, err := s.store.ListResourceTypes(ctx, &v2.ResourceTypesServiceListResourceTypesRequest{PageToken: pageToken}) + resp, err := s.store.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{PageToken: pageToken}.Build()) if err != nil { return err } - if resp.NextPageToken != "" { - err = s.state.NextPage(ctx, resp.NextPageToken) + if resp.GetNextPageToken() != "" { + err = s.state.NextPage(ctx, resp.GetNextPageToken()) if err != nil { return err } @@ -852,8 +1008,8 @@ func (s *syncer) SyncResources(ctx context.Context) error { s.state.FinishAction(ctx) } - for _, rt := range resp.List { - action := Action{Op: SyncResourcesOp, ResourceTypeID: rt.Id} + for _, rt := range resp.GetList() { + action := Action{Op: SyncResourcesOp, ResourceTypeID: rt.GetId()} // If this request specified a parent resource, only queue up syncing resources for children of the parent resource if s.state.Current() != nil && s.state.Current().ParentResourceTypeID != "" && s.state.Current().ParentResourceID != "" { action.ParentResourceID = s.state.Current().ParentResourceID @@ -874,16 +1030,16 @@ func (s *syncer) syncResources(ctx context.Context) error { ctx, span := tracer.Start(ctx, "syncer.syncResources") defer span.End() - req := &v2.ResourcesServiceListResourcesRequest{ + req := v2.ResourcesServiceListResourcesRequest_builder{ ResourceTypeId: s.state.ResourceTypeID(ctx), PageToken: s.state.PageToken(ctx), - Annotations: annotations.New(&v2.ActiveSync{Id: s.syncID}), - } + ActiveSyncId: s.getActiveSyncID(), + }.Build() if s.state.ParentResourceTypeID(ctx) != "" && s.state.ParentResourceID(ctx) != "" { - req.ParentResourceId = &v2.ResourceId{ + req.SetParentResourceId(v2.ResourceId_builder{ ResourceType: s.state.ParentResourceTypeID(ctx), Resource: s.state.ParentResourceID(ctx), - } + }.Build()) } resp, err := s.connector.ListResources(ctx, req) @@ -891,27 +1047,27 @@ func (s *syncer) syncResources(ctx context.Context) error { return err } - s.handleProgress(ctx, s.state.Current(), len(resp.List)) + s.handleProgress(ctx, s.state.Current(), len(resp.GetList())) resourceTypeId := s.state.ResourceTypeID(ctx) - s.counts.Resources[resourceTypeId] += len(resp.List) + s.counts.Resources[resourceTypeId] += len(resp.GetList()) - if resp.NextPageToken == "" { + if resp.GetNextPageToken() == "" { s.counts.LogResourcesProgress(ctx, resourceTypeId) s.state.FinishAction(ctx) } else { - err = s.state.NextPage(ctx, resp.NextPageToken) + err = s.state.NextPage(ctx, resp.GetNextPageToken()) if err != nil { return err } } bulkPutResoruces := []*v2.Resource{} - for _, r := range resp.List { + for _, r := range resp.GetList() { // Check if we've already synced this resource, skip it if we have - _, err = s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ - ResourceId: &v2.ResourceId{ResourceType: r.Id.ResourceType, Resource: r.Id.Resource}, - }) + _, err = s.store.GetResource(ctx, 
reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ + ResourceId: v2.ResourceId_builder{ResourceType: r.GetId().GetResourceType(), Resource: r.GetId().GetResource()}.Build(), + }.Build()) if err == nil { continue } @@ -926,7 +1082,7 @@ func (s *syncer) syncResources(ctx context.Context) error { } // Set the resource creation source - r.CreationSource = v2.Resource_CREATION_SOURCE_CONNECTOR_LIST_RESOURCES + r.SetCreationSource(v2.Resource_CREATION_SOURCE_CONNECTOR_LIST_RESOURCES) bulkPutResoruces = append(bulkPutResoruces, r) @@ -950,16 +1106,16 @@ func (s *syncer) validateResourceTraits(ctx context.Context, r *v2.Resource) err ctx, span := tracer.Start(ctx, "syncer.validateResourceTraits") defer span.End() - resourceTypeTraits, ok := s.resourceTypeTraits[r.Id.ResourceType] + resourceTypeTraits, ok := s.resourceTypeTraits[r.GetId().GetResourceType()] if !ok { - resourceTypeResponse, err := s.store.GetResourceType(ctx, &reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest{ - ResourceTypeId: r.Id.ResourceType, - }) + resourceTypeResponse, err := s.store.GetResourceType(ctx, reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest_builder{ + ResourceTypeId: r.GetId().GetResourceType(), + }.Build()) if err != nil { return err } - resourceTypeTraits = resourceTypeResponse.ResourceType.Traits - s.resourceTypeTraits[r.Id.ResourceType] = resourceTypeTraits + resourceTypeTraits = resourceTypeResponse.GetResourceType().GetTraits() + s.resourceTypeTraits[r.GetId().GetResourceType()] = resourceTypeTraits } for _, t := range resourceTypeTraits { @@ -979,13 +1135,13 @@ func (s *syncer) validateResourceTraits(ctx context.Context, r *v2.Resource) err } if trait != nil { - annos := annotations.Annotations(r.Annotations) + annos := annotations.Annotations(r.GetAnnotations()) if !annos.Contains(trait) { ctxzap.Extract(ctx).Error( "resource was missing expected trait", zap.String("trait", string(trait.ProtoReflect().Descriptor().Name())), - zap.String("resource_type_id", r.Id.ResourceType), - zap.String("resource_id", r.Id.Resource), + zap.String("resource_type_id", r.GetId().GetResourceType()), + zap.String("resource_id", r.GetId().GetResource()), ) return fmt.Errorf("resource was missing expected trait %s", trait.ProtoReflect().Descriptor().Name()) } @@ -1011,21 +1167,21 @@ func (s *syncer) shouldSkipEntitlementsAndGrants(ctx context.Context, r *v2.Reso } // We've checked this resource type, so we can return what we have cached directly. 
- if skip, ok := s.skipEGForResourceType[r.Id.ResourceType]; ok { + if skip, ok := s.skipEGForResourceType[r.GetId().GetResourceType()]; ok { return skip, nil } - rt, err := s.store.GetResourceType(ctx, &reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest{ - ResourceTypeId: r.Id.ResourceType, - }) + rt, err := s.store.GetResourceType(ctx, reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest_builder{ + ResourceTypeId: r.GetId().GetResourceType(), + }.Build()) if err != nil { return false, err } - rtAnnos := annotations.Annotations(rt.ResourceType.Annotations) + rtAnnos := annotations.Annotations(rt.GetResourceType().GetAnnotations()) skipEntitlements := rtAnnos.Contains(&v2.SkipEntitlementsAndGrants{}) - s.skipEGForResourceType[r.Id.ResourceType] = skipEntitlements + s.skipEGForResourceType[r.GetId().GetResourceType()] = skipEntitlements return skipEntitlements, nil } @@ -1036,9 +1192,50 @@ func (s *syncer) shouldSkipGrants(ctx context.Context, r *v2.Resource) (bool, er return true, nil } + if s.state.ShouldSkipGrants() { + return true, nil + } + return s.shouldSkipEntitlementsAndGrants(ctx, r) } +func (s *syncer) shouldSkipEntitlements(ctx context.Context, r *v2.Resource) (bool, error) { + ctx, span := tracer.Start(ctx, "syncer.shouldSkipEntitlements") + defer span.End() + + ok, err := s.shouldSkipEntitlementsAndGrants(ctx, r) + if err != nil { + return false, err + } + + if ok { + return true, nil + } + + rAnnos := annotations.Annotations(r.GetAnnotations()) + if rAnnos.Contains(&v2.SkipEntitlements{}) || rAnnos.Contains(&v2.SkipEntitlementsAndGrants{}) { + return true, nil + } + + if skip, ok := s.skipEntitlementsForResourceType[r.GetId().GetResourceType()]; ok { + return skip, nil + } + + rt, err := s.store.GetResourceType(ctx, reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest_builder{ + ResourceTypeId: r.GetId().GetResourceType(), + }.Build()) + if err != nil { + return false, err + } + + rtAnnos := annotations.Annotations(rt.GetResourceType().GetAnnotations()) + + skipEntitlements := rtAnnos.Contains(&v2.SkipEntitlements{}) || rtAnnos.Contains(&v2.SkipEntitlementsAndGrants{}) + s.skipEntitlementsForResourceType[r.GetId().GetResourceType()] = skipEntitlements + + return skipEntitlements, nil +} + // SyncEntitlements fetches the entitlements from the connector. It first lists each resource from the datastore, // and pushes an action to fetch the entitlements for each resource. 
func (s *syncer) SyncEntitlements(ctx context.Context) error { @@ -1053,14 +1250,14 @@ func (s *syncer) SyncEntitlements(ctx context.Context) error { s.handleInitialActionForStep(ctx, *s.state.Current()) } - resp, err := s.store.ListResources(ctx, &v2.ResourcesServiceListResourcesRequest{PageToken: pageToken}) + resp, err := s.store.ListResources(ctx, v2.ResourcesServiceListResourcesRequest_builder{PageToken: pageToken}.Build()) if err != nil { return err } // We want to take action on the next page before we push any new actions - if resp.NextPageToken != "" { - err = s.state.NextPage(ctx, resp.NextPageToken) + if resp.GetNextPageToken() != "" { + err = s.state.NextPage(ctx, resp.GetNextPageToken()) if err != nil { return err } @@ -1068,24 +1265,24 @@ func (s *syncer) SyncEntitlements(ctx context.Context) error { s.state.FinishAction(ctx) } - for _, r := range resp.List { - shouldSkipEntitlements, err := s.shouldSkipEntitlementsAndGrants(ctx, r) + for _, r := range resp.GetList() { + shouldSkipEntitlements, err := s.shouldSkipEntitlements(ctx, r) if err != nil { return err } if shouldSkipEntitlements { continue } - s.state.PushAction(ctx, Action{Op: SyncEntitlementsOp, ResourceID: r.Id.Resource, ResourceTypeID: r.Id.ResourceType}) + s.state.PushAction(ctx, Action{Op: SyncEntitlementsOp, ResourceID: r.GetId().GetResource(), ResourceTypeID: r.GetId().GetResourceType()}) } return nil } - err := s.syncEntitlementsForResource(ctx, &v2.ResourceId{ + err := s.syncEntitlementsForResource(ctx, v2.ResourceId_builder{ ResourceType: s.state.ResourceTypeID(ctx), Resource: s.state.ResourceID(ctx), - }) + }.Build()) if err != nil { return err } @@ -1098,41 +1295,144 @@ func (s *syncer) syncEntitlementsForResource(ctx context.Context, resourceID *v2 ctx, span := tracer.Start(ctx, "syncer.syncEntitlementsForResource") defer span.End() - resourceResponse, err := s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ + resourceResponse, err := s.store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ ResourceId: resourceID, - }) + }.Build()) if err != nil { return err } pageToken := s.state.PageToken(ctx) - resource := resourceResponse.Resource + resource := resourceResponse.GetResource() - resp, err := s.connector.ListEntitlements(ctx, &v2.EntitlementsServiceListEntitlementsRequest{ - Resource: resource, - PageToken: pageToken, - Annotations: annotations.New(&v2.ActiveSync{Id: s.syncID}), - }) + resp, err := s.connector.ListEntitlements(ctx, v2.EntitlementsServiceListEntitlementsRequest_builder{ + Resource: resource, + PageToken: pageToken, + ActiveSyncId: s.getActiveSyncID(), + }.Build()) if err != nil { return err } - err = s.store.PutEntitlements(ctx, resp.List...) + err = s.store.PutEntitlements(ctx, resp.GetList()...) 
if err != nil { return err } - s.handleProgress(ctx, s.state.Current(), len(resp.List)) + s.handleProgress(ctx, s.state.Current(), len(resp.GetList())) - if resp.NextPageToken != "" { - err = s.state.NextPage(ctx, resp.NextPageToken) + if resp.GetNextPageToken() != "" { + err = s.state.NextPage(ctx, resp.GetNextPageToken()) if err != nil { return err } } else { - s.counts.EntitlementsProgress[resourceID.ResourceType] += 1 - s.counts.LogEntitlementsProgress(ctx, resourceID.ResourceType) + s.counts.EntitlementsProgress[resourceID.GetResourceType()] += 1 + s.counts.LogEntitlementsProgress(ctx, resourceID.GetResourceType()) + + s.state.FinishAction(ctx) + } + + return nil +} + +func (s *syncer) SyncStaticEntitlements(ctx context.Context) error { + ctx, span := tracer.Start(ctx, "syncer.SyncStaticEntitlements") + defer span.End() + + if s.state.ResourceTypeID(ctx) != "" { + return s.syncStaticEntitlementsForResourceType(ctx, s.state.ResourceTypeID(ctx)) + } + + ctxzap.Extract(ctx).Info("Syncing static entitlements...") + s.handleInitialActionForStep(ctx, *s.state.Current()) + + s.state.FinishAction(ctx) + for rts, err := range s.listAllResourceTypes(ctx) { + if err != nil { + return err + } + for _, rt := range rts { + // Queue up actions to sync static entitlements for each resource type + s.state.PushAction(ctx, Action{Op: SyncStaticEntitlementsOp, ResourceTypeID: rt.GetId()}) + } + } + + return nil +} + +func (s *syncer) syncStaticEntitlementsForResourceType(ctx context.Context, resourceTypeID string) error { + ctx, span := tracer.Start(ctx, "syncer.syncStaticEntitlementsForResource") + defer span.End() + + resp, err := s.connector.ListStaticEntitlements(ctx, v2.EntitlementsServiceListStaticEntitlementsRequest_builder{ + ResourceTypeId: resourceTypeID, + PageToken: s.state.PageToken(ctx), + ActiveSyncId: s.getActiveSyncID(), + }.Build()) + if err != nil { + // Ignore prefixError if we're calling a lambda with an old version of baton-sdk. + if strings.Contains(err.Error(), `unable to resolve \"type.googleapis.com/c1.connector.v2.EntitlementsServiceListStaticEntitlementsRequest\": \"not found\"","errorType":"prefixError"`) { + l := ctxzap.Extract(ctx) + l.Info("ignoring prefixError when calling ListStaticEntitlements", zap.Error(err)) + s.state.FinishAction(ctx) + return nil + } + + return err + } + + for _, ent := range resp.GetList() { + resourcePageToken := "" + for { + // Get all resources of resource type and create entitlements for each one. + resourcesResp, err := s.store.ListResources(ctx, v2.ResourcesServiceListResourcesRequest_builder{ + ResourceTypeId: resourceTypeID, + PageToken: resourcePageToken, + ActiveSyncId: s.getActiveSyncID(), + }.Build()) + if err != nil { + return err + } + entitlements := []*v2.Entitlement{} + for _, resource := range resourcesResp.GetList() { + displayName := ent.GetDisplayName() + if displayName == "" { + displayName = resource.GetDisplayName() + } + description := ent.GetDescription() + if description == "" { + description = resource.GetDescription() + } + + entitlements = append(entitlements, &v2.Entitlement{ + Resource: resource, + Id: entitlement.NewEntitlementID(resource, ent.GetSlug()), + DisplayName: displayName, + Description: description, + GrantableTo: ent.GetGrantableTo(), + Annotations: ent.GetAnnotations(), + }) + } + err = s.store.PutEntitlements(ctx, entitlements...) 
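syncStaticEntitlementsForResourceType above fans a connector-defined entitlement template out across every resource of the resource type, falling back to the resource's own display name and description when the template leaves them blank. A minimal sketch of that fan-out, with hypothetical stand-in types in place of the protobuf messages:

package main

import "fmt"

type resource struct{ id, displayName, description string }

type entitlementTemplate struct{ slug, displayName, description string }

type entitlement struct{ id, displayName, description string }

// expandTemplate creates one entitlement per resource, preferring the template's
// display name and description and falling back to the resource's own values.
func expandTemplate(tmpl entitlementTemplate, resources []resource) []entitlement {
	out := make([]entitlement, 0, len(resources))
	for _, r := range resources {
		displayName := tmpl.displayName
		if displayName == "" {
			displayName = r.displayName
		}
		description := tmpl.description
		if description == "" {
			description = r.description
		}
		out = append(out, entitlement{
			id:          fmt.Sprintf("%s:%s", r.id, tmpl.slug),
			displayName: displayName,
			description: description,
		})
	}
	return out
}

func main() {
	tmpl := entitlementTemplate{slug: "member"}
	resources := []resource{{id: "team/1", displayName: "Platform"}, {id: "team/2", displayName: "Security"}}
	for _, e := range expandTemplate(tmpl, resources) {
		fmt.Printf("%s -> %s\n", e.id, e.displayName)
	}
}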
+ if err != nil { + return err + } + resourcePageToken = resourcesResp.GetNextPageToken() + if resourcePageToken == "" { + break + } + } + } + s.handleProgress(ctx, s.state.Current(), len(resp.GetList())) + + if resp.GetNextPageToken() != "" { + err = s.state.NextPage(ctx, resp.GetNextPageToken()) + if err != nil { + return err + } + } else { s.state.FinishAction(ctx) } @@ -1147,16 +1447,16 @@ func (s *syncer) syncAssetsForResource(ctx context.Context, resourceID *v2.Resou defer span.End() l := ctxzap.Extract(ctx) - resourceResponse, err := s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ + resourceResponse, err := s.store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ ResourceId: resourceID, - }) + }.Build()) if err != nil { return err } var assetRefs []*v2.AssetRef - rAnnos := annotations.Annotations(resourceResponse.Resource.Annotations) + rAnnos := annotations.Annotations(resourceResponse.GetResource().GetAnnotations()) userTrait := &v2.UserTrait{} ok, err := rAnnos.Pick(userTrait) @@ -1164,7 +1464,7 @@ func (s *syncer) syncAssetsForResource(ctx context.Context, resourceID *v2.Resou return err } if ok { - assetRefs = append(assetRefs, userTrait.Icon) + assetRefs = append(assetRefs, userTrait.GetIcon()) } grpTrait := &v2.GroupTrait{} @@ -1173,7 +1473,7 @@ func (s *syncer) syncAssetsForResource(ctx context.Context, resourceID *v2.Resou return err } if ok { - assetRefs = append(assetRefs, grpTrait.Icon) + assetRefs = append(assetRefs, grpTrait.GetIcon()) } appTrait := &v2.AppTrait{} @@ -1182,7 +1482,7 @@ func (s *syncer) syncAssetsForResource(ctx context.Context, resourceID *v2.Resou return err } if ok { - assetRefs = append(assetRefs, appTrait.Icon, appTrait.Logo) + assetRefs = append(assetRefs, appTrait.GetIcon(), appTrait.GetLogo()) } for _, assetRef := range assetRefs { @@ -1190,8 +1490,8 @@ func (s *syncer) syncAssetsForResource(ctx context.Context, resourceID *v2.Resou continue } - l.Debug("fetching asset", zap.String("asset_ref_id", assetRef.Id)) - resp, err := s.connector.GetAsset(ctx, &v2.AssetServiceGetAssetRequest{Asset: assetRef}) + l.Debug("fetching asset", zap.String("asset_ref_id", assetRef.GetId())) + resp, err := s.connector.GetAsset(ctx, v2.AssetServiceGetAssetRequest_builder{Asset: assetRef}.Build()) if err != nil { return err } @@ -1219,17 +1519,19 @@ func (s *syncer) syncAssetsForResource(ctx context.Context, resourceID *v2.Resou l.Debug("received asset message") - switch assetMsg := msg.Msg.(type) { - case *v2.AssetServiceGetAssetResponse_Metadata_: - metadata = assetMsg.Metadata - - case *v2.AssetServiceGetAssetResponse_Data_: + switch msg.WhichMsg() { + case v2.AssetServiceGetAssetResponse_Metadata_case: + metadata = msg.GetMetadata() + case v2.AssetServiceGetAssetResponse_Data_case: l.Debug("Received data for asset") - _, err := io.Copy(assetBytes, bytes.NewReader(assetMsg.Data.Data)) + _, err := io.Copy(assetBytes, bytes.NewReader(msg.GetData().GetData())) if err != nil { _ = resp.CloseSend() return err } + case v2.AssetServiceGetAssetResponse_Msg_not_set_case: + l.Debug("Received unset asset message") + continue } } @@ -1237,7 +1539,7 @@ func (s *syncer) syncAssetsForResource(ctx context.Context, resourceID *v2.Resou return fmt.Errorf("no metadata received, unable to store asset") } - err = s.store.PutAsset(ctx, assetRef, metadata.ContentType, assetBytes.Bytes()) + err = s.store.PutAsset(ctx, assetRef, metadata.GetContentType(), assetBytes.Bytes()) if err != nil { return err } @@ -1260,14 +1562,14 @@ 
func (s *syncer) SyncAssets(ctx context.Context) error { s.handleInitialActionForStep(ctx, *s.state.Current()) } - resp, err := s.store.ListResources(ctx, &v2.ResourcesServiceListResourcesRequest{PageToken: pageToken}) + resp, err := s.store.ListResources(ctx, v2.ResourcesServiceListResourcesRequest_builder{PageToken: pageToken}.Build()) if err != nil { return err } // We want to take action on the next page before we push any new actions - if resp.NextPageToken != "" { - err = s.state.NextPage(ctx, resp.NextPageToken) + if resp.GetNextPageToken() != "" { + err = s.state.NextPage(ctx, resp.GetNextPageToken()) if err != nil { return err } @@ -1275,17 +1577,17 @@ func (s *syncer) SyncAssets(ctx context.Context) error { s.state.FinishAction(ctx) } - for _, r := range resp.List { - s.state.PushAction(ctx, Action{Op: SyncAssetsOp, ResourceID: r.Id.Resource, ResourceTypeID: r.Id.ResourceType}) + for _, r := range resp.GetList() { + s.state.PushAction(ctx, Action{Op: SyncAssetsOp, ResourceID: r.GetId().GetResource(), ResourceTypeID: r.GetId().GetResourceType()}) } return nil } - err := s.syncAssetsForResource(ctx, &v2.ResourceId{ + err := s.syncAssetsForResource(ctx, v2.ResourceId_builder{ ResourceType: s.state.ResourceTypeID(ctx), Resource: s.state.ResourceID(ctx), - }) + }.Build()) if err != nil { ctxzap.Extract(ctx).Error("error syncing assets", zap.Error(err)) return err @@ -1294,127 +1596,32 @@ func (s *syncer) SyncAssets(ctx context.Context) error { return nil } -// SyncGrantExpansion documentation pending. +// SyncGrantExpansion handles the grant expansion phase of sync. +// It first loads the entitlement graph from grants, fixes any cycles, then runs expansion. func (s *syncer) SyncGrantExpansion(ctx context.Context) error { ctx, span := tracer.Start(ctx, "syncer.SyncGrantExpansion") defer span.End() - l := ctxzap.Extract(ctx) entitlementGraph := s.state.EntitlementGraph(ctx) - if !entitlementGraph.Loaded { - pageToken := s.state.PageToken(ctx) - if pageToken == "" { - l.Info("Expanding grants...") - s.handleInitialActionForStep(ctx, *s.state.Current()) - } - resp, err := s.store.ListGrants(ctx, &v2.GrantsServiceListGrantsRequest{PageToken: pageToken}) + // Phase 1: Load the entitlement graph from grants (paginated) + if !entitlementGraph.Loaded { + err := s.loadEntitlementGraph(ctx, entitlementGraph) if err != nil { return err } - - // We want to take action on the next page before we push any new actions - if resp.NextPageToken != "" { - err = s.state.NextPage(ctx, resp.NextPageToken) - if err != nil { - return err - } - } else { - l.Debug("Finished loading grants to expand") - entitlementGraph.Loaded = true - } - - for _, grant := range resp.List { - annos := annotations.Annotations(grant.Annotations) - expandable := &v2.GrantExpandable{} - _, err := annos.Pick(expandable) - if err != nil { - return err - } - if len(expandable.GetEntitlementIds()) == 0 { - continue - } - - principalID := grant.GetPrincipal().GetId() - if principalID == nil { - return fmt.Errorf("principal id was nil") - } - - // FIXME(morgabra) Log and skip some of the error paths here? 
- for _, srcEntitlementID := range expandable.EntitlementIds { - l.Debug( - "Expandable entitlement found", - zap.String("src_entitlement_id", srcEntitlementID), - zap.String("dst_entitlement_id", grant.GetEntitlement().GetId()), - ) - - srcEntitlement, err := s.store.GetEntitlement(ctx, &reader_v2.EntitlementsReaderServiceGetEntitlementRequest{ - EntitlementId: srcEntitlementID, - }) - if err != nil { - l.Error("error fetching source entitlement", - zap.String("src_entitlement_id", srcEntitlementID), - zap.String("dst_entitlement_id", grant.GetEntitlement().GetId()), - zap.Error(err), - ) - continue - } - - // The expand annotation points at entitlements by id. Those entitlements' resource should match - // the current grant's principal, so we don't allow expanding arbitrary entitlements. - sourceEntitlementResourceID := srcEntitlement.GetEntitlement().GetResource().GetId() - if sourceEntitlementResourceID == nil { - return fmt.Errorf("source entitlement resource id was nil") - } - if principalID.ResourceType != sourceEntitlementResourceID.ResourceType || - principalID.Resource != sourceEntitlementResourceID.Resource { - l.Error( - "source entitlement resource id did not match grant principal id", - zap.String("grant_principal_id", principalID.String()), - zap.String("source_entitlement_resource_id", sourceEntitlementResourceID.String())) - - return fmt.Errorf("source entitlement resource id did not match grant principal id") - } - - entitlementGraph.AddEntitlement(grant.Entitlement) - entitlementGraph.AddEntitlement(srcEntitlement.GetEntitlement()) - err = entitlementGraph.AddEdge(ctx, - srcEntitlement.GetEntitlement().GetId(), - grant.GetEntitlement().GetId(), - expandable.Shallow, - expandable.ResourceTypeIds, - ) - if err != nil { - return fmt.Errorf("error adding edge to graph: %w", err) - } - } - } - if entitlementGraph.Loaded { - l.Info("Finished loading entitlement graph", zap.Int("edges", len(entitlementGraph.Edges))) - } return nil } - if entitlementGraph.Loaded { - comps, sccMetrics := entitlementGraph.ComputeCyclicComponents(ctx) - if len(comps) > 0 { - // Log a sample cycle - l.Warn( - "cycle detected in entitlement graph", - zap.Any("cycle", comps[0]), - zap.Any("scc_metrics", sccMetrics), - ) - l.Debug("initial graph", zap.Any("initial graph", entitlementGraph)) - if dontFixCycles { - return fmt.Errorf("cycles detected in entitlement graph") - } - err := entitlementGraph.FixCyclesFromComponents(ctx, comps) - if err != nil { - return err - } + // Phase 2: Fix cycles in the graph (only runs once after loading completes) + if !entitlementGraph.HasNoCycles { + err := s.fixEntitlementGraphCycles(ctx, entitlementGraph) + if err != nil { + return err } } + // Phase 3: Run the expansion algorithm err := s.expandGrantsForEntitlements(ctx) if err != nil { return err @@ -1423,36 +1630,202 @@ func (s *syncer) SyncGrantExpansion(ctx context.Context) error { return nil } -// SyncGrants fetches the grants for each resource from the connector. It iterates each resource -// from the datastore, and pushes a new action to sync the grants for each individual resource. -func (s *syncer) SyncGrants(ctx context.Context) error { - ctx, span := tracer.Start(ctx, "syncer.SyncGrants") - defer span.End() +// loadEntitlementGraph loads one page of grants and adds expandable relationships to the graph. +// This method handles pagination via the syncer's state machine. 
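The doc comment above describes loadEntitlementGraph as processing one page of grants per invocation, with the syncer's state machine carrying the page token between invocations and the graph marked loaded once the final page is reached. A minimal, self-contained sketch of that one-page-per-call shape, using hypothetical stand-in types for the state, store, and graph:

package main

import "fmt"

// Hypothetical stand-ins for the syncer's state, grant listing, and expansion graph.
type pageState struct{ token string }

type grantPage struct {
	grants        []string
	nextPageToken string
}

type graph struct {
	loaded bool
	edges  int
}

// loadOnePage consumes exactly one page per call. The caller re-invokes it
// until the graph reports loaded, mirroring how the syncer's action loop
// re-enters the same step until pagination is exhausted.
func loadOnePage(state *pageState, list func(token string) grantPage, g *graph) {
	page := list(state.token)
	if page.nextPageToken != "" {
		state.token = page.nextPageToken // the next invocation resumes here
	} else {
		g.loaded = true
	}
	g.edges += len(page.grants) // placeholder for "add expandable edges to the graph"
}

func main() {
	pages := map[string]grantPage{
		"":   {grants: []string{"g1", "g2"}, nextPageToken: "p2"},
		"p2": {grants: []string{"g3"}},
	}
	state, g := &pageState{}, &graph{}
	for !g.loaded {
		loadOnePage(state, func(token string) grantPage { return pages[token] }, g)
	}
	fmt.Println("edges:", g.edges) // edges: 3
}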
+func (s *syncer) loadEntitlementGraph(ctx context.Context, graph *expand.EntitlementGraph) error { + l := ctxzap.Extract(ctx) + pageToken := s.state.PageToken(ctx) - if s.state.ResourceTypeID(ctx) == "" && s.state.ResourceID(ctx) == "" { - pageToken := s.state.PageToken(ctx) + if pageToken == "" { + l.Info("Expanding grants...") + s.handleInitialActionForStep(ctx, *s.state.Current()) + } - if pageToken == "" { - ctxzap.Extract(ctx).Info("Syncing grants...") - s.handleInitialActionForStep(ctx, *s.state.Current()) + resp, err := s.store.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build()) + if err != nil { + return err + } + + // Handle pagination + if resp.GetNextPageToken() != "" { + err = s.state.NextPage(ctx, resp.GetNextPageToken()) + if err != nil { + return err } + } else { + l.Debug("Finished loading grants to expand") + graph.Loaded = true + } - resp, err := s.store.ListResources(ctx, &v2.ResourcesServiceListResourcesRequest{PageToken: pageToken}) + // Process grants and add edges to the graph + updatedGrants := make([]*v2.Grant, 0) + for _, grant := range resp.GetList() { + err := s.processGrantForGraph(ctx, grant, graph) if err != nil { return err } - // We want to take action on the next page before we push any new actions - if resp.NextPageToken != "" { - err = s.state.NextPage(ctx, resp.NextPageToken) - if err != nil { - return err + // Remove expandable annotation from descendant grant now that we've added it to the graph. + // That way if this sync is part of a compaction, expanding grants at the end of compaction won't redo work. + newAnnos := make(annotations.Annotations, 0) + updated := false + for _, anno := range grant.GetAnnotations() { + if anno.MessageIs(&v2.GrantExpandable{}) { + updated = true + } else { + newAnnos = append(newAnnos, anno) + } + } + if !updated { + continue + } + + grant.SetAnnotations(newAnnos) + l.Debug("removed expandable annotation from grant", zap.String("grant_id", grant.GetId())) + updatedGrants = append(updatedGrants, grant) + updatedGrants, err = expand.PutGrantsInChunks(ctx, s.store, updatedGrants, 10000) + if err != nil { + return err + } + } + + _, err = expand.PutGrantsInChunks(ctx, s.store, updatedGrants, 0) + if err != nil { + return err + } + + if graph.Loaded { + l.Info("Finished loading entitlement graph", zap.Int("edges", len(graph.Edges))) + } + return nil +} + +// processGrantForGraph examines a grant for expandable annotations and adds edges to the graph. 
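processGrantForGraph below turns each GrantExpandable annotation into directed edges from the referenced source entitlements to the grant's own entitlement, after checking that every source entitlement belongs to the grant's principal. A minimal sketch of that edge-building rule with simplified stand-in types (not the SDK's graph implementation):

package main

import "fmt"

// Simplified stand-ins for grants, entitlements, and the expansion graph.
type entitlementInfo struct{ id, resourceID string }

type expandableGrant struct {
	entitlementID       string   // the grant's own (destination) entitlement
	principalResourceID string   // resource the grant is for, e.g. a group
	sourceEntitlements  []string // entitlement IDs named by the GrantExpandable annotation
}

type expansionGraph struct{ edges map[string][]string } // source entitlement -> destination entitlements

func (g *expansionGraph) addEdges(grant expandableGrant, lookup map[string]entitlementInfo) error {
	for _, srcID := range grant.sourceEntitlements {
		src, ok := lookup[srcID]
		if !ok {
			continue // source entitlement not found; the syncer logs and skips this case
		}
		// The source entitlement must live on the grant's principal, so arbitrary
		// entitlements cannot be expanded through unrelated grants.
		if src.resourceID != grant.principalResourceID {
			return fmt.Errorf("source entitlement %s does not belong to principal %s", srcID, grant.principalResourceID)
		}
		g.edges[srcID] = append(g.edges[srcID], grant.entitlementID)
	}
	return nil
}

func main() {
	lookup := map[string]entitlementInfo{"group:1:member": {id: "group:1:member", resourceID: "group:1"}}
	g := &expansionGraph{edges: map[string][]string{}}
	err := g.addEdges(expandableGrant{
		entitlementID:       "repo:42:read",
		principalResourceID: "group:1",
		sourceEntitlements:  []string{"group:1:member"},
	}, lookup)
	fmt.Println(err, g.edges) // <nil> map[group:1:member:[repo:42:read]]
}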
+func (s *syncer) processGrantForGraph(ctx context.Context, grant *v2.Grant, graph *expand.EntitlementGraph) error { + l := ctxzap.Extract(ctx) + + annos := annotations.Annotations(grant.GetAnnotations()) + expandable := &v2.GrantExpandable{} + _, err := annos.Pick(expandable) + if err != nil { + return err + } + if len(expandable.GetEntitlementIds()) == 0 { + return nil + } + + principalID := grant.GetPrincipal().GetId() + if principalID == nil { + return fmt.Errorf("principal id was nil") + } + + for _, srcEntitlementID := range expandable.GetEntitlementIds() { + l.Debug( + "Expandable entitlement found", + zap.String("src_entitlement_id", srcEntitlementID), + zap.String("dst_entitlement_id", grant.GetEntitlement().GetId()), + ) + + srcEntitlement, err := s.store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ + EntitlementId: srcEntitlementID, + }.Build()) + if err != nil { + l.Error("error fetching source entitlement", + zap.String("src_entitlement_id", srcEntitlementID), + zap.String("dst_entitlement_id", grant.GetEntitlement().GetId()), + zap.Error(err), + ) + continue + } + + // The expand annotation points at entitlements by id. Those entitlements' resource should match + // the current grant's principal, so we don't allow expanding arbitrary entitlements. + sourceEntitlementResourceID := srcEntitlement.GetEntitlement().GetResource().GetId() + if sourceEntitlementResourceID == nil { + return fmt.Errorf("source entitlement resource id was nil") + } + if principalID.GetResourceType() != sourceEntitlementResourceID.GetResourceType() || + principalID.GetResource() != sourceEntitlementResourceID.GetResource() { + l.Error( + "source entitlement resource id did not match grant principal id", + zap.String("grant_principal_id", principalID.String()), + zap.String("source_entitlement_resource_id", sourceEntitlementResourceID.String())) + + return fmt.Errorf("source entitlement resource id did not match grant principal id") + } + + graph.AddEntitlement(grant.GetEntitlement()) + graph.AddEntitlement(srcEntitlement.GetEntitlement()) + err = graph.AddEdge(ctx, + srcEntitlement.GetEntitlement().GetId(), + grant.GetEntitlement().GetId(), + expandable.GetShallow(), + expandable.GetResourceTypeIds(), + ) + if err != nil { + return fmt.Errorf("error adding edge to graph: %w", err) + } + } + return nil +} + +// fixEntitlementGraphCycles detects and fixes cycles in the entitlement graph. +func (s *syncer) fixEntitlementGraphCycles(ctx context.Context, graph *expand.EntitlementGraph) error { + l := ctxzap.Extract(ctx) + + comps, sccMetrics := graph.ComputeCyclicComponents(ctx) + if len(comps) == 0 { + graph.HasNoCycles = true + return nil + } + l.Warn( + "cycle detected in entitlement graph", + zap.Any("cycle", comps[0]), + zap.Any("scc_metrics", sccMetrics), + ) + l.Debug("initial graph stats", + zap.Int("edges", len(graph.Edges)), + zap.Int("nodes", len(graph.Nodes)), + zap.Int("actions", len(graph.Actions)), + zap.Int("depth", graph.Depth), + zap.Bool("has_no_cycles", graph.HasNoCycles), + ) + if dontFixCycles { + return fmt.Errorf("cycles detected in entitlement graph") + } + return graph.FixCyclesFromComponents(ctx, comps) +} + +// SyncGrants fetches the grants for each resource from the connector. It iterates each resource +// from the datastore, and pushes a new action to sync the grants for each individual resource. 
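The comment above describes the same work-queue pattern the other sync steps use: list a page of resources from the datastore and queue one "sync grants for this resource" action per resource, skipping any resource whose type is flagged to skip grants. A minimal sketch of that fan-out, with a plain callback standing in for the syncer's state stack:

package main

import "fmt"

// Hypothetical stand-ins for listed resources and the syncer's queued actions.
type syncAction struct{ op, resourceTypeID, resourceID string }

type listedResource struct {
	typeID, id string
	skipGrants bool
}

// queueGrantActions pushes one SyncGrants action per resource that is not
// marked to skip grants, mirroring the fan-out described in the comment above.
func queueGrantActions(resources []listedResource, push func(syncAction)) {
	for _, r := range resources {
		if r.skipGrants {
			continue
		}
		push(syncAction{op: "SyncGrantsOp", resourceTypeID: r.typeID, resourceID: r.id})
	}
}

func main() {
	var queue []syncAction
	resources := []listedResource{
		{typeID: "team", id: "1"},
		{typeID: "role", id: "admin", skipGrants: true}, // skipped
		{typeID: "team", id: "2"},
	}
	queueGrantActions(resources, func(a syncAction) { queue = append(queue, a) })
	fmt.Println(len(queue)) // 2
}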
+func (s *syncer) SyncGrants(ctx context.Context) error { + ctx, span := tracer.Start(ctx, "syncer.SyncGrants") + defer span.End() + + if s.state.ResourceTypeID(ctx) == "" && s.state.ResourceID(ctx) == "" { + pageToken := s.state.PageToken(ctx) + + if pageToken == "" { + ctxzap.Extract(ctx).Info("Syncing grants...") + s.handleInitialActionForStep(ctx, *s.state.Current()) + } + + resp, err := s.store.ListResources(ctx, v2.ResourcesServiceListResourcesRequest_builder{PageToken: pageToken}.Build()) + if err != nil { + return err + } + + // We want to take action on the next page before we push any new actions + if resp.GetNextPageToken() != "" { + err = s.state.NextPage(ctx, resp.GetNextPageToken()) + if err != nil { + return err } } else { s.state.FinishAction(ctx) } - for _, r := range resp.List { + for _, r := range resp.GetList() { shouldSkip, err := s.shouldSkipGrants(ctx, r) if err != nil { return err @@ -1461,15 +1834,15 @@ func (s *syncer) SyncGrants(ctx context.Context) error { if shouldSkip { continue } - s.state.PushAction(ctx, Action{Op: SyncGrantsOp, ResourceID: r.Id.Resource, ResourceTypeID: r.Id.ResourceType}) + s.state.PushAction(ctx, Action{Op: SyncGrantsOp, ResourceID: r.GetId().GetResource(), ResourceTypeID: r.GetId().GetResourceType()}) } return nil } - err := s.syncGrantsForResource(ctx, &v2.ResourceId{ + err := s.syncGrantsForResource(ctx, v2.ResourceId_builder{ ResourceType: s.state.ResourceTypeID(ctx), Resource: s.state.ResourceID(ctx), - }) + }.Build()) if err != nil { return err } @@ -1502,18 +1875,18 @@ func (s *syncer) fetchResourceForPreviousSync(ctx context.Context, resourceID *v } var lastSyncResourceReqAnnos annotations.Annotations - lastSyncResourceReqAnnos.Update(&c1zpb.SyncDetails{Id: previousSyncID}) - prevResource, err := s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ + lastSyncResourceReqAnnos.Update(c1zpb.SyncDetails_builder{Id: previousSyncID}.Build()) + prevResource, err := s.store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ ResourceId: resourceID, Annotations: lastSyncResourceReqAnnos, - }) + }.Build()) // If we get an error while attempting to look up the previous sync, we should just log it and continue. if err != nil { if errors.Is(err, sql.ErrNoRows) { l.Debug( "resource was not found in previous sync", - zap.String("resource_id", resourceID.Resource), - zap.String("resource_type_id", resourceID.ResourceType), + zap.String("resource_id", resourceID.GetResource()), + zap.String("resource_type_id", resourceID.GetResourceType()), ) return "", nil, nil } @@ -1523,7 +1896,7 @@ func (s *syncer) fetchResourceForPreviousSync(ctx context.Context, resourceID *v } pETag := &v2.ETag{} - prevAnnos := annotations.Annotations(prevResource.Resource.GetAnnotations()) + prevAnnos := annotations.Annotations(prevResource.GetResource().GetAnnotations()) ok, err := prevAnnos.Pick(pETag) if err != nil { return "", nil, err @@ -1564,7 +1937,7 @@ func (s *syncer) fetchEtaggedGrantsForResource( } // The previous etag is for a different entitlement - if prevEtag.EntitlementId != etagMatch.EntitlementId { + if prevEtag.GetEntitlementId() != etagMatch.GetEntitlementId() { return nil, false, errors.New("connector returned an etag match but the entitlement id does not match the previous sync") } @@ -1572,30 +1945,30 @@ func (s *syncer) fetchEtaggedGrantsForResource( var npt string // Fetch the grants for this resource from the previous sync, and store them in the current sync. 
storeAnnos := annotations.Annotations{} - storeAnnos.Update(&c1zpb.SyncDetails{ + storeAnnos.Update(c1zpb.SyncDetails_builder{ Id: prevSyncID, - }) + }.Build()) for { - prevGrantsResp, err := s.store.ListGrants(ctx, &v2.GrantsServiceListGrantsRequest{ + prevGrantsResp, err := s.store.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{ Resource: resource, Annotations: storeAnnos, PageToken: npt, - }) + }.Build()) if err != nil { return nil, false, err } - for _, g := range prevGrantsResp.List { - if g.Entitlement.Id != etagMatch.EntitlementId { + for _, g := range prevGrantsResp.GetList() { + if g.GetEntitlement().GetId() != etagMatch.GetEntitlementId() { continue } ret = append(ret, g) } - if prevGrantsResp.NextPageToken == "" { + if prevGrantsResp.GetNextPageToken() == "" { break } - npt = prevGrantsResp.NextPageToken + npt = prevGrantsResp.GetNextPageToken() } return ret, true, nil @@ -1606,14 +1979,14 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou ctx, span := tracer.Start(ctx, "syncer.syncGrantsForResource") defer span.End() - resourceResponse, err := s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ + resourceResponse, err := s.store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ ResourceId: resourceID, - }) + }.Build()) if err != nil { return err } - resource := resourceResponse.Resource + resource := resourceResponse.GetResource() var prevSyncID string var prevEtag *v2.ETag @@ -1628,13 +2001,13 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou return err } resourceAnnos.Update(prevEtag) - resource.Annotations = resourceAnnos + resource.SetAnnotations(resourceAnnos) - resp, err := s.connector.ListGrants(ctx, &v2.GrantsServiceListGrantsRequest{ - Resource: resource, - PageToken: pageToken, - Annotations: annotations.New(&v2.ActiveSync{Id: s.syncID}), - }) + resp, err := s.connector.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{ + Resource: resource, + PageToken: pageToken, + ActiveSyncId: s.getActiveSyncID(), + }.Build()) if err != nil { return err } @@ -1648,9 +2021,13 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou grants = append(grants, etaggedGrants...) // We want to process any grants from the previous sync first so that if there is a conflict, the newer data takes precedence - grants = append(grants, resp.List...) + grants = append(grants, resp.GetList()...) l := ctxzap.Extract(ctx) + resourcesToInsertMap := make(map[string]*v2.Resource, 0) + respAnnos := annotations.Annotations(resp.GetAnnotations()) + insertResourceGrants := respAnnos.Contains(&v2.InsertResourceGrants{}) + for _, grant := range grants { grantAnnos := annotations.Annotations(grant.GetAnnotations()) if !s.dontExpandGrants && grantAnnos.Contains(&v2.GrantExpandable{}) { @@ -1660,14 +2037,23 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou s.state.SetHasExternalResourcesGrants() } + if insertResourceGrants { + resource := grant.GetEntitlement().GetResource() + bid, err := bid.MakeBid(resource) + if err != nil { + return err + } + resourcesToInsertMap[bid] = resource + } + if !s.state.ShouldFetchRelatedResources() { continue } // Some connectors emit grants for other resources. If we're doing a partial sync, check if it exists and queue a fetch if not. 
entitlementResource := grant.GetEntitlement().GetResource() - _, err := s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ + _, err := s.store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ ResourceId: entitlementResource.GetId(), - }) + }.Build()) if err != nil { if !errors.Is(err, sql.ErrNoRows) { return err @@ -1688,6 +2074,18 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou } } } + + if len(resourcesToInsertMap) > 0 { + resourcesToInsert := make([]*v2.Resource, 0) + for _, resource := range resourcesToInsertMap { + resourcesToInsert = append(resourcesToInsert, resource) + } + err = s.store.PutResources(ctx, resourcesToInsert...) + if err != nil { + return err + } + } + err = s.store.PutGrants(ctx, grants...) if err != nil { return err @@ -1715,23 +2113,23 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou if updatedETag != nil { resourceAnnos.Update(updatedETag) - resource.Annotations = resourceAnnos + resource.SetAnnotations(resourceAnnos) err = s.store.PutResources(ctx, resource) if err != nil { return err } } - if resp.NextPageToken != "" { - err = s.state.NextPage(ctx, resp.NextPageToken) + if resp.GetNextPageToken() != "" { + err = s.state.NextPage(ctx, resp.GetNextPageToken()) if err != nil { return err } return nil } - s.counts.GrantsProgress[resourceID.ResourceType] += 1 - s.counts.LogGrantsProgress(ctx, resourceID.ResourceType) + s.counts.GrantsProgress[resourceID.GetResourceType()] += 1 + s.counts.LogGrantsProgress(ctx, resourceID.GetResourceType()) s.state.FinishAction(ctx) return nil @@ -1760,57 +2158,59 @@ func (s *syncer) SyncExternalResourcesWithGrantToEntitlement(ctx context.Context skipEGForResourceType := make(map[string]bool) - filterEntitlement, err := s.externalResourceReader.GetEntitlement(ctx, &reader_v2.EntitlementsReaderServiceGetEntitlementRequest{ + filterEntitlement, err := s.externalResourceReader.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ EntitlementId: entitlementId, - }) - if err != nil { - return err - } - - grants, err := s.listExternalGrantsForEntitlement(ctx, filterEntitlement.GetEntitlement()) + }.Build()) if err != nil { return err } - ents := make([]*v2.Entitlement, 0) - principals := make([]*v2.Resource, 0) - resourceTypes := make([]*v2.ResourceType, 0) resourceTypeIDs := mapset.NewSet[string]() resourceIDs := make(map[string]*v2.ResourceId) - grantsForEnts := make([]*v2.Grant, 0) - - for _, g := range grants { - resourceTypeIDs.Add(g.Principal.Id.ResourceType) - resourceIDs[g.Principal.Id.Resource] = g.Principal.Id + for grants, err := range s.listExternalGrantsForEntitlement(ctx, filterEntitlement.GetEntitlement()) { + if err != nil { + return err + } + for _, g := range grants { + resourceTypeIDs.Add(g.GetPrincipal().GetId().GetResourceType()) + resourceIDs[g.GetPrincipal().GetId().GetResource()] = g.GetPrincipal().GetId() + } } + resourceTypes := make([]*v2.ResourceType, 0) for _, resourceTypeId := range resourceTypeIDs.ToSlice() { - resourceTypeResp, err := s.externalResourceReader.GetResourceType(ctx, &reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest{ResourceTypeId: resourceTypeId}) + resourceTypeResp, err := s.externalResourceReader.GetResourceType(ctx, reader_v2.ResourceTypesReaderServiceGetResourceTypeRequest_builder{ResourceTypeId: resourceTypeId}.Build()) if err != nil { return err } // Should we error or skip if this is not user or group? 
- for _, t := range resourceTypeResp.ResourceType.Traits { + for _, t := range resourceTypeResp.GetResourceType().GetTraits() { if t == v2.ResourceType_TRAIT_USER || t == v2.ResourceType_TRAIT_GROUP { - resourceTypes = append(resourceTypes, resourceTypeResp.ResourceType) + resourceTypes = append(resourceTypes, resourceTypeResp.GetResourceType()) continue } } - rtAnnos := annotations.Annotations(resourceTypeResp.ResourceType.Annotations) + rtAnnos := annotations.Annotations(resourceTypeResp.GetResourceType().GetAnnotations()) skipEntitlements := rtAnnos.Contains(&v2.SkipEntitlementsAndGrants{}) - skipEGForResourceType[resourceTypeResp.ResourceType.Id] = skipEntitlements + skipEGForResourceType[resourceTypeResp.GetResourceType().GetId()] = skipEntitlements } + err = s.store.PutResourceTypes(ctx, resourceTypes...) + if err != nil { + return err + } + + principals := make([]*v2.Resource, 0) for _, resourceId := range resourceIDs { - resourceResp, err := s.externalResourceReader.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ResourceId: resourceId}) + resourceResp, err := s.externalResourceReader.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ResourceId: resourceId}.Build()) if err != nil { if errors.Is(err, sql.ErrNoRows) { l.Debug( "resource was not found in external sync", - zap.String("resource_id", resourceId.Resource), - zap.String("resource_type_id", resourceId.ResourceType), + zap.String("resource_id", resourceId.GetResource()), + zap.String("resource_type_id", resourceId.GetResourceType()), ) continue } @@ -1820,13 +2220,20 @@ func (s *syncer) SyncExternalResourcesWithGrantToEntitlement(ctx context.Context resourceAnnos := annotations.Annotations(resourceVal.GetAnnotations()) batonID := &v2.BatonID{} resourceAnnos.Update(batonID) - resourceVal.Annotations = resourceAnnos + resourceVal.SetAnnotations(resourceAnnos) principals = append(principals, resourceVal) } + err = s.store.PutResources(ctx, principals...) + if err != nil { + return err + } + + entsCount := 0 + ents := make([]*v2.Entitlement, 0) for _, principal := range principals { rAnnos := annotations.Annotations(principal.GetAnnotations()) - skipEnts := skipEGForResourceType[principal.Id.ResourceType] || rAnnos.Contains(&v2.SkipEntitlementsAndGrants{}) + skipEnts := skipEGForResourceType[principal.GetId().GetResourceType()] || rAnnos.Contains(&v2.SkipEntitlementsAndGrants{}) if skipEnts { continue } @@ -1836,45 +2243,37 @@ func (s *syncer) SyncExternalResourcesWithGrantToEntitlement(ctx context.Context return err } ents = append(ents, resourceEnts...) + entsCount += len(resourceEnts) + } + + err = s.store.PutEntitlements(ctx, ents...) + if err != nil { + return err } + grantsForEntsCount := 0 for _, ent := range ents { rAnnos := annotations.Annotations(ent.GetResource().GetAnnotations()) if rAnnos.Contains(&v2.SkipGrants{}) { continue } - grantsForEnt, err := s.listExternalGrantsForEntitlement(ctx, ent) - if err != nil { - return err + for grants, err := range s.listExternalGrantsForEntitlement(ctx, ent) { + if err != nil { + return err + } + grantsForEntsCount += len(grants) + err = s.store.PutGrants(ctx, grants...) + if err != nil { + return err + } } - grantsForEnts = append(grantsForEnts, grantsForEnt...) - } - - err = s.store.PutResourceTypes(ctx, resourceTypes...) - if err != nil { - return err - } - - err = s.store.PutResources(ctx, principals...) - if err != nil { - return err - } - - err = s.store.PutEntitlements(ctx, ents...) 
- if err != nil { - return err - } - - err = s.store.PutGrants(ctx, grantsForEnts...) - if err != nil { - return err } l.Info("Synced external resources for entitlement", zap.Int("resource_type_count", len(resourceTypes)), zap.Int("resource_count", len(principals)), - zap.Int("entitlement_count", len(ents)), - zap.Int("grant_count", len(grantsForEnts)), + zap.Int("entitlement_count", entsCount), + zap.Int("grant_count", grantsForEntsCount), ) err = s.processGrantsWithExternalPrincipals(ctx, principals) @@ -1904,9 +2303,8 @@ func (s *syncer) SyncExternalResourcesUsersAndGroups(ctx context.Context) error userAndGroupResourceTypes := make([]*v2.ResourceType, 0) ents := make([]*v2.Entitlement, 0) principals := make([]*v2.Resource, 0) - grantsForEnts := make([]*v2.Grant, 0) for _, rt := range resourceTypes { - for _, t := range rt.Traits { + for _, t := range rt.GetTraits() { if t == v2.ResourceType_TRAIT_USER || t == v2.ResourceType_TRAIT_GROUP { userAndGroupResourceTypes = append(userAndGroupResourceTypes, rt) continue @@ -1914,12 +2312,17 @@ func (s *syncer) SyncExternalResourcesUsersAndGroups(ctx context.Context) error } } + err = s.store.PutResourceTypes(ctx, userAndGroupResourceTypes...) + if err != nil { + return err + } + for _, rt := range userAndGroupResourceTypes { - rtAnnos := annotations.Annotations(rt.Annotations) + rtAnnos := annotations.Annotations(rt.GetAnnotations()) skipEntitlements := rtAnnos.Contains(&v2.SkipEntitlementsAndGrants{}) - skipEGForResourceType[rt.Id] = skipEntitlements + skipEGForResourceType[rt.GetId()] = skipEntitlements - resourceListResp, err := s.listExternalResourcesForResourceType(ctx, rt.Id) + resourceListResp, err := s.listExternalResourcesForResourceType(ctx, rt.GetId()) if err != nil { return err } @@ -1928,13 +2331,20 @@ func (s *syncer) SyncExternalResourcesUsersAndGroups(ctx context.Context) error resourceAnnos := annotations.Annotations(resourceVal.GetAnnotations()) batonID := &v2.BatonID{} resourceAnnos.Update(batonID) - resourceVal.Annotations = resourceAnnos + resourceVal.SetAnnotations(resourceAnnos) principals = append(principals, resourceVal) } } + err = s.store.PutResources(ctx, principals...) + if err != nil { + return err + } + + entsCount := 0 + principalsCount := len(principals) for _, principal := range principals { - skipEnts := skipEGForResourceType[principal.Id.ResourceType] + skipEnts := skipEGForResourceType[principal.GetId().GetResourceType()] if skipEnts { continue } @@ -1948,45 +2358,36 @@ func (s *syncer) SyncExternalResourcesUsersAndGroups(ctx context.Context) error return err } ents = append(ents, resourceEnts...) + entsCount += len(resourceEnts) + err = s.store.PutEntitlements(ctx, resourceEnts...) + if err != nil { + return err + } } + grantsForEntsCount := 0 for _, ent := range ents { rAnnos := annotations.Annotations(ent.GetResource().GetAnnotations()) if rAnnos.Contains(&v2.SkipGrants{}) { continue } - grantsForEnt, err := s.listExternalGrantsForEntitlement(ctx, ent) - if err != nil { - return err + for grants, err := range s.listExternalGrantsForEntitlement(ctx, ent) { + if err != nil { + return err + } + grantsForEntsCount += len(grants) + err = s.store.PutGrants(ctx, grants...) + if err != nil { + return err + } } - grantsForEnts = append(grantsForEnts, grantsForEnt...) - } - - err = s.store.PutResourceTypes(ctx, userAndGroupResourceTypes...) - if err != nil { - return err - } - - err = s.store.PutResources(ctx, principals...) - if err != nil { - return err - } - - err = s.store.PutEntitlements(ctx, ents...) 
- if err != nil { - return err - } - - err = s.store.PutGrants(ctx, grantsForEnts...) - if err != nil { - return err } l.Info("Synced external resources", zap.Int("resource_type_count", len(userAndGroupResourceTypes)), - zap.Int("resource_count", len(principals)), - zap.Int("entitlement_count", len(ents)), - zap.Int("grant_count", len(grantsForEnts)), + zap.Int("resource_count", principalsCount), + zap.Int("entitlement_count", entsCount), + zap.Int("grant_count", grantsForEntsCount), ) err = s.processGrantsWithExternalPrincipals(ctx, principals) @@ -2003,15 +2404,15 @@ func (s *syncer) listExternalResourcesForResourceType(ctx context.Context, resou resources := make([]*v2.Resource, 0) pageToken := "" for { - resourceResp, err := s.externalResourceReader.ListResources(ctx, &v2.ResourcesServiceListResourcesRequest{ + resourceResp, err := s.externalResourceReader.ListResources(ctx, v2.ResourcesServiceListResourcesRequest_builder{ PageToken: pageToken, ResourceTypeId: resourceTypeId, - }) + }.Build()) if err != nil { return nil, err } - resources = append(resources, resourceResp.List...) - pageToken = resourceResp.NextPageToken + resources = append(resources, resourceResp.GetList()...) + pageToken = resourceResp.GetNextPageToken() if pageToken == "" { break } @@ -2024,15 +2425,15 @@ func (s *syncer) listExternalEntitlementsForResource(ctx context.Context, resour entitlementToken := "" for { - entitlementsList, err := s.externalResourceReader.ListEntitlements(ctx, &v2.EntitlementsServiceListEntitlementsRequest{ + entitlementsList, err := s.externalResourceReader.ListEntitlements(ctx, v2.EntitlementsServiceListEntitlementsRequest_builder{ PageToken: entitlementToken, Resource: resource, - }) + }.Build()) if err != nil { return nil, err } - ents = append(ents, entitlementsList.List...) - entitlementToken = entitlementsList.NextPageToken + ents = append(ents, entitlementsList.GetList()...) + entitlementToken = entitlementsList.GetNextPageToken() if entitlementToken == "" { break } @@ -2040,38 +2441,44 @@ func (s *syncer) listExternalEntitlementsForResource(ctx context.Context, resour return ents, nil } -func (s *syncer) listExternalGrantsForEntitlement(ctx context.Context, ent *v2.Entitlement) ([]*v2.Grant, error) { - grantsForEnts := make([]*v2.Grant, 0) - entitlementGrantPageToken := "" - for { - grantsForEntitlementResp, err := s.externalResourceReader.ListGrantsForEntitlement(ctx, &reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest{ - Entitlement: ent, - PageToken: entitlementGrantPageToken, - }) - if err != nil { - return nil, err - } - grantsForEnts = append(grantsForEnts, grantsForEntitlementResp.List...) 
- entitlementGrantPageToken = grantsForEntitlementResp.NextPageToken - if entitlementGrantPageToken == "" { - break +func (s *syncer) listExternalGrantsForEntitlement(ctx context.Context, ent *v2.Entitlement) iter.Seq2[[]*v2.Grant, error] { + return func(yield func([]*v2.Grant, error) bool) { + pageToken := "" + for { + grantsForEntitlementResp, err := s.externalResourceReader.ListGrantsForEntitlement(ctx, reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest_builder{ + Entitlement: ent, + PageToken: pageToken, + }.Build()) + if err != nil { + _ = yield(nil, err) + return + } + grants := grantsForEntitlementResp.GetList() + if len(grants) > 0 { + if !yield(grants, err) { + return + } + } + pageToken = grantsForEntitlementResp.GetNextPageToken() + if pageToken == "" { + return + } } } - return grantsForEnts, nil } func (s *syncer) listExternalResourceTypes(ctx context.Context) ([]*v2.ResourceType, error) { resourceTypes := make([]*v2.ResourceType, 0) rtPageToken := "" for { - resourceTypesResp, err := s.externalResourceReader.ListResourceTypes(ctx, &v2.ResourceTypesServiceListResourceTypesRequest{ + resourceTypesResp, err := s.externalResourceReader.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{ PageToken: rtPageToken, - }) + }.Build()) if err != nil { return nil, err } - resourceTypes = append(resourceTypes, resourceTypesResp.List...) - rtPageToken = resourceTypesResp.NextPageToken + resourceTypes = append(resourceTypes, resourceTypesResp.GetList()...) + rtPageToken = resourceTypesResp.GetNextPageToken() if rtPageToken == "" { break } @@ -2079,24 +2486,29 @@ func (s *syncer) listExternalResourceTypes(ctx context.Context) ([]*v2.ResourceT return resourceTypes, nil } -func (s *syncer) listAllGrants(ctx context.Context) ([]*v2.Grant, error) { - grants := make([]*v2.Grant, 0) - pageToken := "" - for { - grantsResp, err := s.store.ListGrants(ctx, &v2.GrantsServiceListGrantsRequest{ - PageToken: pageToken, - }) - if err != nil { - return nil, err - } +func (s *syncer) listAllGrants(ctx context.Context) iter.Seq2[[]*v2.Grant, error] { + return func(yield func([]*v2.Grant, error) bool) { + pageToken := "" + for { + grantsResp, err := s.store.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{ + PageToken: pageToken, + }.Build()) + if err != nil { + _ = yield(nil, err) + return + } - grants = append(grants, grantsResp.List...) 
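Both listAllGrants and listExternalGrantsForEntitlement now return an iter.Seq2 that yields one page of grants (or an error) at a time, so callers range over pages instead of buffering every grant in memory. A minimal, self-contained sketch of that paginated-iterator shape (Go 1.23+ range-over-func), with a fake page source in place of the store:

package main

import (
	"fmt"
	"iter"
)

type page struct {
	items []string
	next  string
}

// pagedItems yields one page of items per iteration and stops on the first
// error or when the page source reports no further page token.
func pagedItems(fetch func(token string) (page, error)) iter.Seq2[[]string, error] {
	return func(yield func([]string, error) bool) {
		token := ""
		for {
			p, err := fetch(token)
			if err != nil {
				_ = yield(nil, err)
				return
			}
			if len(p.items) > 0 && !yield(p.items, nil) {
				return
			}
			token = p.next
			if token == "" {
				return
			}
		}
	}
}

func main() {
	pages := map[string]page{
		"":   {items: []string{"grant-1", "grant-2"}, next: "p2"},
		"p2": {items: []string{"grant-3"}},
	}
	for items, err := range pagedItems(func(t string) (page, error) { return pages[t], nil }) {
		if err != nil {
			fmt.Println("error:", err)
			return
		}
		fmt.Println(items)
	}
}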
- pageToken = grantsResp.NextPageToken - if pageToken == "" { - break + if len(grantsResp.GetList()) > 0 { + if !yield(grantsResp.GetList(), err) { + return + } + } + pageToken = grantsResp.GetNextPageToken() + if pageToken == "" { + return + } } } - return grants, nil } func (s *syncer) processGrantsWithExternalPrincipals(ctx context.Context, principals []*v2.Resource) error { @@ -2136,205 +2548,206 @@ func (s *syncer) processGrantsWithExternalPrincipals(ctx context.Context, princi grantsToDelete := make([]string, 0) expandedGrants := make([]*v2.Grant, 0) - grants, err := s.listAllGrants(ctx) - if err != nil { - return err - } - - for _, grant := range grants { - annos := annotations.Annotations(grant.Annotations) - if !annos.ContainsAny(&v2.ExternalResourceMatchAll{}, &v2.ExternalResourceMatch{}, &v2.ExternalResourceMatchID{}) { - continue - } - - // Match all - matchResourceMatchAllAnno, err := GetExternalResourceMatchAllAnnotation(annos) + for grants, err := range s.listAllGrants(ctx) { if err != nil { return err } - if matchResourceMatchAllAnno != nil { - var processPrincipals []*v2.Resource - switch matchResourceMatchAllAnno.ResourceType { - case v2.ResourceType_TRAIT_USER: - processPrincipals = userPrincipals - case v2.ResourceType_TRAIT_GROUP: - processPrincipals = groupPrincipals - default: - l.Error("unexpected external resource type trait", zap.Any("trait", matchResourceMatchAllAnno.ResourceType)) - } - for _, principal := range processPrincipals { - newGrant := newGrantForExternalPrincipal(grant, principal) - expandedGrants = append(expandedGrants, newGrant) + + for _, grant := range grants { + annos := annotations.Annotations(grant.GetAnnotations()) + if !annos.ContainsAny(&v2.ExternalResourceMatchAll{}, &v2.ExternalResourceMatch{}, &v2.ExternalResourceMatchID{}) { + continue } - grantsToDelete = append(grantsToDelete, grant.Id) - continue - } - expandableAnno, err := GetExpandableAnnotation(annos) - if err != nil { - return err - } - expandableEntitlementsResourceMap := make(map[string][]*v2.Entitlement) - if expandableAnno != nil { - for _, entId := range expandableAnno.EntitlementIds { - parsedEnt, err := bid.ParseEntitlementBid(entId) - if err != nil { - l.Error("error parsing expandable entitlement bid", zap.Any("entitlementId", entId)) - continue - } - resourceBID, err := bid.MakeBid(parsedEnt.Resource) - if err != nil { - l.Error("error making resource bid", zap.Any("parsedEnt.Resource", parsedEnt.Resource)) - continue + // Match all + matchResourceMatchAllAnno, err := GetExternalResourceMatchAllAnnotation(annos) + if err != nil { + return err + } + if matchResourceMatchAllAnno != nil { + var processPrincipals []*v2.Resource + switch matchResourceMatchAllAnno.GetResourceType() { + case v2.ResourceType_TRAIT_USER: + processPrincipals = userPrincipals + case v2.ResourceType_TRAIT_GROUP: + processPrincipals = groupPrincipals + default: + l.Error("unexpected external resource type trait", zap.Any("trait", matchResourceMatchAllAnno.GetResourceType())) } - entitlementMap, ok := expandableEntitlementsResourceMap[resourceBID] - if !ok { - entitlementMap = make([]*v2.Entitlement, 0) + for _, principal := range processPrincipals { + newGrant := newGrantForExternalPrincipal(grant, principal) + expandedGrants = append(expandedGrants, newGrant) } - entitlementMap = append(entitlementMap, parsedEnt) - expandableEntitlementsResourceMap[resourceBID] = entitlementMap + grantsToDelete = append(grantsToDelete, grant.GetId()) + continue } - } - - // Match by ID - matchResourceMatchIDAnno, 
err := GetExternalResourceMatchIDAnnotation(annos) - if err != nil { - return err - } - if matchResourceMatchIDAnno != nil { - if principal, ok := principalMap[matchResourceMatchIDAnno.Id]; ok { - newGrant := newGrantForExternalPrincipal(grant, principal) - expandedGrants = append(expandedGrants, newGrant) - - newGrantAnnos := annotations.Annotations(newGrant.Annotations) - newExpandableEntitlementIDs := make([]string, 0) - if expandableAnno != nil { - groupPrincipalBID, err := bid.MakeBid(grant.Principal) + expandableAnno, err := GetExpandableAnnotation(annos) + if err != nil { + return err + } + expandableEntitlementsResourceMap := make(map[string][]*v2.Entitlement) + if expandableAnno != nil { + for _, entId := range expandableAnno.GetEntitlementIds() { + parsedEnt, err := bid.ParseEntitlementBid(entId) if err != nil { - l.Error("error making group principal bid", zap.Error(err), zap.Any("grant.Principal", grant.Principal)) + l.Error("error parsing expandable entitlement bid", zap.Any("entitlementId", entId)) continue } - - principalEntitlements := expandableEntitlementsResourceMap[groupPrincipalBID] - for _, expandableGrant := range principalEntitlements { - newExpandableEntId := entitlement.NewEntitlementID(principal, expandableGrant.Slug) - _, err := s.store.GetEntitlement(ctx, &reader_v2.EntitlementsReaderServiceGetEntitlementRequest{EntitlementId: newExpandableEntId}) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - l.Error("found no entitlement with entitlement id generated from external source sync", zap.Any("entitlementId", newExpandableEntId)) - continue - } - return err - } - newExpandableEntitlementIDs = append(newExpandableEntitlementIDs, newExpandableEntId) + resourceBID, err := bid.MakeBid(parsedEnt.GetResource()) + if err != nil { + l.Error("error making resource bid", zap.Any("parsedEnt.Resource", parsedEnt.GetResource())) + continue } - - newExpandableAnno := &v2.GrantExpandable{ - EntitlementIds: newExpandableEntitlementIDs, - Shallow: expandableAnno.Shallow, - ResourceTypeIds: expandableAnno.ResourceTypeIds, + entitlementMap, ok := expandableEntitlementsResourceMap[resourceBID] + if !ok { + entitlementMap = make([]*v2.Entitlement, 0) } - newGrantAnnos.Update(newExpandableAnno) - newGrant.Annotations = newGrantAnnos - expandedGrants = append(expandedGrants, newGrant) + entitlementMap = append(entitlementMap, parsedEnt) + expandableEntitlementsResourceMap[resourceBID] = entitlementMap } } - // We still want to delete the grant even if there are no matches - // Since it does not correspond to any known user - grantsToDelete = append(grantsToDelete, grant.Id) - } + // Match by ID + matchResourceMatchIDAnno, err := GetExternalResourceMatchIDAnnotation(annos) + if err != nil { + return err + } + if matchResourceMatchIDAnno != nil { + if principal, ok := principalMap[matchResourceMatchIDAnno.GetId()]; ok { + newGrant := newGrantForExternalPrincipal(grant, principal) + expandedGrants = append(expandedGrants, newGrant) - // Match by key/val - matchExternalResource, err := GetExternalResourceMatchAnnotation(annos) - if err != nil { - return err - } + newGrantAnnos := annotations.Annotations(newGrant.GetAnnotations()) - if matchExternalResource != nil { - switch matchExternalResource.ResourceType { - case v2.ResourceType_TRAIT_USER: - for _, userPrincipal := range userPrincipals { - userTrait, err := resource.GetUserTrait(userPrincipal) - if err != nil { - l.Error("error getting user trait", zap.Any("userPrincipal", userPrincipal)) - continue - } - if 
matchExternalResource.Key == "email" { - if userTraitContainsEmail(userTrait.Emails, matchExternalResource.Value) { - newGrant := newGrantForExternalPrincipal(grant, userPrincipal) - expandedGrants = append(expandedGrants, newGrant) - // continue to next principal since we found an email match + newExpandableEntitlementIDs := make([]string, 0) + if expandableAnno != nil { + groupPrincipalBID, err := bid.MakeBid(grant.GetPrincipal()) + if err != nil { + l.Error("error making group principal bid", zap.Error(err), zap.Any("grant.Principal", grant.GetPrincipal())) continue } - } - profileVal, ok := resource.GetProfileStringValue(userTrait.Profile, matchExternalResource.Key) - if ok && strings.EqualFold(profileVal, matchExternalResource.Value) { - newGrant := newGrantForExternalPrincipal(grant, userPrincipal) + + principalEntitlements := expandableEntitlementsResourceMap[groupPrincipalBID] + for _, expandableGrant := range principalEntitlements { + newExpandableEntId := entitlement.NewEntitlementID(principal, expandableGrant.GetSlug()) + _, err := s.store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{EntitlementId: newExpandableEntId}.Build()) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + l.Error("found no entitlement with entitlement id generated from external source sync", zap.Any("entitlementId", newExpandableEntId)) + continue + } + return err + } + newExpandableEntitlementIDs = append(newExpandableEntitlementIDs, newExpandableEntId) + } + + newExpandableAnno := v2.GrantExpandable_builder{ + EntitlementIds: newExpandableEntitlementIDs, + Shallow: expandableAnno.GetShallow(), + ResourceTypeIds: expandableAnno.GetResourceTypeIds(), + }.Build() + newGrantAnnos.Update(newExpandableAnno) + newGrant.SetAnnotations(newGrantAnnos) expandedGrants = append(expandedGrants, newGrant) } } - case v2.ResourceType_TRAIT_GROUP: - for _, groupPrincipal := range groupPrincipals { - groupTrait, err := resource.GetGroupTrait(groupPrincipal) - if err != nil { - l.Error("error getting group trait", zap.Any("groupPrincipal", groupPrincipal)) - continue - } - profileVal, ok := resource.GetProfileStringValue(groupTrait.Profile, matchExternalResource.Key) - if ok && strings.EqualFold(profileVal, matchExternalResource.Value) { - newGrant := newGrantForExternalPrincipal(grant, groupPrincipal) - newGrantAnnos := annotations.Annotations(newGrant.Annotations) - - newExpandableEntitlementIDs := make([]string, 0) - if expandableAnno != nil { - groupPrincipalBID, err := bid.MakeBid(grant.Principal) - if err != nil { - l.Error("error making group principal bid", zap.Error(err), zap.Any("grant.Principal", grant.Principal)) + + // We still want to delete the grant even if there are no matches + // Since it does not correspond to any known user + grantsToDelete = append(grantsToDelete, grant.GetId()) + } + + // Match by key/val + matchExternalResource, err := GetExternalResourceMatchAnnotation(annos) + if err != nil { + return err + } + + if matchExternalResource != nil { + switch matchExternalResource.GetResourceType() { + case v2.ResourceType_TRAIT_USER: + for _, userPrincipal := range userPrincipals { + userTrait, err := resource.GetUserTrait(userPrincipal) + if err != nil { + l.Error("error getting user trait", zap.Any("userPrincipal", userPrincipal)) + continue + } + if matchExternalResource.GetKey() == "email" { + if userTraitContainsEmail(userTrait.GetEmails(), matchExternalResource.GetValue()) { + newGrant := newGrantForExternalPrincipal(grant, userPrincipal) + 
expandedGrants = append(expandedGrants, newGrant) + // continue to next principal since we found an email match continue } - - principalEntitlements := expandableEntitlementsResourceMap[groupPrincipalBID] - for _, expandableGrant := range principalEntitlements { - newExpandableEntId := entitlement.NewEntitlementID(groupPrincipal, expandableGrant.Slug) - _, err := s.store.GetEntitlement(ctx, &reader_v2.EntitlementsReaderServiceGetEntitlementRequest{EntitlementId: newExpandableEntId}) + } + profileVal, ok := resource.GetProfileStringValue(userTrait.GetProfile(), matchExternalResource.GetKey()) + if ok && strings.EqualFold(profileVal, matchExternalResource.GetValue()) { + newGrant := newGrantForExternalPrincipal(grant, userPrincipal) + expandedGrants = append(expandedGrants, newGrant) + } + } + case v2.ResourceType_TRAIT_GROUP: + for _, groupPrincipal := range groupPrincipals { + groupTrait, err := resource.GetGroupTrait(groupPrincipal) + if err != nil { + l.Error("error getting group trait", zap.Any("groupPrincipal", groupPrincipal)) + continue + } + profileVal, ok := resource.GetProfileStringValue(groupTrait.GetProfile(), matchExternalResource.GetKey()) + if ok && strings.EqualFold(profileVal, matchExternalResource.GetValue()) { + newGrant := newGrantForExternalPrincipal(grant, groupPrincipal) + newGrantAnnos := annotations.Annotations(newGrant.GetAnnotations()) + + newExpandableEntitlementIDs := make([]string, 0) + if expandableAnno != nil { + groupPrincipalBID, err := bid.MakeBid(grant.GetPrincipal()) if err != nil { - if errors.Is(err, sql.ErrNoRows) { - l.Error("found no entitlement with entitlement id generated from external source sync", zap.Any("entitlementId", newExpandableEntId)) - continue + l.Error("error making group principal bid", zap.Error(err), zap.Any("grant.Principal", grant.GetPrincipal())) + continue + } + + principalEntitlements := expandableEntitlementsResourceMap[groupPrincipalBID] + for _, expandableGrant := range principalEntitlements { + newExpandableEntId := entitlement.NewEntitlementID(groupPrincipal, expandableGrant.GetSlug()) + _, err := s.store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{EntitlementId: newExpandableEntId}.Build()) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + l.Error("found no entitlement with entitlement id generated from external source sync", zap.Any("entitlementId", newExpandableEntId)) + continue + } + return err } - return err + newExpandableEntitlementIDs = append(newExpandableEntitlementIDs, newExpandableEntId) } - newExpandableEntitlementIDs = append(newExpandableEntitlementIDs, newExpandableEntId) - } - newExpandableAnno := &v2.GrantExpandable{ - EntitlementIds: newExpandableEntitlementIDs, - Shallow: expandableAnno.Shallow, - ResourceTypeIds: expandableAnno.ResourceTypeIds, + newExpandableAnno := v2.GrantExpandable_builder{ + EntitlementIds: newExpandableEntitlementIDs, + Shallow: expandableAnno.GetShallow(), + ResourceTypeIds: expandableAnno.GetResourceTypeIds(), + }.Build() + newGrantAnnos.Update(newExpandableAnno) + newGrant.SetAnnotations(newGrantAnnos) + expandedGrants = append(expandedGrants, newGrant) } - newGrantAnnos.Update(newExpandableAnno) - newGrant.Annotations = newGrantAnnos - expandedGrants = append(expandedGrants, newGrant) } } + default: + l.Error("unexpected external resource type trait", zap.Any("trait", matchExternalResource.GetResourceType())) } - default: - l.Error("unexpected external resource type trait", zap.Any("trait", 
matchExternalResource.ResourceType)) - } - // We still want to delete the grant even if there are no matches - grantsToDelete = append(grantsToDelete, grant.Id) + // We still want to delete the grant even if there are no matches + grantsToDelete = append(grantsToDelete, grant.GetId()) + } } } newGrantIDs := mapset.NewSet[string]() for _, ng := range expandedGrants { - newGrantIDs.Add(ng.Id) + newGrantIDs.Add(ng.GetId()) } - err = s.store.PutGrants(ctx, expandedGrants...) + err := s.store.PutGrants(ctx, expandedGrants...) if err != nil { return err } @@ -2354,18 +2767,18 @@ func (s *syncer) processGrantsWithExternalPrincipals(ctx context.Context, princi func userTraitContainsEmail(emails []*v2.UserTrait_Email, address string) bool { return slices.ContainsFunc(emails, func(e *v2.UserTrait_Email) bool { - return strings.EqualFold(e.Address, address) + return strings.EqualFold(e.GetAddress(), address) }) } func newGrantForExternalPrincipal(grant *v2.Grant, principal *v2.Resource) *v2.Grant { - newGrant := &v2.Grant{ - Entitlement: grant.Entitlement, + newGrant := v2.Grant_builder{ + Entitlement: grant.GetEntitlement(), Principal: principal, - Id: batonGrant.NewGrantID(principal, grant.Entitlement), - Sources: grant.Sources, - Annotations: grant.Annotations, - } + Id: batonGrant.NewGrantID(principal, grant.GetEntitlement()), + Sources: grant.GetSources(), + Annotations: grant.GetAnnotations(), + }.Build() return newGrant } @@ -2405,270 +2818,35 @@ func GetExpandableAnnotation(annos annotations.Annotations) (*v2.GrantExpandable return expandableAnno, nil } -func (s *syncer) runGrantExpandActions(ctx context.Context) (bool, error) { - ctx, span := tracer.Start(ctx, "syncer.runGrantExpandActions") - defer span.End() - - l := ctxzap.Extract(ctx) - - graph := s.state.EntitlementGraph(ctx) - l = l.With(zap.Int("depth", graph.Depth)) - - // Peek the next action on the stack - if len(graph.Actions) == 0 { - l.Debug("runGrantExpandActions: no actions") // zap.Any("graph", graph), - - return true, nil - } - action := graph.Actions[0] - - l = l.With(zap.String("source_entitlement_id", action.SourceEntitlementID), zap.String("descendant_entitlement_id", action.DescendantEntitlementID)) - - // Fetch source and descendant entitlement - sourceEntitlement, err := s.store.GetEntitlement(ctx, &reader_v2.EntitlementsReaderServiceGetEntitlementRequest{ - EntitlementId: action.SourceEntitlementID, - }) - if err != nil { - l.Error("runGrantExpandActions: error fetching source entitlement", zap.Error(err)) - return false, fmt.Errorf("runGrantExpandActions: error fetching source entitlement: %w", err) - } - - descendantEntitlement, err := s.store.GetEntitlement(ctx, &reader_v2.EntitlementsReaderServiceGetEntitlementRequest{ - EntitlementId: action.DescendantEntitlementID, - }) - if err != nil { - l.Error("runGrantExpandActions: error fetching descendant entitlement", zap.Error(err)) - return false, fmt.Errorf("runGrantExpandActions: error fetching descendant entitlement: %w", err) - } - - // Fetch a page of source grants - sourceGrants, err := s.store.ListGrantsForEntitlement(ctx, &reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest{ - Entitlement: sourceEntitlement.GetEntitlement(), - PageToken: action.PageToken, - }) - if err != nil { - l.Error("runGrantExpandActions: error fetching source grants", zap.Error(err)) - return false, fmt.Errorf("runGrantExpandActions: error fetching source grants: %w", err) - } - - var newGrants = make([]*v2.Grant, 0) - for _, sourceGrant := range sourceGrants.List { - // Skip 
this grant if it is not for a resource type we care about - if len(action.ResourceTypeIDs) > 0 { - relevantResourceType := false - for _, resourceTypeID := range action.ResourceTypeIDs { - if sourceGrant.GetPrincipal().Id.ResourceType == resourceTypeID { - relevantResourceType = true - break - } - } - - if !relevantResourceType { - continue - } - } - - // If this is a shallow action, then we only want to expand grants that have no sources which indicates that it was directly assigned. - if action.Shallow { - // If we have no sources, this is a direct grant - foundDirectGrant := len(sourceGrant.GetSources().GetSources()) == 0 - // If the source grant has sources, then we need to see if any of them are the source entitlement itself - for src := range sourceGrant.GetSources().GetSources() { - if src == sourceEntitlement.GetEntitlement().GetId() { - foundDirectGrant = true - break - } - } - - // This is not a direct grant, so skip it since we are a shallow action - if !foundDirectGrant { - continue - } - } - - // Unroll all grants for the principal on the descendant entitlement. This should, on average, be... 1. - descendantGrants := make([]*v2.Grant, 0, 1) - pageToken := "" - for { - req := &reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest{ - Entitlement: descendantEntitlement.GetEntitlement(), - PrincipalId: sourceGrant.GetPrincipal().GetId(), - PageToken: pageToken, - Annotations: nil, - } - - resp, err := s.store.ListGrantsForEntitlement(ctx, req) - if err != nil { - l.Error("runGrantExpandActions: error fetching descendant grants", zap.Error(err)) - return false, fmt.Errorf("runGrantExpandActions: error fetching descendant grants: %w", err) - } - - descendantGrants = append(descendantGrants, resp.List...) - pageToken = resp.NextPageToken - if pageToken == "" { - break - } - } - - // If we have no grants for the principal in the descendant entitlement, make one. - directGrant := true - if len(descendantGrants) == 0 { - directGrant = false - // TODO(morgabra): This is kinda gnarly, grant ID won't have any special meaning. - // FIXME(morgabra): We should probably conflict check with grant id? - descendantGrant, err := s.newExpandedGrant(ctx, descendantEntitlement.Entitlement, sourceGrant.GetPrincipal()) - if err != nil { - l.Error("runGrantExpandActions: error creating new grant", zap.Error(err)) - return false, fmt.Errorf("runGrantExpandActions: error creating new grant: %w", err) - } - descendantGrants = append(descendantGrants, descendantGrant) - l.Debug( - "runGrantExpandActions: created new grant for expansion", - zap.String("grant_id", descendantGrant.GetId()), - ) - } - - // Add the source entitlement as a source to all descendant grants. - for _, descendantGrant := range descendantGrants { - sources := descendantGrant.GetSources() - if sources == nil { - sources = &v2.GrantSources{} - descendantGrant.Sources = sources - } - sourcesMap := sources.GetSources() - if sourcesMap == nil { - sourcesMap = make(map[string]*v2.GrantSources_GrantSource) - sources.Sources = sourcesMap - } - - if directGrant && len(sources.Sources) == 0 { - // If we are already granted this entitlement, make sure to add ourselves as a source. - sourcesMap[descendantGrant.GetEntitlement().GetId()] = &v2.GrantSources_GrantSource{} - } - // Include the source grant as a source. - sourcesMap[sourceGrant.GetEntitlement().GetId()] = &v2.GrantSources_GrantSource{} - } - newGrants = append(newGrants, descendantGrants...) - } - - err = s.store.PutGrants(ctx, newGrants...) 
- if err != nil { - l.Error("runGrantExpandActions: error updating descendant grants", zap.Error(err)) - return false, fmt.Errorf("runGrantExpandActions: error updating descendant grants: %w", err) - } - - // If we have no more pages of work, pop the action off the stack and mark this edge in the graph as done - action.PageToken = sourceGrants.NextPageToken - if action.PageToken == "" { - graph.MarkEdgeExpanded(action.SourceEntitlementID, action.DescendantEntitlementID) - graph.Actions = graph.Actions[1:] - } - return false, nil -} - -func (s *syncer) newExpandedGrant(_ context.Context, descEntitlement *v2.Entitlement, principal *v2.Resource) (*v2.Grant, error) { - enResource := descEntitlement.GetResource() - if enResource == nil { - return nil, fmt.Errorf("newExpandedGrant: entitlement has no resource") - } - - if principal == nil { - return nil, fmt.Errorf("newExpandedGrant: principal is nil") - } - - // Add immutable annotation since this function is only called if no direct grant exists - var annos annotations.Annotations - annos.Update(&v2.GrantImmutable{}) - - grant := &v2.Grant{ - Id: fmt.Sprintf("%s:%s:%s", descEntitlement.Id, principal.Id.ResourceType, principal.Id.Resource), - Entitlement: descEntitlement, - Principal: principal, - Annotations: annos, - } - - return grant, nil -} - // expandGrantsForEntitlements expands grants for the given entitlement. +// This method delegates to the expand.Expander for the actual expansion logic. func (s *syncer) expandGrantsForEntitlements(ctx context.Context) error { ctx, span := tracer.Start(ctx, "syncer.expandGrantsForEntitlements") defer span.End() l := ctxzap.Extract(ctx) - graph := s.state.EntitlementGraph(ctx) - l = l.With(zap.Int("depth", graph.Depth)) - l.Debug("expandGrantsForEntitlements: start") // zap.Any("graph", graph) s.counts.LogExpandProgress(ctx, graph.Actions) - actionsDone, err := s.runGrantExpandActions(ctx) + // Create an expander and run a single step + expander := expand.NewExpander(s.store, graph) + err := expander.RunSingleStep(ctx) if err != nil { - // Skip action and delete the edge that caused the error. - erroredAction := graph.Actions[0] - l.Error("expandGrantsForEntitlements: error running graph action", zap.Error(err), zap.Any("action", erroredAction)) - _ = graph.DeleteEdge(ctx, erroredAction.SourceEntitlementID, erroredAction.DescendantEntitlementID) - graph.Actions = graph.Actions[1:] - if len(graph.Actions) == 0 { - actionsDone = true - } - // TODO: return a warning - } - if !actionsDone { - return nil - } - - if maxDepth == 0 { - maxDepth = defaultMaxDepth - } - - if int64(graph.Depth) > maxDepth { - l.Error( - "expandGrantsForEntitlements: exceeded max depth", - // zap.Any("graph", graph), - zap.Int64("max_depth", maxDepth), - ) - s.state.FinishAction(ctx) - return fmt.Errorf("expandGrantsForEntitlements: exceeded max depth (%d)", maxDepth) - } - - // TODO(morgabra) Yield here after some amount of work? - // traverse edges or call some sort of getEntitlements - for _, sourceEntitlementID := range graph.GetEntitlements() { - // We've already expanded this entitlement, so skip it. - if graph.IsEntitlementExpanded(sourceEntitlementID) { - continue - } - - // We have ancestors who have not been expanded yet, so we can't expand ourselves. 
- if graph.HasUnexpandedAncestors(sourceEntitlementID) { - l.Debug("expandGrantsForEntitlements: skipping source entitlement because it has unexpanded ancestors", zap.String("source_entitlement_id", sourceEntitlementID)) - continue - } - - for descendantEntitlementID, grantInfo := range graph.GetDescendantEntitlements(sourceEntitlementID) { - if grantInfo.IsExpanded { - continue - } - graph.Actions = append(graph.Actions, &expand.EntitlementGraphAction{ - SourceEntitlementID: sourceEntitlementID, - DescendantEntitlementID: descendantEntitlementID, - PageToken: "", - Shallow: grantInfo.IsShallow, - ResourceTypeIDs: grantInfo.ResourceTypeIDs, - }) + l.Error("expandGrantsForEntitlements: error during expansion", zap.Error(err)) + // If max depth exceeded, finish the action before returning the error + // to prevent the state machine from getting stuck + if errors.Is(err, expand.ErrMaxDepthExceeded) { + s.state.FinishAction(ctx) } + return err } - if graph.IsExpanded() { - l.Debug("expandGrantsForEntitlements: graph is expanded") // zap.Any("graph", graph) + if expander.IsDone(ctx) { + l.Debug("expandGrantsForEntitlements: graph is expanded") s.state.FinishAction(ctx) - return nil } - graph.Depth++ - l.Debug("expandGrantsForEntitlements: graph is not expanded") // zap.Any("graph", graph) return nil } @@ -2693,6 +2871,9 @@ func (s *syncer) loadStore(ctx context.Context) error { return err } + if s.setSessionStore != nil { + s.setSessionStore.SetSessionStore(ctx, store) + } s.store = store return nil @@ -2711,6 +2892,13 @@ func (s *syncer) Close(ctx context.Context) error { } } + if s.externalResourceReader != nil { + err = s.externalResourceReader.Close() + if err != nil { + return fmt.Errorf("error closing external resource reader: %w", err) + } + } + if s.c1zManager != nil { err = s.c1zManager.SaveC1Z(ctx) if err != nil { @@ -2792,10 +2980,10 @@ func WithExternalResourceEntitlementIdFilter(entitlementId string) SyncOpt { } } -func WithTargetedSyncResourceIDs(resourceIDs []string) SyncOpt { +func WithTargetedSyncResources(resources []*v2.Resource) SyncOpt { return func(s *syncer) { - s.targetedSyncResourceIDs = resourceIDs - if len(resourceIDs) > 0 { + s.targetedSyncResources = resources + if len(resources) > 0 { s.syncType = connectorstore.SyncTypePartial return } @@ -2804,6 +2992,18 @@ func WithTargetedSyncResourceIDs(resourceIDs []string) SyncOpt { } } +func WithSessionStore(sessionStore sessions.SetSessionStore) SyncOpt { + return func(s *syncer) { + s.setSessionStore = sessionStore + } +} + +func WithSyncResourceTypes(resourceTypeIDs []string) SyncOpt { + return func(s *syncer) { + s.syncResourceTypes = resourceTypeIDs + } +} + func WithOnlyExpandGrants() SyncOpt { return func(s *syncer) { s.onlyExpandGrants = true @@ -2821,12 +3021,6 @@ func WithSyncID(syncID string) SyncOpt { } } -func WithInjectSyncIDAnnotation(inject bool) SyncOpt { - return func(s *syncer) { - s.injectSyncIDAnnotation = inject - } -} - func WithSkipEntitlementsAndGrants(skip bool) SyncOpt { return func(s *syncer) { s.skipEntitlementsAndGrants = skip @@ -2842,14 +3036,21 @@ func WithSkipEntitlementsAndGrants(skip bool) SyncOpt { } } +func WithSkipGrants(skip bool) SyncOpt { + return func(s *syncer) { + s.skipGrants = skip + } +} + // NewSyncer returns a new syncer object. 
func NewSyncer(ctx context.Context, c types.ConnectorClient, opts ...SyncOpt) (Syncer, error) { s := &syncer{ - connector: &syncIDClientWrapper{ConnectorClient: c, syncID: ""}, // we only get the syncid later - skipEGForResourceType: make(map[string]bool), - resourceTypeTraits: make(map[string][]v2.ResourceType_Trait), - counts: NewProgressCounts(), - syncType: connectorstore.SyncTypeFull, + connector: c, + skipEGForResourceType: make(map[string]bool), + skipEntitlementsForResourceType: make(map[string]bool), + resourceTypeTraits: make(map[string][]v2.ResourceType_Trait), + counts: NewProgressCounts(), + syncType: connectorstore.SyncTypeFull, } for _, o := range opts { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go index e3e9fa4b..cb675996 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go @@ -66,10 +66,22 @@ func (c *Compactor) CompactWithSyncID(ctx context.Context, destSyncID string) er } }() + // Drop grants indexes to improve performance. + err = c.dest.DropGrantIndexes(ctx) + if err != nil { + return fmt.Errorf("failed to drop grants indexes: %w", err) + } + if err := c.processRecords(ctx, attached, destSyncID, baseSyncID, appliedSyncID); err != nil { return fmt.Errorf("failed to process records: %w", err) } + // Re-create the destination database to re-create the grant indexes. + err = c.dest.InitTables(ctx) + if err != nil { + return fmt.Errorf("failed to re-create destination database: %w", err) + } + return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go index 1d638341..dc8a070d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go @@ -8,11 +8,11 @@ import ( "os" "path" "path/filepath" + "time" reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" "github.com/conductorone/baton-sdk/pkg/connectorstore" "github.com/conductorone/baton-sdk/pkg/dotc1z" - c1zmanager "github.com/conductorone/baton-sdk/pkg/dotc1z/manager" "github.com/conductorone/baton-sdk/pkg/sdk" "github.com/conductorone/baton-sdk/pkg/sync" "github.com/conductorone/baton-sdk/pkg/synccompactor/attached" @@ -35,8 +35,9 @@ type Compactor struct { compactorType CompactorType entries []*CompactableSync - tmpDir string - destDir string + tmpDir string + destDir string + runDuration time.Duration } type CompactableSync struct { @@ -62,6 +63,12 @@ func WithCompactorType(compactorType CompactorType) Option { } } +func WithRunDuration(runDuration time.Duration) Option { + return func(c *Compactor) { + c.runDuration = runDuration + } +} + func NewCompactor(ctx context.Context, outputDir string, compactableSyncs []*CompactableSync, opts ...Option) (*Compactor, func() error, error) { if len(compactableSyncs) < 2 { return nil, nil, ErrNotEnoughFilesToCompact @@ -103,7 +110,32 @@ func (c *Compactor) Compact(ctx context.Context) (*CompactableSync, error) { return nil, nil } + compactionStart := time.Now() + runCtx := ctx + var runCanc context.CancelFunc + if c.runDuration > 0 { + runCtx, runCanc = context.WithTimeout(ctx, c.runDuration) + } + if runCanc != nil { + defer runCanc() + } + + l := ctxzap.Extract(ctx) var err error + select { + case 
<-runCtx.Done(): + err = context.Cause(runCtx) + switch { + case errors.Is(err, context.DeadlineExceeded): + l.Info("compaction run duration has expired, exiting compaction early") + return nil, fmt.Errorf("compaction run duration has expired: %w", err) + default: + l.Error("compaction context cancelled", zap.Error(err)) + return nil, err + } + default: + } + // Base sync is c.entries[0], so compact all incrementals first, then apply that onto the base. applied := c.entries[len(c.entries)-1] for i := len(c.entries) - 2; i >= 0; i-- { @@ -113,7 +145,6 @@ func (c *Compactor) Compact(ctx context.Context) (*CompactableSync, error) { } } - l := ctxzap.Extract(ctx) // Grant expansion doesn't use the connector interface at all, so giving syncer an empty connector is safe... for now. // If that ever changes, we should implement a file connector that is a wrapper around the reader. emptyConnector, err := sdk.NewEmptyConnector() @@ -124,12 +155,28 @@ func (c *Compactor) Compact(ctx context.Context) (*CompactableSync, error) { // Use syncer to expand grants. // TODO: Handle external resources. - syncer, err := sync.NewSyncer( - ctx, - emptyConnector, + syncOpts := []sync.SyncOpt{ sync.WithC1ZPath(applied.FilePath), + sync.WithTmpDir(c.tmpDir), sync.WithSyncID(applied.SyncID), sync.WithOnlyExpandGrants(), + } + + compactionDuration := time.Since(compactionStart) + runDuration := c.runDuration - compactionDuration + l.Debug("finished compaction", zap.Duration("compaction_duration", compactionDuration)) + + switch { + case c.runDuration > 0 && runDuration < 0: + return nil, fmt.Errorf("unable to finish compaction sync in run duration (%s). compactions took %s", c.runDuration, compactionDuration) + case runDuration > 0: + syncOpts = append(syncOpts, sync.WithRunDuration(runDuration)) + } + + syncer, err := sync.NewSyncer( + ctx, + emptyConnector, + syncOpts..., ) if err != nil { l.Error("error creating syncer", zap.Error(err)) @@ -182,36 +229,36 @@ func cpFile(sourcePath string, destPath string) error { return nil } -func (c *Compactor) getLatestObjects(ctx context.Context, info *CompactableSync) (*reader_v2.SyncRun, *dotc1z.C1File, c1zmanager.Manager, func(), error) { +func (c *Compactor) getLatestObjects(ctx context.Context, info *CompactableSync) (*reader_v2.SyncRun, *dotc1z.C1File, func(), error) { cleanup := func() {} - baseC1Z, err := c1zmanager.New(ctx, info.FilePath, c1zmanager.WithTmpDir(c.tmpDir)) - if err != nil { - return nil, nil, nil, cleanup, err - } - - cleanup = func() { - _ = baseC1Z.Close(ctx) - } - baseFile, err := baseC1Z.LoadC1Z(ctx) + baseFile, err := dotc1z.NewC1ZFile( + ctx, + info.FilePath, + dotc1z.WithTmpDir(c.tmpDir), + dotc1z.WithDecoderOptions(dotc1z.WithDecoderConcurrency(0)), + dotc1z.WithReadOnly(true), + // We're only reading, so it's safe to use these pragmas. 
+ dotc1z.WithPragma("journal_mode", "OFF"), + dotc1z.WithPragma("synchronous", "OFF"), + ) if err != nil { - return nil, nil, nil, cleanup, err + return nil, nil, cleanup, err } cleanup = func() { _ = baseFile.Close() - _ = baseC1Z.Close(ctx) } - latestAppliedSync, err := baseFile.GetSync(ctx, &reader_v2.SyncsReaderServiceGetSyncRequest{ + latestAppliedSync, err := baseFile.GetSync(ctx, reader_v2.SyncsReaderServiceGetSyncRequest_builder{ SyncId: info.SyncID, Annotations: nil, - }) + }.Build()) if err != nil { - return nil, nil, nil, cleanup, err + return nil, nil, cleanup, err } - return latestAppliedSync.Sync, baseFile, baseC1Z, cleanup, nil + return latestAppliedSync.GetSync(), baseFile, cleanup, nil } func unionSyncTypes(a, b connectorstore.SyncType) connectorstore.SyncType { @@ -237,10 +284,22 @@ func (c *Compactor) doOneCompaction(ctx context.Context, base *CompactableSync, zap.String("applied_sync", applied.SyncID), zap.String("tmp_dir", c.tmpDir), ) - opts := []dotc1z.C1ZOption{ - dotc1z.WithPragma("journal_mode", "WAL"), dotc1z.WithTmpDir(c.tmpDir), + // Performance improvements: + // Disable journaling. + dotc1z.WithPragma("journal_mode", "OFF"), + // Disable synchronous writes + dotc1z.WithPragma("synchronous", "OFF"), + // Use exclusive locking. + dotc1z.WithPragma("main.locking_mode", "EXCLUSIVE"), + // Use memory for temporary storage. + dotc1z.WithPragma("temp_store", "MEMORY"), + // We close this c1z after compaction, so syncer won't have these pragmas when expanding grants. + // Use parallel decoding. + dotc1z.WithDecoderOptions(dotc1z.WithDecoderConcurrency(0)), + // Use parallel encoding. + dotc1z.WithEncoderConcurrency(0), } fileName := fmt.Sprintf("compacted-%s-%s.c1z", base.SyncID, applied.SyncID) @@ -251,19 +310,19 @@ func (c *Compactor) doOneCompaction(ctx context.Context, base *CompactableSync, } defer func() { _ = newFile.Close() }() - baseSync, baseFile, _, cleanupBase, err := c.getLatestObjects(ctx, base) + baseSync, baseFile, cleanupBase, err := c.getLatestObjects(ctx, base) defer cleanupBase() if err != nil { return nil, err } - appliedSync, appliedFile, _, cleanupApplied, err := c.getLatestObjects(ctx, applied) + appliedSync, appliedFile, cleanupApplied, err := c.getLatestObjects(ctx, applied) defer cleanupApplied() if err != nil { return nil, err } - syncType := unionSyncTypes(connectorstore.SyncType(baseSync.SyncType), connectorstore.SyncType(appliedSync.SyncType)) + syncType := unionSyncTypes(connectorstore.SyncType(baseSync.GetSyncType()), connectorstore.SyncType(appliedSync.GetSyncType())) newSyncId, err := newFile.StartNewSync(ctx, syncType, "") if err != nil { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go index cb59bb5c..5ceda817 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go @@ -36,9 +36,13 @@ func (c *actionListSchemasTaskHandler) HandleTask(ctx context.Context) error { if t == nil { return c.helpers.FinishTask(ctx, nil, nil, errors.New("action list schemas task is nil")) } - resp, err := cc.ListActionSchemas(ctx, &v2.ListActionSchemasRequest{ + reqBuilder := v2.ListActionSchemasRequest_builder{ Annotations: t.GetAnnotations(), - }) + } + if resourceTypeID := t.GetResourceTypeId(); resourceTypeID != "" { + reqBuilder.ResourceTypeId = resourceTypeID + } + resp, err := cc.ListActionSchemas(ctx, reqBuilder.Build()) if err != nil { return 
c.helpers.FinishTask(ctx, nil, nil, err) } @@ -78,10 +82,10 @@ func (c *actionGetSchemaTaskHandler) HandleTask(ctx context.Context) error { return c.helpers.FinishTask(ctx, nil, nil, errors.New("action name required")) } - resp, err := cc.GetActionSchema(ctx, &v2.GetActionSchemaRequest{ + resp, err := cc.GetActionSchema(ctx, v2.GetActionSchemaRequest_builder{ Name: t.GetName(), Annotations: t.GetAnnotations(), - }) + }.Build()) if err != nil { return c.helpers.FinishTask(ctx, nil, nil, err) } @@ -120,15 +124,16 @@ func (c *actionInvokeTaskHandler) HandleTask(ctx context.Context) error { if t == nil || t.GetName() == "" { return c.helpers.FinishTask(ctx, nil, nil, errors.New("action name required")) } - if t.GetArgs() == nil { - return c.helpers.FinishTask(ctx, nil, nil, errors.New("args required")) - } - resp, err := cc.InvokeAction(ctx, &v2.InvokeActionRequest{ + reqBuilder := v2.InvokeActionRequest_builder{ Name: t.GetName(), Args: t.GetArgs(), Annotations: t.GetAnnotations(), - }) + } + if resourceTypeID := t.GetResourceTypeId(); resourceTypeID != "" { + reqBuilder.ResourceTypeId = resourceTypeID + } + resp, err := cc.InvokeAction(ctx, reqBuilder.Build()) if err != nil { return c.helpers.FinishTask(ctx, nil, nil, err) } @@ -168,16 +173,16 @@ func (c *actionStatusTaskHandler) HandleTask(ctx context.Context) error { return c.helpers.FinishTask(ctx, nil, nil, errors.New("action id required")) } - resp, err := cc.GetActionStatus(ctx, &v2.GetActionStatusRequest{ + resp, err := cc.GetActionStatus(ctx, v2.GetActionStatusRequest_builder{ Name: t.GetName(), Id: t.GetId(), Annotations: t.GetAnnotations(), - }) + }.Build()) if err != nil { return c.helpers.FinishTask(ctx, nil, nil, err) } - l.Debug("ActionInvoke response", zap.Any("resp", resp)) + l.Debug("ActionStatus response", zap.Any("resp", resp)) return c.helpers.FinishTask(ctx, resp, nil, nil) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/bulk_create_tickets.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/bulk_create_tickets.go index 24bb2b6c..f3d7bd81 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/bulk_create_tickets.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/bulk_create_tickets.go @@ -38,17 +38,17 @@ func (c *bulkCreateTicketTaskHandler) HandleTask(ctx context.Context) error { ticketRequests := make([]*v2.TicketsServiceCreateTicketRequest, 0) for _, createTicketTask := range t.GetTicketRequests() { - ticketRequests = append(ticketRequests, &v2.TicketsServiceCreateTicketRequest{ + ticketRequests = append(ticketRequests, v2.TicketsServiceCreateTicketRequest_builder{ Request: createTicketTask.GetTicketRequest(), Schema: createTicketTask.GetTicketSchema(), Annotations: createTicketTask.GetAnnotations(), - }) + }.Build()) } cc := c.helpers.ConnectorClient() - resp, err := cc.BulkCreateTickets(ctx, &v2.TicketsServiceBulkCreateTicketsRequest{ + resp, err := cc.BulkCreateTickets(ctx, v2.TicketsServiceBulkCreateTicketsRequest_builder{ TicketRequests: ticketRequests, - }) + }.Build()) if err != nil { l.Error("failed bulk creating tickets", zap.Error(err)) return c.helpers.FinishTask(ctx, nil, nil, err) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/bulk_get_tickets.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/bulk_get_tickets.go index 703a5ef9..dd585ee2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/bulk_get_tickets.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/bulk_get_tickets.go 
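// A minimal sketch of the construction pattern these hunks adopt, assuming the generated
// builder types in github.com/conductorone/baton-sdk/pb/c1/connector/v2: request messages
// are built with X_builder{...}.Build() and fields are read back through getters, replacing
// the earlier struct literals and direct field access. The helper and ticket ID below are
// hypothetical and only illustrate the shape of the API used throughout this patch.
package main

import (
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// newGetTicketRequest shows the builder form used in these handlers in place of
// &v2.TicketsServiceGetTicketRequest{Id: ticketID}.
func newGetTicketRequest(ticketID string) *v2.TicketsServiceGetTicketRequest {
	return v2.TicketsServiceGetTicketRequest_builder{
		Id: ticketID,
	}.Build()
}

func main() {
	req := newGetTicketRequest("ticket-123")
	// Reads go through getters, which are safe to call on a nil message receiver.
	fmt.Println(req.GetId())
}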
@@ -40,15 +40,15 @@ func (c *bulkGetTicketTaskHandler) HandleTask(ctx context.Context) error { ticketRequests := make([]*v2.TicketsServiceGetTicketRequest, 0) for _, getTicketTask := range t.GetTicketRequests() { - ticketRequests = append(ticketRequests, &v2.TicketsServiceGetTicketRequest{ + ticketRequests = append(ticketRequests, v2.TicketsServiceGetTicketRequest_builder{ Id: getTicketTask.GetTicketId(), Annotations: getTicketTask.GetAnnotations(), - }) + }.Build()) } - resp, err := cc.BulkGetTickets(ctx, &v2.TicketsServiceBulkGetTicketsRequest{ + resp, err := cc.BulkGetTickets(ctx, v2.TicketsServiceBulkGetTicketsRequest_builder{ TicketRequests: ticketRequests, - }) + }.Build()) if err != nil { return c.helpers.FinishTask(ctx, nil, nil, err) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_account.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_account.go index 7f7b442d..eb45b3c4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_account.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_account.go @@ -29,7 +29,7 @@ func (g *createAccountTaskHandler) HandleTask(ctx context.Context) error { ctx, span := tracer.Start(ctx, "createAccountTaskHandler.HandleTask") defer span.End() - l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.Id), zap.Stringer("task_type", tasks.GetType(g.task))) + l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.GetId()), zap.Stringer("task_type", tasks.GetType(g.task))) t := g.task.GetCreateAccount() if t == nil || t.GetAccountInfo() == nil { @@ -41,11 +41,11 @@ func (g *createAccountTaskHandler) HandleTask(ctx context.Context) error { } cc := g.helpers.ConnectorClient() - resp, err := cc.CreateAccount(ctx, &v2.CreateAccountRequest{ + resp, err := cc.CreateAccount(ctx, v2.CreateAccountRequest_builder{ AccountInfo: t.GetAccountInfo(), CredentialOptions: t.GetCredentialOptions(), EncryptionConfigs: t.GetEncryptionConfigs(), - }) + }.Build()) if err != nil { l.Error("failed creating account", zap.Error(err)) return g.helpers.FinishTask(ctx, nil, nil, errors.Join(err, ErrTaskNonRetryable)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_resource.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_resource.go index 8b97f2eb..e9a06bc2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_resource.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_resource.go @@ -29,7 +29,7 @@ func (g *createResourceTaskHandler) HandleTask(ctx context.Context) error { ctx, span := tracer.Start(ctx, "createResourceTaskHandler.HandleTask") defer span.End() - l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.Id), zap.Stringer("task_type", tasks.GetType(g.task))) + l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.GetId()), zap.Stringer("task_type", tasks.GetType(g.task))) t := g.task.GetCreateResource() if t == nil || t.GetResource() == nil { @@ -41,9 +41,9 @@ func (g *createResourceTaskHandler) HandleTask(ctx context.Context) error { } cc := g.helpers.ConnectorClient() - resp, err := cc.CreateResource(ctx, &v2.CreateResourceRequest{ + resp, err := cc.CreateResource(ctx, v2.CreateResourceRequest_builder{ Resource: t.GetResource(), - }) + }.Build()) if err != nil { l.Error("failed create resource task", zap.Error(err)) return g.helpers.FinishTask(ctx, nil, nil, errors.Join(err, ErrTaskNonRetryable)) diff --git 
a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_ticket.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_ticket.go index 3cd82079..0873a7c2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_ticket.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_ticket.go @@ -37,11 +37,11 @@ func (c *createTicketTaskHandler) HandleTask(ctx context.Context) error { } cc := c.helpers.ConnectorClient() - resp, err := cc.CreateTicket(ctx, &v2.TicketsServiceCreateTicketRequest{ + resp, err := cc.CreateTicket(ctx, v2.TicketsServiceCreateTicketRequest_builder{ Request: t.GetTicketRequest(), Schema: t.GetTicketSchema(), Annotations: t.GetAnnotations(), - }) + }.Build()) if err != nil { l.Error("failed creating ticket", zap.Error(err)) return c.helpers.FinishTask(ctx, nil, t.GetAnnotations(), err) @@ -50,7 +50,7 @@ func (c *createTicketTaskHandler) HandleTask(ctx context.Context) error { respAnnos := annotations.Annotations(resp.GetAnnotations()) respAnnos.Merge(t.GetAnnotations()...) - resp.Annotations = respAnnos + resp.SetAnnotations(respAnnos) return c.helpers.FinishTask(ctx, resp, respAnnos, nil) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/delete_resource.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/delete_resource.go index dc2f2b8b..ef6aa747 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/delete_resource.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/delete_resource.go @@ -29,7 +29,7 @@ func (g *deleteResourceTaskHandler) HandleTask(ctx context.Context) error { ctx, span := tracer.Start(ctx, "deleteResourceTaskHandler.HandleTask") defer span.End() - l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.Id), zap.Stringer("task_type", tasks.GetType(g.task))) + l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.GetId()), zap.Stringer("task_type", tasks.GetType(g.task))) t := g.task.GetDeleteResource() if t == nil || t.GetResourceId() == nil || t.GetResourceId().GetResource() == "" || t.GetResourceId().GetResourceType() == "" { @@ -41,10 +41,10 @@ func (g *deleteResourceTaskHandler) HandleTask(ctx context.Context) error { } cc := g.helpers.ConnectorClient() - resp, err := cc.DeleteResource(ctx, &v2.DeleteResourceRequest{ + resp, err := cc.DeleteResource(ctx, v2.DeleteResourceRequest_builder{ ResourceId: t.GetResourceId(), ParentResourceId: t.GetParentResourceId(), - }) + }.Build()) if err != nil { l.Error("failed delete resource task", zap.Error(err)) return g.helpers.FinishTask(ctx, nil, nil, errors.Join(err, ErrTaskNonRetryable)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/full_sync.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/full_sync.go index 20b98c39..4171ecc2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/full_sync.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/full_sync.go @@ -11,8 +11,10 @@ import ( "go.uber.org/zap" "google.golang.org/protobuf/proto" + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/session" sdkSync "github.com/conductorone/baton-sdk/pkg/sync" "github.com/conductorone/baton-sdk/pkg/tasks" "github.com/conductorone/baton-sdk/pkg/types" @@ -32,7 +34,8 @@ type fullSyncTaskHandler struct { skipFullSync bool externalResourceC1ZPath string 
externalResourceEntitlementIdFilter string - targetedSyncResourceIDs []string + targetedSyncResources []*v2.Resource + syncResourceTypeIDs []string } func (c *fullSyncTaskHandler) sync(ctx context.Context, c1zPath string) error { @@ -55,6 +58,10 @@ func (c *fullSyncTaskHandler) sync(ctx context.Context, c1zPath string) error { syncOpts = append(syncOpts, sdkSync.WithDontExpandGrants()) } + if resources := c.task.GetSyncFull().GetTargetedSyncResources(); len(resources) > 0 { + syncOpts = append(syncOpts, sdkSync.WithTargetedSyncResources(resources)) + } + if c.task.GetSyncFull().GetSkipEntitlementsAndGrants() { // Sync only resources. This is meant to be used for a first sync so initial data gets into the UI faster. syncOpts = append(syncOpts, sdkSync.WithSkipEntitlementsAndGrants(true)) @@ -72,11 +79,20 @@ func (c *fullSyncTaskHandler) sync(ctx context.Context, c1zPath string) error { syncOpts = append(syncOpts, sdkSync.WithSkipFullSync()) } - if len(c.targetedSyncResourceIDs) > 0 { - syncOpts = append(syncOpts, sdkSync.WithTargetedSyncResourceIDs(c.targetedSyncResourceIDs)) + if len(c.targetedSyncResources) > 0 { + syncOpts = append(syncOpts, sdkSync.WithTargetedSyncResources(c.targetedSyncResources)) + } + cc := c.helpers.ConnectorClient() + + if len(c.syncResourceTypeIDs) > 0 { + syncOpts = append(syncOpts, sdkSync.WithSyncResourceTypes(c.syncResourceTypeIDs)) + } + + if setSessionStore, ok := cc.(session.SetSessionStore); ok { + syncOpts = append(syncOpts, sdkSync.WithSessionStore(setSessionStore)) } - syncer, err := sdkSync.NewSyncer(ctx, c.helpers.ConnectorClient(), syncOpts...) + syncer, err := sdkSync.NewSyncer(ctx, cc, syncOpts...) if err != nil { l.Error("failed to create syncer", zap.Error(err)) return err @@ -181,7 +197,8 @@ func newFullSyncTaskHandler( skipFullSync bool, externalResourceC1ZPath string, externalResourceEntitlementIdFilter string, - targetedSyncResourceIDs []string, + targetedSyncResources []*v2.Resource, + syncResourceTypeIDs []string, ) tasks.TaskHandler { return &fullSyncTaskHandler{ task: task, @@ -189,7 +206,8 @@ func newFullSyncTaskHandler( skipFullSync: skipFullSync, externalResourceC1ZPath: externalResourceC1ZPath, externalResourceEntitlementIdFilter: externalResourceEntitlementIdFilter, - targetedSyncResourceIDs: targetedSyncResourceIDs, + targetedSyncResources: targetedSyncResources, + syncResourceTypeIDs: syncResourceTypeIDs, } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/get_ticket.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/get_ticket.go index 3406f7ea..52068b4a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/get_ticket.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/get_ticket.go @@ -38,9 +38,9 @@ func (c *getTicketTaskHandler) HandleTask(ctx context.Context) error { return c.helpers.FinishTask(ctx, nil, nil, errors.Join(errors.New("malformed get ticket task"), ErrTaskNonRetryable)) } - ticket, err := cc.GetTicket(ctx, &v2.TicketsServiceGetTicketRequest{ + ticket, err := cc.GetTicket(ctx, v2.TicketsServiceGetTicketRequest_builder{ Id: t.GetTicketId(), - }) + }.Build()) if err != nil { return c.helpers.FinishTask(ctx, nil, t.GetAnnotations(), err) } @@ -49,14 +49,14 @@ func (c *getTicketTaskHandler) HandleTask(ctx context.Context) error { return c.helpers.FinishTask(ctx, nil, t.GetAnnotations(), errors.Join(errors.New("connector returned empty ticket"), ErrTaskNonRetryable)) } - resp := &v2.TicketsServiceGetTicketResponse{ + resp := 
v2.TicketsServiceGetTicketResponse_builder{ Ticket: ticket.GetTicket(), - } + }.Build() respAnnos := annotations.Annotations(resp.GetAnnotations()) respAnnos.Merge(t.GetAnnotations()...) - resp.Annotations = respAnnos + resp.SetAnnotations(respAnnos) l.Debug("GetTicket response", zap.Any("resp", resp)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/grant.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/grant.go index 4d4c3801..c0afb76f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/grant.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/grant.go @@ -29,7 +29,7 @@ func (g *grantTaskHandler) HandleTask(ctx context.Context) error { ctx, span := tracer.Start(ctx, "grantTaskHandler.HandleTask") defer span.End() - l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.Id), zap.Stringer("task_type", tasks.GetType(g.task))) + l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.GetId()), zap.Stringer("task_type", tasks.GetType(g.task))) if g.task.GetGrant() == nil || g.task.GetGrant().GetEntitlement() == nil || g.task.GetGrant().GetPrincipal() == nil { l.Error( @@ -44,10 +44,10 @@ func (g *grantTaskHandler) HandleTask(ctx context.Context) error { grant := g.task.GetGrant() cc := g.helpers.ConnectorClient() - resp, err := cc.Grant(ctx, &v2.GrantManagerServiceGrantRequest{ - Entitlement: grant.Entitlement, - Principal: grant.Principal, - }) + resp, err := cc.Grant(ctx, v2.GrantManagerServiceGrantRequest_builder{ + Entitlement: grant.GetEntitlement(), + Principal: grant.GetPrincipal(), + }.Build()) if err != nil { l.Error("failed while granting entitlement", zap.Error(err)) return g.helpers.FinishTask(ctx, nil, nil, errors.Join(err, ErrTaskNonRetryable)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/hello.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/hello.go index da5b0223..dee82c7b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/hello.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/hello.go @@ -6,7 +6,7 @@ import ( "runtime/debug" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "github.com/shirou/gopsutil/v3/host" + "github.com/shirou/gopsutil/v4/host" "go.uber.org/zap" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" @@ -38,7 +38,11 @@ func (c *helloTaskHandler) osInfo(ctx context.Context) (*v1.BatonServiceHelloReq info.VirtualizationSystem = "none" } - return &v1.BatonServiceHelloRequest_OSInfo{ + if info.PlatformVersion == "" { + info.PlatformVersion = info.KernelVersion + } + + return v1.BatonServiceHelloRequest_OSInfo_builder{ Hostname: info.Hostname, Os: info.OS, Platform: info.Platform, @@ -47,23 +51,42 @@ func (c *helloTaskHandler) osInfo(ctx context.Context) (*v1.BatonServiceHelloReq KernelVersion: info.KernelVersion, KernelArch: info.KernelArch, VirtualizationSystem: info.VirtualizationSystem, - }, nil + }.Build(), nil } func (c *helloTaskHandler) buildInfo(ctx context.Context) *v1.BatonServiceHelloRequest_BuildInfo { l := ctxzap.Extract(ctx) + buildInfo := v1.BatonServiceHelloRequest_BuildInfo_builder{ + LangVersion: "0.0.0", + Package: "/dummy/path", + PackageVersion: "0.0.0", + }.Build() bi, ok := debug.ReadBuildInfo() if !ok { l.Error("failed to get build info") - return &v1.BatonServiceHelloRequest_BuildInfo{} + return buildInfo + } + + if bi.Main.Path == "" { + l.Warn("missing build info Main.path") + } else { + buildInfo.SetPackage(bi.Main.Path) } - return 
&v1.BatonServiceHelloRequest_BuildInfo{ - LangVersion: bi.GoVersion, - Package: bi.Main.Path, - PackageVersion: bi.Main.Version, + if bi.Main.Version == "" { + l.Warn("missing build info Main.version") + } else { + buildInfo.SetPackageVersion(bi.Main.Version) } + + if bi.GoVersion == "" { + l.Warn("missing build info GoVersion") + } else { + buildInfo.SetLangVersion(bi.GoVersion) + } + + return buildInfo } func (c *helloTaskHandler) HandleTask(ctx context.Context) error { @@ -91,12 +114,12 @@ func (c *helloTaskHandler) HandleTask(ctx context.Context) error { if err != nil { return err } - _, err = c.helpers.HelloClient().Hello(ctx, &v1.BatonServiceHelloRequest{ + _, err = c.helpers.HelloClient().Hello(ctx, v1.BatonServiceHelloRequest_builder{ TaskId: taskID, BuildInfo: c.buildInfo(ctx), OsInfo: osInfo, ConnectorMetadata: mdResp.GetMetadata(), - }) + }.Build()) if err != nil { l.Error("failed while sending hello", zap.Error(err)) return err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_ticket_schemas.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_ticket_schemas.go index 95579eb1..bd48f298 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_ticket_schemas.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_ticket_schemas.go @@ -44,9 +44,9 @@ func (c *listTicketSchemasTaskHandler) HandleTask(ctx context.Context) error { var err error pageToken := "" for { - schemas, err := cc.ListTicketSchemas(ctx, &v2.TicketsServiceListTicketSchemasRequest{ + schemas, err := cc.ListTicketSchemas(ctx, v2.TicketsServiceListTicketSchemasRequest_builder{ PageToken: pageToken, - }) + }.Build()) if err != nil { return err } @@ -81,10 +81,10 @@ func (c *listTicketSchemasTaskHandler) HandleTask(ctx context.Context) error { return c.helpers.FinishTask(ctx, nil, nil, err) } - resp := &v2.TicketsServiceListTicketSchemasResponse{ + resp := v2.TicketsServiceListTicketSchemasResponse_builder{ List: ticketSchemas, NextPageToken: "", - } + }.Build() return c.helpers.FinishTask(ctx, resp, resp.GetAnnotations(), nil) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go index 21866f93..7b6aec9d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go @@ -18,6 +18,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" "github.com/conductorone/baton-sdk/pkg/tasks" "github.com/conductorone/baton-sdk/pkg/types" @@ -52,7 +53,8 @@ type c1ApiTaskManager struct { runnerShouldDebug bool externalResourceC1Z string externalResourceEntitlementIdFilter string - targetedSyncResourceIDs []string + targetedSyncResources []*v2.Resource + syncResourceTypeIDs []string } // getHeartbeatInterval returns an appropriate heartbeat interval. If the interval is 0, it will return the default heartbeat interval. @@ -94,13 +96,11 @@ func (c *c1ApiTaskManager) Next(ctx context.Context) (*v1.Task, time.Duration, e l.Debug("c1_api_task_manager.Next(): queueing initial hello task") c.started = true // Append a hello task to the queue on startup. 
- c.queue = append(c.queue, &v1.Task{ + c.queue = append(c.queue, v1.Task_builder{ Id: "", Status: v1.Task_STATUS_PENDING, - TaskType: &v1.Task_Hello{ - Hello: &v1.Task_HelloTask{}, - }, - }) + Hello: &v1.Task_HelloTask{}, + }.Build()) // TODO(morgabra) Get resumable tasks here and queue them. } @@ -161,16 +161,14 @@ func (c *c1ApiTaskManager) finishTask(ctx context.Context, task *v1.Task, resp p if err == nil { l.Info("c1_api_task_manager.finishTask(): finishing task successfully") - _, err = c.serviceClient.FinishTask(finishCtx, &v1.BatonServiceFinishTaskRequest{ + _, err = c.serviceClient.FinishTask(finishCtx, v1.BatonServiceFinishTaskRequest_builder{ TaskId: task.GetId(), Status: nil, - FinalState: &v1.BatonServiceFinishTaskRequest_Success_{ - Success: &v1.BatonServiceFinishTaskRequest_Success{ - Annotations: annos, - Response: marshalledResp, - }, - }, - }) + Success: v1.BatonServiceFinishTaskRequest_Success_builder{ + Annotations: annos, + Response: marshalledResp, + }.Build(), + }.Build()) if err != nil { l.Error("c1_api_task_manager.finishTask(): error while attempting to finish task successfully", zap.Error(err)) return err @@ -186,20 +184,18 @@ func (c *c1ApiTaskManager) finishTask(ctx context.Context, task *v1.Task, resp p statusErr = status.New(codes.Unknown, err.Error()) } - _, rpcErr := c.serviceClient.FinishTask(finishCtx, &v1.BatonServiceFinishTaskRequest{ + _, rpcErr := c.serviceClient.FinishTask(finishCtx, v1.BatonServiceFinishTaskRequest_builder{ TaskId: task.GetId(), Status: &pbstatus.Status{ //nolint:gosec // No risk of overflow because `Code` is a small enum. Code: int32(statusErr.Code()), Message: statusErr.Message(), }, - FinalState: &v1.BatonServiceFinishTaskRequest_Error_{ - Error: &v1.BatonServiceFinishTaskRequest_Error{ - NonRetryable: errors.Is(err, ErrTaskNonRetryable), - Annotations: annos, - }, - }, - }) + Error: v1.BatonServiceFinishTaskRequest_Error_builder{ + NonRetryable: errors.Is(err, ErrTaskNonRetryable), + Annotations: annos, + }.Build(), + }.Build()) if rpcErr != nil { l.Error("c1_api_task_manager.finishTask(): error finishing task", zap.Error(rpcErr)) return errors.Join(err, rpcErr) @@ -253,7 +249,8 @@ func (c *c1ApiTaskManager) Process(ctx context.Context, task *v1.Task, cc types. c.skipFullSync, c.externalResourceC1Z, c.externalResourceEntitlementIdFilter, - c.targetedSyncResourceIDs, + c.targetedSyncResources, + c.syncResourceTypeIDs, ) case taskTypes.HelloType: handler = newHelloTaskHandler(task, tHelpers) @@ -304,7 +301,8 @@ func (c *c1ApiTaskManager) Process(ctx context.Context, task *v1.Task, cc types. 
func NewC1TaskManager( ctx context.Context, clientID string, clientSecret string, tempDir string, skipFullSync bool, - externalC1Z string, externalResourceEntitlementIdFilter string, targetedSyncResourceIDs []string, + externalC1Z string, externalResourceEntitlementIdFilter string, targetedSyncResources []*v2.Resource, + syncResourceTypeIDs []string, ) (tasks.Manager, error) { serviceClient, err := newServiceClient(ctx, clientID, clientSecret) if err != nil { @@ -317,6 +315,7 @@ func NewC1TaskManager( skipFullSync: skipFullSync, externalResourceC1Z: externalC1Z, externalResourceEntitlementIdFilter: externalResourceEntitlementIdFilter, - targetedSyncResourceIDs: targetedSyncResourceIDs, + targetedSyncResources: targetedSyncResources, + syncResourceTypeIDs: syncResourceTypeIDs, }, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/revoke.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/revoke.go index efe072cd..ee57c2ba 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/revoke.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/revoke.go @@ -31,7 +31,7 @@ func (r *revokeTaskHandler) HandleTask(ctx context.Context) error { ctx, span := tracer.Start(ctx, "revokeTaskHandler.HandleTask") defer span.End() - l := ctxzap.Extract(ctx).With(zap.String("task_id", r.task.Id), zap.Stringer("task_type", tasks.GetType(r.task))) + l := ctxzap.Extract(ctx).With(zap.String("task_id", r.task.GetId()), zap.Stringer("task_type", tasks.GetType(r.task))) if r.task.GetRevoke() == nil || r.task.GetRevoke().GetGrant() == nil { l.Error("revoke task was nil or missing grant", zap.Any("revoke", r.task.GetRevoke()), zap.Any("grant", r.task.GetRevoke().GetGrant())) @@ -39,9 +39,9 @@ func (r *revokeTaskHandler) HandleTask(ctx context.Context) error { } cc := r.helpers.ConnectorClient() - resp, err := cc.Revoke(ctx, &v2.GrantManagerServiceRevokeRequest{ + resp, err := cc.Revoke(ctx, v2.GrantManagerServiceRevokeRequest_builder{ Grant: r.task.GetRevoke().GetGrant(), - }) + }.Build()) if err != nil { l.Error("failed while granting entitlement", zap.Error(err)) return r.helpers.FinishTask(ctx, nil, nil, errors.Join(err, ErrTaskNonRetryable)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/rotate_credentials.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/rotate_credentials.go index 36d5f803..45f28d9c 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/rotate_credentials.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/rotate_credentials.go @@ -29,7 +29,7 @@ func (g *rotateCredentialsTaskHandler) HandleTask(ctx context.Context) error { ctx, span := tracer.Start(ctx, "rotateCredentialsTaskHandler.HandleTask") defer span.End() - l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.Id), zap.Stringer("task_type", tasks.GetType(g.task))) + l := ctxzap.Extract(ctx).With(zap.String("task_id", g.task.GetId()), zap.Stringer("task_type", tasks.GetType(g.task))) t := g.task.GetRotateCredentials() if t == nil || t.GetResourceId() == nil { @@ -41,11 +41,11 @@ func (g *rotateCredentialsTaskHandler) HandleTask(ctx context.Context) error { } cc := g.helpers.ConnectorClient() - resp, err := cc.RotateCredential(ctx, &v2.RotateCredentialRequest{ + resp, err := cc.RotateCredential(ctx, v2.RotateCredentialRequest_builder{ ResourceId: t.GetResourceId(), CredentialOptions: t.GetCredentialOptions(), EncryptionConfigs: t.GetEncryptionConfigs(), - }) + }.Build()) if err != nil { l.Error("failed rotating 
credentials", zap.Error(err)) return g.helpers.FinishTask(ctx, nil, nil, errors.Join(err, ErrTaskNonRetryable)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/service_client.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/service_client.go index 29ab8ccd..f7ef2c82 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/service_client.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/service_client.go @@ -92,7 +92,7 @@ func (c *c1ServiceClient) Hello(ctx context.Context, in *v1.BatonServiceHelloReq } defer done() - in.HostId = c.getHostID() + in.SetHostId(c.getHostID()) return client.Hello(ctx, in) } @@ -107,7 +107,7 @@ func (c *c1ServiceClient) GetTask(ctx context.Context, in *v1.BatonServiceGetTas } defer done() - in.HostId = c.getHostID() + in.SetHostId(c.getHostID()) return client.GetTask(ctx, in) } @@ -122,7 +122,7 @@ func (c *c1ServiceClient) Heartbeat(ctx context.Context, in *v1.BatonServiceHear } defer done() - in.HostId = c.getHostID() + in.SetHostId(c.getHostID()) return client.Heartbeat(ctx, in) } @@ -137,7 +137,7 @@ func (c *c1ServiceClient) FinishTask(ctx context.Context, in *v1.BatonServiceFin } defer done() - in.HostId = c.getHostID() + in.SetHostId(c.getHostID()) return client.FinishTask(ctx, in) } @@ -206,14 +206,12 @@ func (c *c1ServiceClient) upload(ctx context.Context, task *v1.Task, r io.ReadSe return err } - err = uc.Send(&v1.BatonServiceUploadAssetRequest{ - Msg: &v1.BatonServiceUploadAssetRequest_Metadata{ - Metadata: &v1.BatonServiceUploadAssetRequest_UploadMetadata{ - HostId: c.getHostID(), - TaskId: task.Id, - }, - }, - }) + err = uc.Send(v1.BatonServiceUploadAssetRequest_builder{ + Metadata: v1.BatonServiceUploadAssetRequest_UploadMetadata_builder{ + HostId: c.getHostID(), + TaskId: task.GetId(), + }.Build(), + }.Build()) if err != nil { l.Error("failed to send upload metadata", zap.Error(err)) return err @@ -239,26 +237,22 @@ func (c *c1ServiceClient) upload(ctx context.Context, task *v1.Task, r io.ReadSe return err } - err = uc.Send(&v1.BatonServiceUploadAssetRequest{ - Msg: &v1.BatonServiceUploadAssetRequest_Data{ - Data: &v1.BatonServiceUploadAssetRequest_UploadData{ - Data: chunk, - }, - }, - }) + err = uc.Send(v1.BatonServiceUploadAssetRequest_builder{ + Data: v1.BatonServiceUploadAssetRequest_UploadData_builder{ + Data: chunk, + }.Build(), + }.Build()) if err != nil { l.Error("failed to send upload chunk", zap.Error(err)) return err } } - err = uc.Send(&v1.BatonServiceUploadAssetRequest{ - Msg: &v1.BatonServiceUploadAssetRequest_Eof{ - Eof: &v1.BatonServiceUploadAssetRequest_UploadEOF{ - Sha256Checksum: shaChecksum, - }, - }, - }) + err = uc.Send(v1.BatonServiceUploadAssetRequest_builder{ + Eof: v1.BatonServiceUploadAssetRequest_UploadEOF_builder{ + Sha256Checksum: shaChecksum, + }.Build(), + }.Build()) if err != nil { l.Error("failed to send upload metadata", zap.Error(err)) return err @@ -270,7 +264,7 @@ func (c *c1ServiceClient) upload(ctx context.Context, task *v1.Task, r io.ReadSe return err } - l.Info("uploaded asset", zap.String("task_id", task.Id), zap.Int64("size", rLen)) + l.Info("uploaded asset", zap.String("task_id", task.GetId()), zap.Int64("size", rLen)) return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/task_helpers.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/task_helpers.go index 7e8342d8..8a8a8dd4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/task_helpers.go +++ 
b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/task_helpers.go @@ -71,17 +71,17 @@ func (t *taskHelpers) HeartbeatTask(ctx context.Context, annos annotations.Annot rCtx, rCancel := context.WithCancelCause(ctx) l.Debug("heartbeat: sending initial heartbeat") - resp, err := t.serviceClient.Heartbeat(ctx, &v1.BatonServiceHeartbeatRequest{ + resp, err := t.serviceClient.Heartbeat(ctx, v1.BatonServiceHeartbeatRequest_builder{ TaskId: t.task.GetId(), Annotations: annos, - }) + }.Build()) if err != nil { err = errors.Join(ErrTaskHeartbeatFailed, err) l.Error("heartbeat: failed sending initial heartbeat", zap.Error(err)) rCancel(err) return nil, err } - if resp.Cancelled { + if resp.GetCancelled() { err = ErrTaskCancelled l.Debug("heartbeat: task was cancelled by server") rCancel(err) @@ -111,10 +111,10 @@ func (t *taskHelpers) HeartbeatTask(ctx context.Context, annos annotations.Annot return case <-time.After(heartbeatInterval): - resp, err := t.serviceClient.Heartbeat(ctx, &v1.BatonServiceHeartbeatRequest{ + resp, err := t.serviceClient.Heartbeat(ctx, v1.BatonServiceHeartbeatRequest_builder{ TaskId: t.task.GetId(), Annotations: annos, - }) + }.Build()) if err != nil { // If our parent context gets cancelled we can just leave. if ctxErr := ctx.Err(); ctxErr != nil { @@ -136,7 +136,7 @@ func (t *taskHelpers) HeartbeatTask(ctx context.Context, annos annotations.Annot heartbeatInterval = getHeartbeatInterval(resp.GetNextHeartbeat().AsDuration()) l.Debug("heartbeat: success", zap.Duration("next_heartbeat", heartbeatInterval)) - if resp.Cancelled { + if resp.GetCancelled() { l.Debug("heartbeat: task was cancelled by server") rCancel(ErrTaskCancelled) return diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/accounter.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/accounter.go index 33c03735..7d507117 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/accounter.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/accounter.go @@ -34,9 +34,9 @@ func (m *localAccountManager) ShouldDebug() bool { func (m *localAccountManager) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_CreateAccount{}, - } + task = v1.Task_builder{ + CreateAccount: &v1.Task_CreateAccountTask{}, + }.Build() }) return task, 0, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/action_invoker.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/action_invoker.go index 236842d1..eb38605a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/action_invoker.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/action_invoker.go @@ -21,8 +21,9 @@ type localActionInvoker struct { dbPath string o sync.Once - action string - args *structpb.Struct + action string + resourceTypeID string // Optional: if set, invokes a resource-scoped action + args *structpb.Struct } func (m *localActionInvoker) GetTempDir() string { @@ -36,14 +37,13 @@ func (m *localActionInvoker) ShouldDebug() bool { func (m *localActionInvoker) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_ActionInvoke{ - ActionInvoke: &v1.Task_ActionInvokeTask{ - Name: m.action, - Args: m.args, - }, - }, - } + task = v1.Task_builder{ + ActionInvoke: v1.Task_ActionInvokeTask_builder{ + Name: m.action, + Args: m.args, + ResourceTypeId: m.resourceTypeID, + }.Build(), + }.Build() }) 
return task, 0, nil } @@ -54,29 +54,63 @@ func (m *localActionInvoker) Process(ctx context.Context, task *v1.Task, cc type defer span.End() t := task.GetActionInvoke() - resp, err := cc.InvokeAction(ctx, &v2.InvokeActionRequest{ + reqBuilder := v2.InvokeActionRequest_builder{ Name: t.GetName(), Args: t.GetArgs(), Annotations: t.GetAnnotations(), - }) + } + if resourceTypeID := t.GetResourceTypeId(); resourceTypeID != "" { + reqBuilder.ResourceTypeId = resourceTypeID + } + resp, err := cc.InvokeAction(ctx, reqBuilder.Build()) if err != nil { return err } - l.Info("ActionInvoke response", zap.Any("resp", resp)) + status := resp.GetStatus() + finalResp := resp.GetResponse() + l.Info("ActionInvoke response", + zap.String("action_id", resp.GetId()), + zap.String("name", resp.GetName()), + zap.String("status", resp.GetStatus().String()), + zap.Any("response", resp.GetResponse()), + ) + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for status == v2.BatonActionStatus_BATON_ACTION_STATUS_PENDING || status == v2.BatonActionStatus_BATON_ACTION_STATUS_RUNNING { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + r, err := cc.GetActionStatus(ctx, &v2.GetActionStatusRequest{ + Id: resp.GetId(), + }) + if err != nil { + return fmt.Errorf("failed to poll action status: %w", err) + } + status = r.GetStatus() + finalResp = r.GetResponse() + } + } + + l.Info("ActionInvoke response", zap.Any("resp", finalResp)) - if resp.GetStatus() == v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED { - return fmt.Errorf("action invoke failed: %v", resp.GetResponse()) + if status == v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED { + return fmt.Errorf("action invoke failed: %v", finalResp) } return nil } // NewActionInvoker returns a task manager that queues an action invoke task. -func NewActionInvoker(ctx context.Context, dbPath string, action string, args *structpb.Struct) tasks.Manager { +// If resourceTypeID is provided, it invokes a resource-scoped action. 
+func NewActionInvoker(ctx context.Context, dbPath string, action string, resourceTypeID string, args *structpb.Struct) tasks.Manager { return &localActionInvoker{ - dbPath: dbPath, - action: action, - args: args, + dbPath: dbPath, + action: action, + resourceTypeID: resourceTypeID, + args: args, } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/action_schema_list.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/action_schema_list.go new file mode 100644 index 00000000..5e4a261d --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/action_schema_list.go @@ -0,0 +1,77 @@ +package local + +import ( + "context" + "sync" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" + "github.com/conductorone/baton-sdk/pkg/tasks" + "github.com/conductorone/baton-sdk/pkg/types" +) + +type localListActionSchemas struct { + o sync.Once + resourceTypeID string // Optional: filter by resource type +} + +func (m *localListActionSchemas) GetTempDir() string { + return "" +} + +func (m *localListActionSchemas) ShouldDebug() bool { + return false +} + +func (m *localListActionSchemas) Next(ctx context.Context) (*v1.Task, time.Duration, error) { + var task *v1.Task + m.o.Do(func() { + task = v1.Task_builder{ + ActionListSchemas: v1.Task_ActionListSchemasTask_builder{ + ResourceTypeId: m.resourceTypeID, + }.Build(), + }.Build() + }) + return task, 0, nil +} + +func (m *localListActionSchemas) Process(ctx context.Context, task *v1.Task, cc types.ConnectorClient) error { + l := ctxzap.Extract(ctx) + + reqBuilder := v2.ListActionSchemasRequest_builder{} + if m.resourceTypeID != "" { + reqBuilder.ResourceTypeId = m.resourceTypeID + } + + resp, err := cc.ListActionSchemas(ctx, reqBuilder.Build()) + if err != nil { + return err + } + + if m.resourceTypeID != "" { + l.Info("Action Schemas", + zap.String("resource_type_id", m.resourceTypeID), + zap.Int("count", len(resp.GetSchemas())), + zap.Any("schemas", resp.GetSchemas()), + ) + } else { + l.Info("Action Schemas", + zap.Int("count", len(resp.GetSchemas())), + zap.Any("schemas", resp.GetSchemas()), + ) + } + + return nil +} + +// NewListActionSchemas returns a task manager that queues a list action schemas task. +// If resourceTypeID is provided, it filters schemas for that specific resource type. 
+func NewListActionSchemas(ctx context.Context, resourceTypeID string) tasks.Manager { + return &localListActionSchemas{ + resourceTypeID: resourceTypeID, + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/compactor.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/compactor.go index 1153134f..e13d756a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/compactor.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/compactor.go @@ -32,9 +32,9 @@ func (m *localCompactor) ShouldDebug() bool { func (m *localCompactor) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_CompactSyncs_{}, - } + task = v1.Task_builder{ + CompactSyncs: &v1.Task_CompactSyncs{}, + }.Build() }) return task, 0, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/deleter.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/deleter.go index d5db8cb1..400c88b2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/deleter.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/deleter.go @@ -32,9 +32,9 @@ func (m *localResourceDeleter) ShouldDebug() bool { func (m *localResourceDeleter) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_DeleteResource{}, - } + task = v1.Task_builder{ + DeleteResource: &v1.Task_DeleteResourceTask{}, + }.Build() }) return task, 0, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go index 75212f8c..856f2690 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go @@ -34,9 +34,9 @@ func (m *localDiffer) ShouldDebug() bool { func (m *localDiffer) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_CreateSyncDiff{}, - } + task = v1.Task_builder{ + CreateSyncDiff: &v1.Task_CreateSyncDiffTask{}, + }.Build() }) return task, 0, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/event_feed.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/event_feed.go index 18e04a4b..a4dab4cf 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/event_feed.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/event_feed.go @@ -19,6 +19,7 @@ type localEventFeed struct { o sync.Once feedId string startAt time.Time + cursor string } const EventsPerPageLocally = 100 @@ -34,13 +35,11 @@ func (m *localEventFeed) ShouldDebug() bool { func (m *localEventFeed) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_EventFeed{ - EventFeed: &v1.Task_EventFeedTask{ - StartAt: timestamppb.New(m.startAt), - }, - }, - } + task = v1.Task_builder{ + EventFeed: v1.Task_EventFeedTask_builder{ + StartAt: timestamppb.New(m.startAt), + }.Build(), + }.Build() }) return task, 0, nil } @@ -49,14 +48,14 @@ func (m *localEventFeed) Process(ctx context.Context, task *v1.Task, cc types.Co ctx, span := tracer.Start(ctx, "localEventFeed.Process", trace.WithNewRoot()) defer span.End() - var pageToken string + pageToken := m.cursor for { - resp, err := cc.ListEvents(ctx, &v2.ListEventsRequest{ + resp, err := cc.ListEvents(ctx, 
v2.ListEventsRequest_builder{ PageSize: EventsPerPageLocally, Cursor: pageToken, StartAt: task.GetEventFeed().GetStartAt(), EventFeedId: m.feedId, - }) + }.Build()) if err != nil { return err } @@ -78,9 +77,10 @@ func (m *localEventFeed) Process(ctx context.Context, task *v1.Task, cc types.Co } // NewEventFeed returns a task manager that queues an event feed task. -func NewEventFeed(ctx context.Context, feedId string, startAt time.Time) tasks.Manager { +func NewEventFeed(ctx context.Context, feedId string, startAt time.Time, cursor string) tasks.Manager { return &localEventFeed{ feedId: feedId, startAt: startAt, + cursor: cursor, } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/granter.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/granter.go index 33c091a7..a6dab321 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/granter.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/granter.go @@ -33,9 +33,9 @@ func (m *localGranter) ShouldDebug() bool { func (m *localGranter) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_Grant{}, - } + task = v1.Task_builder{ + Grant: &v1.Task_GrantTask{}, + }.Build() }) return task, 0, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/revoker.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/revoker.go index b0ef0e9b..2c9825de 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/revoker.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/revoker.go @@ -31,9 +31,9 @@ func (m *localRevoker) ShouldDebug() bool { func (m *localRevoker) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_Revoke{}, - } + task = v1.Task_builder{ + Revoke: &v1.Task_RevokeTask{}, + }.Build() }) return task, 0, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/rotator.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/rotator.go index 4984b6f1..ccea5684 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/rotator.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/rotator.go @@ -32,9 +32,9 @@ func (m *localCredentialRotator) ShouldDebug() bool { func (m *localCredentialRotator) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_RotateCredentials{}, - } + task = v1.Task_builder{ + RotateCredentials: &v1.Task_RotateCredentialsTask{}, + }.Build() }) return task, 0, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/syncer.go index 86084448..b502f464 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/syncer.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/syncer.go @@ -8,7 +8,9 @@ import ( "go.opentelemetry.io/otel/trace" + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" + "github.com/conductorone/baton-sdk/pkg/session" sdkSync "github.com/conductorone/baton-sdk/pkg/sync" "github.com/conductorone/baton-sdk/pkg/tasks" "github.com/conductorone/baton-sdk/pkg/types" @@ -20,8 +22,10 @@ type localSyncer struct { tmpDir string externalResourceC1Z string externalResourceEntitlementIdFilter string - targetedSyncResourceIDs []string + 
targetedSyncResources []*v2.Resource skipEntitlementsAndGrants bool + skipGrants bool + syncResourceTypeIDs []string } type Option func(*localSyncer) @@ -44,9 +48,15 @@ func WithExternalResourceEntitlementIdFilter(entitlementId string) Option { } } -func WithTargetedSyncResourceIDs(resourceIDs []string) Option { +func WithTargetedSyncResources(resources []*v2.Resource) Option { return func(m *localSyncer) { - m.targetedSyncResourceIDs = resourceIDs + m.targetedSyncResources = resources + } +} + +func WithSyncResourceTypeIDs(resourceTypeIDs []string) Option { + return func(m *localSyncer) { + m.syncResourceTypeIDs = resourceTypeIDs } } @@ -56,6 +66,12 @@ func WithSkipEntitlementsAndGrants(skip bool) Option { } } +func WithSkipGrants(skip bool) Option { + return func(m *localSyncer) { + m.skipGrants = skip + } +} + func (m *localSyncer) GetTempDir() string { return "" } @@ -67,9 +83,9 @@ func (m *localSyncer) ShouldDebug() bool { func (m *localSyncer) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_SyncFull{}, - } + task = v1.Task_builder{ + SyncFull: &v1.Task_SyncFullTask{}, + }.Build() }) return task, 0, nil } @@ -78,13 +94,20 @@ func (m *localSyncer) Process(ctx context.Context, task *v1.Task, cc types.Conne ctx, span := tracer.Start(ctx, "localSyncer.Process", trace.WithNewRoot()) defer span.End() + var setSessionStore session.SetSessionStore + if ssetSessionStore, ok := cc.(session.SetSessionStore); ok { + setSessionStore = ssetSessionStore + } syncer, err := sdkSync.NewSyncer(ctx, cc, sdkSync.WithC1ZPath(m.dbPath), sdkSync.WithTmpDir(m.tmpDir), sdkSync.WithExternalResourceC1ZPath(m.externalResourceC1Z), sdkSync.WithExternalResourceEntitlementIdFilter(m.externalResourceEntitlementIdFilter), - sdkSync.WithTargetedSyncResourceIDs(m.targetedSyncResourceIDs), + sdkSync.WithTargetedSyncResources(m.targetedSyncResources), sdkSync.WithSkipEntitlementsAndGrants(m.skipEntitlementsAndGrants), + sdkSync.WithSkipGrants(m.skipGrants), + sdkSync.WithSessionStore(setSessionStore), + sdkSync.WithSyncResourceTypes(m.syncResourceTypeIDs), ) if err != nil { return err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/ticket.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/ticket.go index 385e8b47..8aed696f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/ticket.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/ticket.go @@ -56,11 +56,9 @@ func (m *localBulkCreateTicket) ShouldDebug() bool { func (m *localBulkCreateTicket) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_BulkCreateTickets{ - BulkCreateTickets: &v1.Task_BulkCreateTicketsTask{}, - }, - } + task = v1.Task_builder{ + BulkCreateTickets: &v1.Task_BulkCreateTicketsTask{}, + }.Build() }) return task, 0, nil } @@ -78,23 +76,23 @@ func (m *localBulkCreateTicket) Process(ctx context.Context, task *v1.Task, cc t ticketReqs := make([]*v2.TicketsServiceCreateTicketRequest, 0) for _, template := range templates.Tickets { - schema, err := cc.GetTicketSchema(ctx, &v2.TicketsServiceGetTicketSchemaRequest{ + schema, err := cc.GetTicketSchema(ctx, v2.TicketsServiceGetTicketSchemaRequest_builder{ Id: template.SchemaID, - }) + }.Build()) if err != nil { return err } - ticketRequestBody := &v2.TicketRequest{ + ticketRequestBody := v2.TicketRequest_builder{ DisplayName: template.DisplayName, Description: 
template.Description, Labels: template.Labels, - } + }.Build() if template.StatusId != "" { - ticketRequestBody.Status = &v2.TicketStatus{ + ticketRequestBody.SetStatus(v2.TicketStatus_builder{ Id: template.StatusId, - } + }.Build()) } if template.RequestedForId != "" { @@ -103,28 +101,28 @@ func (m *localBulkCreateTicket) Process(ctx context.Context, task *v1.Task, cc t if err != nil { return err } - ticketRequestBody.RequestedFor = requestedUser + ticketRequestBody.SetRequestedFor(requestedUser) } cfs := make(map[string]*v2.TicketCustomField) for k, v := range template.CustomFields { - newCfs, err := sdkTicket.CustomFieldForSchemaField(k, schema.Schema, v) + newCfs, err := sdkTicket.CustomFieldForSchemaField(k, schema.GetSchema(), v) if err != nil { return err } cfs[k] = newCfs } - ticketRequestBody.CustomFields = cfs + ticketRequestBody.SetCustomFields(cfs) - ticketReqs = append(ticketReqs, &v2.TicketsServiceCreateTicketRequest{ + ticketReqs = append(ticketReqs, v2.TicketsServiceCreateTicketRequest_builder{ Request: ticketRequestBody, Schema: schema.GetSchema(), - }) + }.Build()) } - bulkTicketReq := &v2.TicketsServiceBulkCreateTicketsRequest{ + bulkTicketReq := v2.TicketsServiceBulkCreateTicketsRequest_builder{ TicketRequests: ticketReqs, - } + }.Build() resp, err := cc.BulkCreateTickets(ctx, bulkTicketReq) if err != nil { @@ -185,11 +183,9 @@ func (m *localCreateTicket) ShouldDebug() bool { func (m *localCreateTicket) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_CreateTicketTask_{ - CreateTicketTask: &v1.Task_CreateTicketTask{}, - }, - } + task = v1.Task_builder{ + CreateTicketTask: &v1.Task_CreateTicketTask{}, + }.Build() }) return task, 0, nil } @@ -202,23 +198,23 @@ func (m *localCreateTicket) Process(ctx context.Context, task *v1.Task, cc types return err } - schema, err := cc.GetTicketSchema(ctx, &v2.TicketsServiceGetTicketSchemaRequest{ + schema, err := cc.GetTicketSchema(ctx, v2.TicketsServiceGetTicketSchemaRequest_builder{ Id: template.SchemaID, - }) + }.Build()) if err != nil { return err } - ticketRequestBody := &v2.TicketRequest{ + ticketRequestBody := v2.TicketRequest_builder{ DisplayName: template.DisplayName, Description: template.Description, Labels: template.Labels, - } + }.Build() if template.StatusId != "" { - ticketRequestBody.Status = &v2.TicketStatus{ + ticketRequestBody.SetStatus(v2.TicketStatus_builder{ Id: template.StatusId, - } + }.Build()) } if template.RequestedForId != "" { @@ -227,22 +223,22 @@ func (m *localCreateTicket) Process(ctx context.Context, task *v1.Task, cc types if err != nil { return err } - ticketRequestBody.RequestedFor = requestedUser + ticketRequestBody.SetRequestedFor(requestedUser) } cfs := make(map[string]*v2.TicketCustomField) for k, v := range template.CustomFields { - newCfs, err := sdkTicket.CustomFieldForSchemaField(k, schema.Schema, v) + newCfs, err := sdkTicket.CustomFieldForSchemaField(k, schema.GetSchema(), v) if err != nil { return err } cfs[k] = newCfs } - ticketRequestBody.CustomFields = cfs - ticketReq := &v2.TicketsServiceCreateTicketRequest{ + ticketRequestBody.SetCustomFields(cfs) + ticketReq := v2.TicketsServiceCreateTicketRequest_builder{ Request: ticketRequestBody, Schema: schema.GetSchema(), - } + }.Build() resp, err := cc.CreateTicket(ctx, ticketReq) if err != nil { @@ -278,13 +274,11 @@ func (m *localGetTicket) ShouldDebug() bool { func (m *localGetTicket) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task 
*v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_GetTicket{ - GetTicket: &v1.Task_GetTicketTask{ - TicketId: m.ticketId, - }, - }, - } + task = v1.Task_builder{ + GetTicket: v1.Task_GetTicketTask_builder{ + TicketId: m.ticketId, + }.Build(), + }.Build() }) return task, 0, nil } @@ -292,9 +286,9 @@ func (m *localGetTicket) Next(ctx context.Context) (*v1.Task, time.Duration, err func (m *localGetTicket) Process(ctx context.Context, task *v1.Task, cc types.ConnectorClient) error { l := ctxzap.Extract(ctx) - resp, err := cc.GetTicket(ctx, &v2.TicketsServiceGetTicketRequest{ + resp, err := cc.GetTicket(ctx, v2.TicketsServiceGetTicketRequest_builder{ Id: m.ticketId, - }) + }.Build()) if err != nil { return err } @@ -326,9 +320,9 @@ func (m *localListTicketSchemas) ShouldDebug() bool { func (m *localListTicketSchemas) Next(ctx context.Context) (*v1.Task, time.Duration, error) { var task *v1.Task m.o.Do(func() { - task = &v1.Task{ - TaskType: &v1.Task_ListTicketSchemas{}, - } + task = v1.Task_builder{ + ListTicketSchemas: &v1.Task_ListTicketSchemasTask{}, + }.Build() }) return task, 0, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go index d90dd35b..db0e64d4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go @@ -21,105 +21,103 @@ type TaskHandler interface { } func Is(task *v1.Task, target taskTypes.TaskType) bool { - if task == nil || task.TaskType == nil { + if task == nil || !task.HasTaskType() { return false } - var ok bool + actualType := task.WhichTaskType() switch target { case taskTypes.FullSyncType: - _, ok = task.GetTaskType().(*v1.Task_SyncFull) + return actualType == v1.Task_SyncFull_case case taskTypes.GrantType: - _, ok = task.GetTaskType().(*v1.Task_Grant) + return actualType == v1.Task_Grant_case case taskTypes.RevokeType: - _, ok = task.GetTaskType().(*v1.Task_Revoke) + return actualType == v1.Task_Revoke_case case taskTypes.HelloType: - _, ok = task.GetTaskType().(*v1.Task_Hello) + return actualType == v1.Task_Hello_case case taskTypes.EventFeedType: - _, ok = task.GetTaskType().(*v1.Task_EventFeed) + return actualType == v1.Task_EventFeed_case case taskTypes.NoneType: - _, ok = task.GetTaskType().(*v1.Task_None) + return actualType == v1.Task_None_case case taskTypes.CreateAccountType: - _, ok = task.GetTaskType().(*v1.Task_CreateAccount) + return actualType == v1.Task_CreateAccount_case case taskTypes.CreateResourceType: - _, ok = task.GetTaskType().(*v1.Task_CreateResource) + return actualType == v1.Task_CreateResource_case case taskTypes.DeleteResourceType: - _, ok = task.GetTaskType().(*v1.Task_DeleteResource) + return actualType == v1.Task_DeleteResource_case case taskTypes.RotateCredentialsType: - _, ok = task.GetTaskType().(*v1.Task_RotateCredentials) + return actualType == v1.Task_RotateCredentials_case case taskTypes.CreateTicketType: - _, ok = task.GetTaskType().(*v1.Task_CreateTicketTask_) + return actualType == v1.Task_CreateTicketTask_case case taskTypes.ListTicketSchemasType: - _, ok = task.GetTaskType().(*v1.Task_ListTicketSchemas) + return actualType == v1.Task_ListTicketSchemas_case case taskTypes.GetTicketType: - _, ok = task.GetTaskType().(*v1.Task_GetTicket) + return actualType == v1.Task_GetTicket_case case taskTypes.BulkCreateTicketsType: - _, ok = task.GetTaskType().(*v1.Task_BulkCreateTickets) + return actualType == v1.Task_BulkCreateTickets_case case 
taskTypes.BulkGetTicketsType: - _, ok = task.GetTaskType().(*v1.Task_BulkGetTickets) + return actualType == v1.Task_BulkGetTickets_case case taskTypes.ActionListSchemasType: - _, ok = task.GetTaskType().(*v1.Task_ActionListSchemas) + return actualType == v1.Task_ActionListSchemas_case case taskTypes.ActionGetSchemaType: - _, ok = task.GetTaskType().(*v1.Task_ActionGetSchema) + return actualType == v1.Task_ActionGetSchema_case case taskTypes.ActionInvokeType: - _, ok = task.GetTaskType().(*v1.Task_ActionInvoke) + return actualType == v1.Task_ActionInvoke_case case taskTypes.ActionStatusType: - _, ok = task.GetTaskType().(*v1.Task_ActionStatus) + return actualType == v1.Task_ActionStatus_case case taskTypes.CreateSyncDiff: - _, ok = task.GetTaskType().(*v1.Task_CreateSyncDiff) + return actualType == v1.Task_CreateSyncDiff_case default: return false } - - return ok } func GetType(task *v1.Task) taskTypes.TaskType { - if task == nil || task.TaskType == nil { + if task == nil || !task.HasTaskType() { return taskTypes.UnknownType } - switch task.GetTaskType().(type) { - case *v1.Task_SyncFull: + switch task.WhichTaskType() { + case v1.Task_SyncFull_case: return taskTypes.FullSyncType - case *v1.Task_Grant: + case v1.Task_Grant_case: return taskTypes.GrantType - case *v1.Task_Revoke: + case v1.Task_Revoke_case: return taskTypes.RevokeType - case *v1.Task_Hello: + case v1.Task_Hello_case: return taskTypes.HelloType - case *v1.Task_EventFeed: + case v1.Task_EventFeed_case: return taskTypes.EventFeedType - case *v1.Task_None: + case v1.Task_None_case: return taskTypes.NoneType - case *v1.Task_CreateAccount: + case v1.Task_CreateAccount_case: return taskTypes.CreateAccountType - case *v1.Task_CreateResource: + case v1.Task_CreateResource_case: return taskTypes.CreateResourceType - case *v1.Task_DeleteResource: + case v1.Task_DeleteResource_case: return taskTypes.DeleteResourceType - case *v1.Task_RotateCredentials: + case v1.Task_RotateCredentials_case: return taskTypes.RotateCredentialsType - case *v1.Task_CreateTicketTask_: + case v1.Task_CreateTicketTask_case: return taskTypes.CreateTicketType - case *v1.Task_ListTicketSchemas: + case v1.Task_ListTicketSchemas_case: return taskTypes.ListTicketSchemasType - case *v1.Task_GetTicket: + case v1.Task_GetTicket_case: return taskTypes.GetTicketType - case *v1.Task_BulkCreateTickets: + case v1.Task_BulkCreateTickets_case: return taskTypes.BulkCreateTicketsType - case *v1.Task_BulkGetTickets: + case v1.Task_BulkGetTickets_case: return taskTypes.BulkGetTicketsType - case *v1.Task_ActionListSchemas: + case v1.Task_ActionListSchemas_case: return taskTypes.ActionListSchemasType - case *v1.Task_ActionGetSchema: + case v1.Task_ActionGetSchema_case: return taskTypes.ActionGetSchemaType - case *v1.Task_ActionInvoke: + case v1.Task_ActionInvoke_case: return taskTypes.ActionInvokeType - case *v1.Task_ActionStatus: + case v1.Task_ActionStatus_case: return taskTypes.ActionStatusType - case *v1.Task_CreateSyncDiff: + case v1.Task_CreateSyncDiff_case: return taskTypes.CreateSyncDiff default: return taskTypes.UnknownType diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go index 5be4e840..8715c825 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go @@ -12,44 +12,44 @@ type EntitlementOption func(*v2.Entitlement) func 
WithAnnotation(msgs ...proto.Message) EntitlementOption { return func(e *v2.Entitlement) { - annos := annotations.Annotations(e.Annotations) + annos := annotations.Annotations(e.GetAnnotations()) for _, msg := range msgs { annos.Append(msg) } - e.Annotations = annos + e.SetAnnotations(annos) } } func WithGrantableTo(grantableTo ...*v2.ResourceType) EntitlementOption { return func(g *v2.Entitlement) { - g.GrantableTo = grantableTo + g.SetGrantableTo(grantableTo) } } func WithDisplayName(displayName string) EntitlementOption { return func(g *v2.Entitlement) { - g.DisplayName = displayName + g.SetDisplayName(displayName) } } func WithDescription(description string) EntitlementOption { return func(g *v2.Entitlement) { - g.Description = description + g.SetDescription(description) } } func NewEntitlementID(resource *v2.Resource, permission string) string { - return fmt.Sprintf("%s:%s:%s", resource.Id.ResourceType, resource.Id.Resource, permission) + return fmt.Sprintf("%s:%s:%s", resource.GetId().GetResourceType(), resource.GetId().GetResource(), permission) } func NewPermissionEntitlement(resource *v2.Resource, name string, entitlementOptions ...EntitlementOption) *v2.Entitlement { - entitlement := &v2.Entitlement{ + entitlement := v2.Entitlement_builder{ Id: NewEntitlementID(resource, name), DisplayName: name, Slug: name, Purpose: v2.Entitlement_PURPOSE_VALUE_PERMISSION, Resource: resource, - } + }.Build() for _, entitlementOption := range entitlementOptions { entitlementOption(entitlement) @@ -58,13 +58,13 @@ func NewPermissionEntitlement(resource *v2.Resource, name string, entitlementOpt } func NewAssignmentEntitlement(resource *v2.Resource, name string, entitlementOptions ...EntitlementOption) *v2.Entitlement { - entitlement := &v2.Entitlement{ + entitlement := v2.Entitlement_builder{ Id: NewEntitlementID(resource, name), DisplayName: name, Slug: name, Purpose: v2.Entitlement_PURPOSE_VALUE_ASSIGNMENT, Resource: resource, - } + }.Build() for _, entitlementOption := range entitlementOptions { entitlementOption(entitlement) @@ -83,13 +83,13 @@ func NewEntitlement(resource *v2.Resource, name, purposeStr string, entitlementO purpose = v2.Entitlement_PURPOSE_VALUE_UNSPECIFIED } - entitlement := &v2.Entitlement{ + entitlement := v2.Entitlement_builder{ Id: NewEntitlementID(resource, name), DisplayName: name, Slug: name, Purpose: purpose, Resource: resource, - } + }.Build() for _, entitlementOption := range entitlementOptions { entitlementOption(entitlement) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/grant/grant.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/grant/grant.go index c662d7bf..972c6344 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/grant/grant.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/grant/grant.go @@ -27,10 +27,10 @@ func WithGrantMetadata(metadata map[string]interface{}) GrantOption { return err } - meta := &v2.GrantMetadata{Metadata: md} - annos := annotations.Annotations(g.Annotations) + meta := v2.GrantMetadata_builder{Metadata: md}.Build() + annos := annotations.Annotations(g.GetAnnotations()) annos.Update(meta) - g.Annotations = annos + g.SetAnnotations(annos) return nil } @@ -38,18 +38,18 @@ func WithGrantMetadata(metadata map[string]interface{}) GrantOption { func WithExternalPrincipalID(externalID *v2.ExternalId) GrantOption { return func(g *v2.Grant) error { - g.Principal.ExternalId = externalID + g.GetPrincipal().SetExternalId(externalID) return nil } } func WithAnnotation(msgs ...proto.Message) GrantOption { 
return func(g *v2.Grant) error { - annos := annotations.Annotations(g.Annotations) + annos := annotations.Annotations(g.GetAnnotations()) for _, msg := range msgs { annos.Append(msg) } - g.Annotations = annos + g.SetAnnotations(annos) return nil } @@ -57,23 +57,23 @@ func WithAnnotation(msgs ...proto.Message) GrantOption { // NewGrant returns a new grant for the given entitlement on the resource for the provided principal resource ID. func NewGrant(resource *v2.Resource, entitlementName string, principal GrantPrincipal, grantOptions ...GrantOption) *v2.Grant { - entitlement := &v2.Entitlement{ + entitlement := v2.Entitlement_builder{ Id: eopt.NewEntitlementID(resource, entitlementName), Resource: resource, - } + }.Build() - grant := &v2.Grant{ + grant := v2.Grant_builder{ Entitlement: entitlement, - } + }.Build() var resourceID *v2.ResourceId switch p := principal.(type) { case *v2.ResourceId: resourceID = p - grant.Principal = &v2.Resource{Id: p} + grant.SetPrincipal(v2.Resource_builder{Id: p}.Build()) case *v2.Resource: - grant.Principal = p - resourceID = p.Id + grant.SetPrincipal(p) + resourceID = p.GetId() default: panic("unexpected principal type") } @@ -81,7 +81,7 @@ func NewGrant(resource *v2.Resource, entitlementName string, principal GrantPrin if resourceID == nil { panic("principal resource must have a valid resource ID") } - grant.Id = fmt.Sprintf("%s:%s:%s", entitlement.Id, resourceID.ResourceType, resourceID.Resource) + grant.SetId(fmt.Sprintf("%s:%s:%s", entitlement.GetId(), resourceID.GetResourceType(), resourceID.GetResource())) for _, grantOption := range grantOptions { err := grantOption(grant) @@ -99,7 +99,7 @@ func NewGrantID(principal GrantPrincipal, entitlement *v2.Entitlement) string { case *v2.ResourceId: resourceID = p case *v2.Resource: - resourceID = p.Id + resourceID = p.GetId() default: panic("unexpected principal type") } @@ -107,5 +107,5 @@ func NewGrantID(principal GrantPrincipal, entitlement *v2.Entitlement) string { if resourceID == nil { panic("principal resource must have a valid resource ID") } - return fmt.Sprintf("%s:%s:%s", entitlement.Id, resourceID.ResourceType, resourceID.Resource) + return fmt.Sprintf("%s:%s:%s", entitlement.GetId(), resourceID.GetResourceType(), resourceID.GetResource()) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/app_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/app_trait.go index 8d2335d6..b1d41672 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/app_trait.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/app_trait.go @@ -12,7 +12,7 @@ type AppTraitOption func(gt *v2.AppTrait) error func WithAppIcon(assetRef *v2.AssetRef) AppTraitOption { return func(at *v2.AppTrait) error { - at.Icon = assetRef + at.SetIcon(assetRef) return nil } @@ -20,7 +20,7 @@ func WithAppIcon(assetRef *v2.AssetRef) AppTraitOption { func WithAppLogo(assetRef *v2.AssetRef) AppTraitOption { return func(at *v2.AppTrait) error { - at.Logo = assetRef + at.SetLogo(assetRef) return nil } @@ -28,7 +28,7 @@ func WithAppLogo(assetRef *v2.AssetRef) AppTraitOption { func WithAppFlags(flags ...v2.AppTrait_AppFlag) AppTraitOption { return func(at *v2.AppTrait) error { - at.Flags = flags + at.SetFlags(flags) return nil } } @@ -40,7 +40,7 @@ func WithAppProfile(profile map[string]interface{}) AppTraitOption { return err } - at.Profile = p + at.SetProfile(p) return nil } @@ -48,7 +48,7 @@ func WithAppProfile(profile map[string]interface{}) AppTraitOption { func 
WithAppHelpURL(helpURL string) AppTraitOption { return func(at *v2.AppTrait) error { - at.HelpUrl = helpURL + at.SetHelpUrl(helpURL) return nil } } @@ -70,7 +70,7 @@ func NewAppTrait(opts ...AppTraitOption) (*v2.AppTrait, error) { // GetAppTrait attempts to return the AppTrait instance on a resource. func GetAppTrait(resource *v2.Resource) (*v2.AppTrait, error) { ret := &v2.AppTrait{} - annos := annotations.Annotations(resource.Annotations) + annos := annotations.Annotations(resource.GetAnnotations()) ok, err := annos.Pick(ret) if err != nil { return nil, err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/group_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/group_trait.go index d37506a7..2857fe99 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/group_trait.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/group_trait.go @@ -17,7 +17,7 @@ func WithGroupProfile(profile map[string]interface{}) GroupTraitOption { return err } - gt.Profile = p + gt.SetProfile(p) return nil } @@ -25,7 +25,7 @@ func WithGroupProfile(profile map[string]interface{}) GroupTraitOption { func WithGroupIcon(assetRef *v2.AssetRef) GroupTraitOption { return func(gt *v2.GroupTrait) error { - gt.Icon = assetRef + gt.SetIcon(assetRef) return nil } } @@ -47,7 +47,7 @@ func NewGroupTrait(opts ...GroupTraitOption) (*v2.GroupTrait, error) { // GetGroupTrait attempts to return the GroupTrait instance on a resource. func GetGroupTrait(resource *v2.Resource) (*v2.GroupTrait, error) { ret := &v2.GroupTrait{} - annos := annotations.Annotations(resource.Annotations) + annos := annotations.Annotations(resource.GetAnnotations()) ok, err := annos.Pick(ret) if err != nil { return nil, err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/resource.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/resource.go index 585d9bea..33d3f132 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/resource.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/resource.go @@ -7,6 +7,8 @@ import ( v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/pagination" + "github.com/conductorone/baton-sdk/pkg/types/sessions" "google.golang.org/protobuf/proto" ) @@ -14,14 +16,14 @@ type ResourceOption func(*v2.Resource) error func WithAnnotation(msgs ...proto.Message) ResourceOption { return func(r *v2.Resource) error { - annos := annotations.Annotations(r.Annotations) + annos := annotations.Annotations(r.GetAnnotations()) for _, msg := range msgs { if msg == nil { continue } annos.Append(msg) } - r.Annotations = annos + r.SetAnnotations(annos) return nil } @@ -29,14 +31,14 @@ func WithAnnotation(msgs ...proto.Message) ResourceOption { func WithExternalID(externalID *v2.ExternalId) ResourceOption { return func(r *v2.Resource) error { - r.ExternalId = externalID + r.SetExternalId(externalID) return nil } } func WithParentResourceID(parentResourceID *v2.ResourceId) ResourceOption { return func(r *v2.Resource) error { - r.ParentResourceId = parentResourceID + r.SetParentResourceId(parentResourceID) return nil } @@ -44,7 +46,7 @@ func WithParentResourceID(parentResourceID *v2.ResourceId) ResourceOption { func WithDescription(description string) ResourceOption { return func(r *v2.Resource) error { - r.Description = description + r.SetDescription(description) return nil } @@ -55,7 +57,7 @@ func 
WithUserTrait(opts ...UserTraitOption) ResourceOption { var err error ut := &v2.UserTrait{} - annos := annotations.Annotations(r.Annotations) + annos := annotations.Annotations(r.GetAnnotations()) picked, err := annos.Pick(ut) if err != nil { @@ -78,7 +80,7 @@ func WithUserTrait(opts ...UserTraitOption) ResourceOption { } annos.Update(ut) - r.Annotations = annos + r.SetAnnotations(annos) return nil } } @@ -87,7 +89,7 @@ func WithGroupTrait(opts ...GroupTraitOption) ResourceOption { return func(r *v2.Resource) error { ut := &v2.GroupTrait{} - annos := annotations.Annotations(r.Annotations) + annos := annotations.Annotations(r.GetAnnotations()) _, err := annos.Pick(ut) if err != nil { return err @@ -101,7 +103,7 @@ func WithGroupTrait(opts ...GroupTraitOption) ResourceOption { } annos.Update(ut) - r.Annotations = annos + r.SetAnnotations(annos) return nil } } @@ -110,7 +112,7 @@ func WithRoleTrait(opts ...RoleTraitOption) ResourceOption { return func(r *v2.Resource) error { rt := &v2.RoleTrait{} - annos := annotations.Annotations(r.Annotations) + annos := annotations.Annotations(r.GetAnnotations()) _, err := annos.Pick(rt) if err != nil { return err @@ -124,7 +126,7 @@ func WithRoleTrait(opts ...RoleTraitOption) ResourceOption { } annos.Update(rt) - r.Annotations = annos + r.SetAnnotations(annos) return nil } @@ -134,7 +136,7 @@ func WithAppTrait(opts ...AppTraitOption) ResourceOption { return func(r *v2.Resource) error { at := &v2.AppTrait{} - annos := annotations.Annotations(r.Annotations) + annos := annotations.Annotations(r.GetAnnotations()) _, err := annos.Pick(at) if err != nil { return err @@ -148,7 +150,7 @@ func WithAppTrait(opts ...AppTraitOption) ResourceOption { } annos.Update(at) - r.Annotations = annos + r.SetAnnotations(annos) return nil } @@ -158,7 +160,7 @@ func WithSecretTrait(opts ...SecretTraitOption) ResourceOption { return func(r *v2.Resource) error { rt := &v2.SecretTrait{} - annos := annotations.Annotations(r.Annotations) + annos := annotations.Annotations(r.GetAnnotations()) _, err := annos.Pick(rt) if err != nil { return err @@ -172,7 +174,7 @@ func WithSecretTrait(opts ...SecretTraitOption) ResourceOption { } annos.Update(rt) - r.Annotations = annos + r.SetAnnotations(annos) return nil } @@ -203,12 +205,12 @@ func NewResourceType(name string, requiredTraits []v2.ResourceType_Trait, msgs . annos.Append(msg) } - return &v2.ResourceType{ + return v2.ResourceType_builder{ Id: id, DisplayName: name, Traits: requiredTraits, Annotations: annos, - } + }.Build() } // NewResourceID returns a new resource ID given a resource type parent ID, and arbitrary object ID. @@ -218,10 +220,10 @@ func NewResourceID(resourceType *v2.ResourceType, objectID interface{}) (*v2.Res return nil, err } - return &v2.ResourceId{ - ResourceType: resourceType.Id, + return v2.ResourceId_builder{ + ResourceType: resourceType.GetId(), Resource: id, - }, nil + }.Build(), nil } // NewResource returns a new resource instance with no traits. 
@@ -231,10 +233,10 @@ func NewResource(name string, resourceType *v2.ResourceType, objectID interface{ return nil, err } - resource := &v2.Resource{ + resource := v2.Resource_builder{ Id: rID, DisplayName: name, - } + }.Build() for _, resourceOption := range resourceOptions { err = resourceOption(resource) @@ -337,3 +339,14 @@ func NewSecretResource( return ret, nil } + +type SyncOpAttrs struct { + Session sessions.SessionStore + SyncID string + PageToken pagination.Token +} + +type SyncOpResults struct { + NextPageToken string + Annotations annotations.Annotations +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_trait.go index 4b80da27..bc534133 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_trait.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_trait.go @@ -17,7 +17,7 @@ func WithRoleProfile(profile map[string]interface{}) RoleTraitOption { return err } - rt.Profile = p + rt.SetProfile(p) return nil } @@ -40,7 +40,7 @@ func NewRoleTrait(opts ...RoleTraitOption) (*v2.RoleTrait, error) { // GetRoleTrait attempts to return the RoleTrait instance on a resource. func GetRoleTrait(resource *v2.Resource) (*v2.RoleTrait, error) { ret := &v2.RoleTrait{} - annos := annotations.Annotations(resource.Annotations) + annos := annotations.Annotations(resource.GetAnnotations()) ok, err := annos.Pick(ret) if err != nil { return nil, err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/secret_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/secret_trait.go index 520f677c..babb6d8a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/secret_trait.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/secret_trait.go @@ -12,35 +12,35 @@ type SecretTraitOption func(t *v2.SecretTrait) error func WithSecretCreatedAt(createdAt time.Time) SecretTraitOption { return func(t *v2.SecretTrait) error { - t.CreatedAt = timestamppb.New(createdAt) + t.SetCreatedAt(timestamppb.New(createdAt)) return nil } } func WithSecretLastUsedAt(lastUsed time.Time) SecretTraitOption { return func(t *v2.SecretTrait) error { - t.LastUsedAt = timestamppb.New(lastUsed) + t.SetLastUsedAt(timestamppb.New(lastUsed)) return nil } } func WithSecretExpiresAt(expiresAt time.Time) SecretTraitOption { return func(t *v2.SecretTrait) error { - t.ExpiresAt = timestamppb.New(expiresAt) + t.SetExpiresAt(timestamppb.New(expiresAt)) return nil } } func WithSecretCreatedByID(createdById *v2.ResourceId) SecretTraitOption { return func(t *v2.SecretTrait) error { - t.CreatedById = createdById + t.SetCreatedById(createdById) return nil } } func WithSecretIdentityID(identityId *v2.ResourceId) SecretTraitOption { return func(t *v2.SecretTrait) error { - t.IdentityId = identityId + t.SetIdentityId(identityId) return nil } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go new file mode 100644 index 00000000..9b8218d3 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go @@ -0,0 +1,287 @@ +package resource + +import ( + "fmt" + "time" + + "google.golang.org/protobuf/types/known/timestamppb" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" +) + +// 
SecurityInsightTraitOption is a functional option for configuring a SecurityInsightTrait. +type SecurityInsightTraitOption func(*v2.SecurityInsightTrait) error + +// WithInsightType sets the insight type. This is typically set via NewSecurityInsightTrait, +// but can be used to override or update the type on an existing trait. +func WithInsightType(insightType string) SecurityInsightTraitOption { + return func(t *v2.SecurityInsightTrait) error { + if insightType == "" { + return fmt.Errorf("insight type cannot be empty") + } + t.SetInsightType(insightType) + return nil + } +} + +// WithInsightValue sets the value of the security insight. +func WithInsightValue(value string) SecurityInsightTraitOption { + return func(t *v2.SecurityInsightTrait) error { + t.SetValue(value) + return nil + } +} + +// WithInsightObservedAt sets the observation timestamp for the insight. +func WithInsightObservedAt(observedAt time.Time) SecurityInsightTraitOption { + return func(t *v2.SecurityInsightTrait) error { + t.SetObservedAt(timestamppb.New(observedAt)) + return nil + } +} + +// WithInsightUserTarget sets the user target (by email) for the insight. +// Use this when the insight should be resolved to a C1 User by Uplift. +func WithInsightUserTarget(email string) SecurityInsightTraitOption { + return func(t *v2.SecurityInsightTrait) error { + t.SetUser(v2.SecurityInsightTrait_UserTarget_builder{ + Email: email, + }.Build()) + return nil + } +} + +// WithInsightResourceTarget sets a direct resource reference for the insight. +// Use this when the connector knows the actual resource (synced by this connector). +func WithInsightResourceTarget(resourceId *v2.ResourceId) SecurityInsightTraitOption { + return func(t *v2.SecurityInsightTrait) error { + t.SetResourceId(resourceId) + return nil + } +} + +// WithInsightExternalResourceTarget sets the external resource target for the insight. +// Use this when the connector only has an external ID (e.g., ARN) and needs Uplift to resolve it. +func WithInsightExternalResourceTarget(externalId string, appHint string) SecurityInsightTraitOption { + return func(t *v2.SecurityInsightTrait) error { + t.SetExternalResource(v2.SecurityInsightTrait_ExternalResourceTarget_builder{ + ExternalId: externalId, + AppHint: appHint, + }.Build()) + return nil + } +} + +// NewSecurityInsightTrait creates a new SecurityInsightTrait with the given insight type and options. +func NewSecurityInsightTrait(insightType string, opts ...SecurityInsightTraitOption) (*v2.SecurityInsightTrait, error) { + if insightType == "" { + return nil, fmt.Errorf("insight type cannot be empty") + } + + trait := v2.SecurityInsightTrait_builder{ + InsightType: insightType, + ObservedAt: timestamppb.Now(), + }.Build() + + for _, opt := range opts { + if err := opt(trait); err != nil { + return nil, err + } + } + + return trait, nil +} + +// GetSecurityInsightTrait attempts to return the SecurityInsightTrait from a resource's annotations. +func GetSecurityInsightTrait(resource *v2.Resource) (*v2.SecurityInsightTrait, error) { + ret := &v2.SecurityInsightTrait{} + annos := annotations.Annotations(resource.GetAnnotations()) + ok, err := annos.Pick(ret) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("security insight trait was not found on resource") + } + + return ret, nil +} + +// WithSecurityInsightTrait adds or updates a SecurityInsightTrait annotation on a resource. +// The insightType parameter is required to ensure the trait is always valid. 
+// If the resource already has a SecurityInsightTrait, it will be updated with the provided options. +// If not, a new trait will be created with the given insightType. +func WithSecurityInsightTrait(insightType string, opts ...SecurityInsightTraitOption) ResourceOption { + return func(r *v2.Resource) error { + t := &v2.SecurityInsightTrait{} + annos := annotations.Annotations(r.GetAnnotations()) + existing, err := annos.Pick(t) + if err != nil { + return err + } + + if !existing { + // Creating a new trait - insightType is required + if insightType == "" { + return fmt.Errorf("insight type is required when creating a new security insight trait") + } + t.SetInsightType(insightType) + } else if insightType != "" { + // Updating existing trait with a new type + t.SetInsightType(insightType) + } + // If existing and insightType is empty, keep the existing type + + for _, o := range opts { + if err := o(t); err != nil { + return err + } + } + + annos.Update(t) + r.SetAnnotations(annos) + + return nil + } +} + +// NewUserSecurityInsightResource creates a security insight resource targeting a user by email. +// Use this when the insight should be resolved to a C1 User by Uplift. +func NewUserSecurityInsightResource( + name string, + resourceType *v2.ResourceType, + objectID interface{}, + insightType string, + value string, + userEmail string, + traitOpts []SecurityInsightTraitOption, + opts ...ResourceOption, +) (*v2.Resource, error) { + allTraitOpts := append([]SecurityInsightTraitOption{ + WithInsightValue(value), + WithInsightUserTarget(userEmail), + }, traitOpts...) + + trait, err := NewSecurityInsightTrait(insightType, allTraitOpts...) + if err != nil { + return nil, err + } + + opts = append(opts, WithAnnotation(trait)) + + return NewResource(name, resourceType, objectID, opts...) +} + +// NewResourceSecurityInsightResource creates a security insight resource with a direct resource reference. +// Use this when the connector knows the actual resource (synced by this connector). +func NewResourceSecurityInsightResource( + name string, + resourceType *v2.ResourceType, + objectID interface{}, + insightType string, + value string, + targetResourceId *v2.ResourceId, + traitOpts []SecurityInsightTraitOption, + opts ...ResourceOption, +) (*v2.Resource, error) { + allTraitOpts := append([]SecurityInsightTraitOption{ + WithInsightValue(value), + WithInsightResourceTarget(targetResourceId), + }, traitOpts...) + + trait, err := NewSecurityInsightTrait(insightType, allTraitOpts...) + if err != nil { + return nil, err + } + + opts = append(opts, WithAnnotation(trait)) + + return NewResource(name, resourceType, objectID, opts...) +} + +// NewExternalResourceSecurityInsightResource creates a security insight resource targeting an external resource. +// Use this when the connector only has an external ID (e.g., ARN) and needs Uplift to resolve it. +func NewExternalResourceSecurityInsightResource( + name string, + resourceType *v2.ResourceType, + objectID interface{}, + insightType string, + value string, + targetExternalId string, + targetAppHint string, + traitOpts []SecurityInsightTraitOption, + opts ...ResourceOption, +) (*v2.Resource, error) { + allTraitOpts := append([]SecurityInsightTraitOption{ + WithInsightValue(value), + WithInsightExternalResourceTarget(targetExternalId, targetAppHint), + }, traitOpts...) + + trait, err := NewSecurityInsightTrait(insightType, allTraitOpts...) 
+ if err != nil { + return nil, err + } + + opts = append(opts, WithAnnotation(trait)) + + return NewResource(name, resourceType, objectID, opts...) +} + +// IsSecurityInsightResource checks if a resource type has the TRAIT_SECURITY_INSIGHT trait. +func IsSecurityInsightResource(resourceType *v2.ResourceType) bool { + for _, trait := range resourceType.GetTraits() { + if trait == v2.ResourceType_TRAIT_SECURITY_INSIGHT { + return true + } + } + return false +} + +// --- Target type checkers --- + +// IsUserTarget returns true if the insight targets a user. +func IsUserTarget(trait *v2.SecurityInsightTrait) bool { + return trait.GetUser() != nil +} + +// IsResourceTarget returns true if the insight has a direct resource reference. +func IsResourceTarget(trait *v2.SecurityInsightTrait) bool { + return trait.GetResourceId() != nil +} + +// IsExternalResourceTarget returns true if the insight targets an external resource. +func IsExternalResourceTarget(trait *v2.SecurityInsightTrait) bool { + return trait.GetExternalResource() != nil +} + +// --- Target data extractors --- + +// GetUserTargetEmail returns the user email from a SecurityInsightTrait, or empty string if not a user target. +func GetUserTargetEmail(trait *v2.SecurityInsightTrait) string { + if user := trait.GetUser(); user != nil { + return user.GetEmail() + } + return "" +} + +// GetResourceTarget returns the ResourceId from a SecurityInsightTrait, or nil if not a resource target. +func GetResourceTarget(trait *v2.SecurityInsightTrait) *v2.ResourceId { + return trait.GetResourceId() +} + +// GetExternalResourceTargetId returns the external ID from a SecurityInsightTrait, or empty string if not an external resource target. +func GetExternalResourceTargetId(trait *v2.SecurityInsightTrait) string { + if ext := trait.GetExternalResource(); ext != nil { + return ext.GetExternalId() + } + return "" +} + +// GetExternalResourceTargetAppHint returns the app hint from a SecurityInsightTrait, or empty string if not an external resource target. 
+func GetExternalResourceTargetAppHint(trait *v2.SecurityInsightTrait) string { + if ext := trait.GetExternalResource(); ext != nil { + return ext.GetAppHint() + } + return "" +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/user_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/user_trait.go index 41cc373d..ba16457b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/user_trait.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/user_trait.go @@ -15,7 +15,7 @@ type UserTraitOption func(ut *v2.UserTrait) error func WithStatus(status v2.UserTrait_Status_Status) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.Status = &v2.UserTrait_Status{Status: status} + ut.SetStatus(v2.UserTrait_Status_builder{Status: status}.Build()) return nil } @@ -23,7 +23,7 @@ func WithStatus(status v2.UserTrait_Status_Status) UserTraitOption { func WithDetailedStatus(status v2.UserTrait_Status_Status, details string) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.Status = &v2.UserTrait_Status{Status: status, Details: details} + ut.SetStatus(v2.UserTrait_Status_builder{Status: status, Details: details}.Build()) return nil } @@ -35,12 +35,12 @@ func WithEmail(email string, primary bool) UserTraitOption { return nil } - traitEmail := &v2.UserTrait_Email{ + traitEmail := v2.UserTrait_Email_builder{ Address: email, IsPrimary: primary, - } + }.Build() - ut.Emails = append(ut.Emails, traitEmail) + ut.SetEmails(append(ut.GetEmails(), traitEmail)) return nil } @@ -52,22 +52,22 @@ func WithUserLogin(login string, aliases ...string) UserTraitOption { // If login is empty do nothing return nil } - ut.Login = login - ut.LoginAliases = aliases + ut.SetLogin(login) + ut.SetLoginAliases(aliases) return nil } } func WithEmployeeID(employeeIDs ...string) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.EmployeeIds = employeeIDs + ut.SetEmployeeIds(employeeIDs) return nil } } func WithUserIcon(assetRef *v2.AssetRef) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.Icon = assetRef + ut.SetIcon(assetRef) return nil } @@ -80,7 +80,7 @@ func WithUserProfile(profile map[string]interface{}) UserTraitOption { return err } - ut.Profile = p + ut.SetProfile(p) return nil } @@ -88,42 +88,42 @@ func WithUserProfile(profile map[string]interface{}) UserTraitOption { func WithAccountType(accountType v2.UserTrait_AccountType) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.AccountType = accountType + ut.SetAccountType(accountType) return nil } } func WithCreatedAt(createdAt time.Time) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.CreatedAt = timestamppb.New(createdAt) + ut.SetCreatedAt(timestamppb.New(createdAt)) return nil } } func WithLastLogin(lastLogin time.Time) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.LastLogin = timestamppb.New(lastLogin) + ut.SetLastLogin(timestamppb.New(lastLogin)) return nil } } func WithMFAStatus(mfaStatus *v2.UserTrait_MFAStatus) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.MfaStatus = mfaStatus + ut.SetMfaStatus(mfaStatus) return nil } } func WithSSOStatus(ssoStatus *v2.UserTrait_SSOStatus) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.SsoStatus = ssoStatus + ut.SetSsoStatus(ssoStatus) return nil } } func WithStructuredName(structuredName *v2.UserTrait_StructuredName) UserTraitOption { return func(ut *v2.UserTrait) error { - ut.StructuredName = structuredName + ut.SetStructuredName(structuredName) return nil 
} } @@ -140,13 +140,13 @@ func NewUserTrait(opts ...UserTraitOption) (*v2.UserTrait, error) { } // If no status was set, default to be enabled. - if userTrait.Status == nil { - userTrait.Status = &v2.UserTrait_Status{Status: v2.UserTrait_Status_STATUS_ENABLED} + if !userTrait.HasStatus() { + userTrait.SetStatus(v2.UserTrait_Status_builder{Status: v2.UserTrait_Status_STATUS_ENABLED}.Build()) } // If account type isn't specified, default to a human user. - if userTrait.AccountType == v2.UserTrait_ACCOUNT_TYPE_UNSPECIFIED { - userTrait.AccountType = v2.UserTrait_ACCOUNT_TYPE_HUMAN + if userTrait.GetAccountType() == v2.UserTrait_ACCOUNT_TYPE_UNSPECIFIED { + userTrait.SetAccountType(v2.UserTrait_ACCOUNT_TYPE_HUMAN) } return userTrait, nil @@ -155,7 +155,7 @@ func NewUserTrait(opts ...UserTraitOption) (*v2.UserTrait, error) { // GetUserTrait attempts to return the UserTrait instance on a resource. func GetUserTrait(resource *v2.Resource) (*v2.UserTrait, error) { ret := &v2.UserTrait{} - annos := annotations.Annotations(resource.Annotations) + annos := annotations.Annotations(resource.GetAnnotations()) ok, err := annos.Pick(ret) if err != nil { return nil, err diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/session_cache.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/session_cache.go deleted file mode 100644 index 86d69eb5..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/session_cache.go +++ /dev/null @@ -1,58 +0,0 @@ -package types - -import ( - "context" -) - -// SessionCacheKey is the context key for storing the session cache instance. -type SessionCacheKey struct{} - -// SessionCache is an interface for caching session data. -type SessionCache interface { - Get(ctx context.Context, key string, opt ...SessionCacheOption) ([]byte, bool, error) - GetMany(ctx context.Context, keys []string, opt ...SessionCacheOption) (map[string][]byte, error) - Set(ctx context.Context, key string, value []byte, opt ...SessionCacheOption) error - SetMany(ctx context.Context, values map[string][]byte, opt ...SessionCacheOption) error - Delete(ctx context.Context, key string, opt ...SessionCacheOption) error - Clear(ctx context.Context, opt ...SessionCacheOption) error - GetAll(ctx context.Context, opt ...SessionCacheOption) (map[string][]byte, error) - Close(ctx context.Context) error -} - -// SessionCacheOption is a function that modifies a SessionCacheBag. -type SessionCacheOption func(ctx context.Context, bag *SessionCacheBag) error - -// SessionCacheConstructor is a function that creates a SessionCache instance. -type SessionCacheConstructor func(ctx context.Context, opt ...SessionCacheConstructorOption) (SessionCache, error) - -// SessionCacheConstructorOption is a function that modifies the context for session cache construction. -type SessionCacheConstructorOption func(ctx context.Context) (context.Context, error) - -// SessionCacheBag holds the configuration for session cache operations. -type SessionCacheBag struct { - SyncID string - Prefix string -} - -// SyncIDKey is the context key for storing the current sync ID. -type SyncIDKey struct{} - -// WithSyncID returns a SessionCacheOption that sets the sync ID for the operation. -func WithSyncID(syncID string) SessionCacheOption { - return func(ctx context.Context, bag *SessionCacheBag) error { - bag.SyncID = syncID - return nil - } -} - -// GetSyncID retrieves the sync ID from the context, returning empty string if not found. 
-func GetSyncID(ctx context.Context) string { - if syncID, ok := ctx.Value(SyncIDKey{}).(string); ok { - return syncID - } - return "" -} - -func SetSyncIDInContext(ctx context.Context, syncID string) context.Context { - return context.WithValue(ctx, SyncIDKey{}, syncID) -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/sessions/sessions.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/sessions/sessions.go new file mode 100644 index 00000000..38315a71 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/sessions/sessions.go @@ -0,0 +1,77 @@ +package sessions + +import ( + "context" +) + +const MaxKeysPerRequest = 100 + +// The default gRPC message size limit is 4MB (we subtract 30KB for general overhead, which is overkill). +// Unfortunately, this layer has to be aware of the size limit to avoid exceeding the size limit +// because the client does not know the size of the items it requests. +const MaxSessionStoreSizeLimit = 4163584 + +type SessionStoreKey struct{} + +type SessionStore interface { + Get(ctx context.Context, key string, opt ...SessionStoreOption) ([]byte, bool, error) + GetMany(ctx context.Context, keys []string, opt ...SessionStoreOption) (map[string][]byte, []string, error) + Set(ctx context.Context, key string, value []byte, opt ...SessionStoreOption) error + SetMany(ctx context.Context, values map[string][]byte, opt ...SessionStoreOption) error + Delete(ctx context.Context, key string, opt ...SessionStoreOption) error + Clear(ctx context.Context, opt ...SessionStoreOption) error + GetAll(ctx context.Context, pageToken string, opt ...SessionStoreOption) (map[string][]byte, string, error) +} + +type SessionStoreOption func(ctx context.Context, bag *SessionStoreBag) error + +type SessionStoreConstructor func(ctx context.Context, opt ...SessionStoreConstructorOption) (SessionStore, error) + +type SessionStoreConstructorOption func(ctx context.Context) (context.Context, error) + +type SessionStoreBag struct { + SyncID string + Prefix string + PageToken string +} + +// SyncIDKey is the context key for storing the current sync ID. +type SyncIDKey struct{} + +// WithSyncID returns a SessionCacheOption that sets the sync ID for the operation. +func WithSyncID(syncID string) SessionStoreOption { + return func(ctx context.Context, bag *SessionStoreBag) error { + bag.SyncID = syncID + return nil + } +} + +func WithPrefix(prefix string) SessionStoreOption { + return func(ctx context.Context, bag *SessionStoreBag) error { + bag.Prefix = prefix + return nil + } +} + +// GetSyncID retrieves the sync ID from the context, returning empty string if not found. 
+func GetSyncID(ctx context.Context) string { + if syncID, ok := ctx.Value(SyncIDKey{}).(string); ok { + return syncID + } + return "" +} + +func WithPageToken(pageToken string) SessionStoreOption { + return func(ctx context.Context, bag *SessionStoreBag) error { + bag.PageToken = pageToken + return nil + } +} + +func SetSyncIDInContext(ctx context.Context, syncID string) context.Context { + return context.WithValue(ctx, SyncIDKey{}, syncID) +} + +type SetSessionStore interface { + SetSessionStore(ctx context.Context, store SessionStore) +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go index 73071aa4..07e38a89 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go @@ -40,12 +40,16 @@ func (tt TaskType) String() string { return "get_resource" case ListEntitlementsType: return "list_entitlements" + case ListStaticEntitlementsType: + return "list_static_entitlements" case ListGrantsType: return "list_grants" case GetMetadataType: return "get_metadata" case ListEventsType: return "list_events" + case ListEventFeedsType: + return "list_event_feeds" case StartDebugging: return "set_log_file_event" case BulkCreateTicketsType: @@ -60,6 +64,8 @@ func (tt TaskType) String() string { return "invoke_action" case ActionStatusType: return "action_status" + case CreateSyncDiff: + return "create_sync_diff" default: return "unknown" } @@ -97,4 +103,5 @@ const ( ActionInvokeType ActionStatusType CreateSyncDiff + ListStaticEntitlementsType ) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/ticket/custom_fields.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/ticket/custom_fields.go index fd3d6b94..6ce7c247 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/ticket/custom_fields.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/ticket/custom_fields.go @@ -26,57 +26,57 @@ func CustomFieldForSchemaField(id string, schema *v2.TicketSchema, value interfa return nil, fmt.Errorf("error: id(%s) not found in schema", id) } - switch field.GetValue().(type) { - case *v2.TicketCustomField_StringValue: + switch field.WhichValue() { + case v2.TicketCustomField_StringValue_case: v, ok := value.(string) if !ok { return nil, fmt.Errorf("unexpected value type for custom field: %s %T", id, v) } return StringField(id, v), nil - case *v2.TicketCustomField_StringValues: + case v2.TicketCustomField_StringValues_case: v, ok := value.([]string) if !ok { return nil, fmt.Errorf("unexpected value type for custom field: %s %T", id, v) } return StringsField(id, v), nil - case *v2.TicketCustomField_BoolValue: + case v2.TicketCustomField_BoolValue_case: v, ok := value.(bool) if !ok { return nil, fmt.Errorf("unexpected value type for custom field: %s %T", id, v) } return BoolField(id, v), nil - case *v2.TicketCustomField_NumberValue: + case v2.TicketCustomField_NumberValue_case: v, ok := value.(float32) if !ok { return nil, fmt.Errorf("unexpected value type for custom field: %s %T", id, v) } return NumberField(id, v), nil - case *v2.TicketCustomField_TimestampValue: + case v2.TicketCustomField_TimestampValue_case: v, ok := value.(*timestamppb.Timestamp) if !ok { return nil, fmt.Errorf("unexpected value type for custom field: %s %T", id, v) } return TimestampField(id, v.AsTime()), nil - case *v2.TicketCustomField_PickStringValue: + case v2.TicketCustomField_PickStringValue_case: v, ok := value.(string) if 
!ok { return nil, fmt.Errorf("unexpected value type for custom field: %s %T", id, v) } return PickStringField(id, v), nil - case *v2.TicketCustomField_PickMultipleStringValues: + case v2.TicketCustomField_PickMultipleStringValues_case: v, ok := value.([]string) if !ok { return nil, fmt.Errorf("unexpected value type for custom field: %s %T", id, v) } return PickMultipleStringsField(id, v), nil - case *v2.TicketCustomField_PickObjectValue: + case v2.TicketCustomField_PickObjectValue_case: rawBytes, err := json.Marshal(value) if err != nil { return nil, err @@ -90,7 +90,7 @@ func CustomFieldForSchemaField(id string, schema *v2.TicketSchema, value interfa return PickObjectValueField(id, ov), nil - case *v2.TicketCustomField_PickMultipleObjectValues: + case v2.TicketCustomField_PickMultipleObjectValues_case: rawValue, err := json.Marshal(value) if err != nil { return nil, err @@ -133,7 +133,7 @@ func GetStringValue(field *v2.TicketCustomField) (string, error) { if !ok { return "", errors.New("error: expected string value") } - return v.StringValue.Value, nil + return v.StringValue.GetValue(), nil } func GetStringsValue(field *v2.TicketCustomField) ([]string, error) { @@ -144,7 +144,7 @@ func GetStringsValue(field *v2.TicketCustomField) ([]string, error) { if !ok { return nil, errors.New("error: expected string values") } - return v.StringValues.Values, nil + return v.StringValues.GetValues(), nil } func GetBoolValue(field *v2.TicketCustomField) (bool, error) { @@ -155,7 +155,7 @@ func GetBoolValue(field *v2.TicketCustomField) (bool, error) { if !ok { return false, errors.New("error: expected bool value") } - return v.BoolValue.Value, nil + return v.BoolValue.GetValue(), nil } func GetNumberValue(field *v2.TicketCustomField) (float32, error) { @@ -177,7 +177,7 @@ func GetTimestampValue(field *v2.TicketCustomField) (time.Time, error) { if !ok { return time.Time{}, errors.New("error: expected timestamp value") } - return v.TimestampValue.Value.AsTime(), nil + return v.TimestampValue.GetValue().AsTime(), nil } func GetPickStringValue(field *v2.TicketCustomField) (string, error) { @@ -188,7 +188,7 @@ func GetPickStringValue(field *v2.TicketCustomField) (string, error) { if !ok { return "", errors.New("error: expected pick string value") } - return v.PickStringValue.Value, nil + return v.PickStringValue.GetValue(), nil } func GetPickMultipleStringValues(field *v2.TicketCustomField) ([]string, error) { @@ -199,7 +199,7 @@ func GetPickMultipleStringValues(field *v2.TicketCustomField) ([]string, error) if !ok { return nil, errors.New("error: expected pick multiple string values") } - return v.PickMultipleStringValues.Values, nil + return v.PickMultipleStringValues.GetValues(), nil } func GetPickObjectValue(field *v2.TicketCustomField) (*v2.TicketCustomFieldObjectValue, error) { @@ -210,7 +210,7 @@ func GetPickObjectValue(field *v2.TicketCustomField) (*v2.TicketCustomFieldObjec if !ok { return nil, errors.New("error: expected pick object value") } - return v.PickObjectValue.Value, nil + return v.PickObjectValue.GetValue(), nil } func GetPickMultipleObjectValues(field *v2.TicketCustomField) ([]*v2.TicketCustomFieldObjectValue, error) { @@ -221,7 +221,7 @@ func GetPickMultipleObjectValues(field *v2.TicketCustomField) ([]*v2.TicketCusto if !ok { return nil, errors.New("error: expected pick multiple object values") } - return v.PickMultipleObjectValues.Values, nil + return v.PickMultipleObjectValues.GetValues(), nil } // GetCustomFieldValue returns the interface{} of the value set on a given custom field. 
@@ -229,52 +229,52 @@ func GetCustomFieldValue(field *v2.TicketCustomField) (interface{}, error) { if field == nil { return nil, nil } - switch v := field.GetValue().(type) { - case *v2.TicketCustomField_StringValue: - strVal := v.StringValue.GetValue() + switch field.WhichValue() { + case v2.TicketCustomField_StringValue_case: + strVal := field.GetStringValue().GetValue() if strVal == "" { return nil, nil } - return v.StringValue.GetValue(), nil + return field.GetStringValue().GetValue(), nil - case *v2.TicketCustomField_StringValues: - strVals := v.StringValues.GetValues() + case v2.TicketCustomField_StringValues_case: + strVals := field.GetStringValues().GetValues() if len(strVals) == 0 { return nil, nil } return strVals, nil - case *v2.TicketCustomField_BoolValue: - return v.BoolValue.GetValue(), nil + case v2.TicketCustomField_BoolValue_case: + return field.GetBoolValue().GetValue(), nil - case *v2.TicketCustomField_NumberValue: - wrapperVal := v.NumberValue.GetValue() + case v2.TicketCustomField_NumberValue_case: + wrapperVal := field.GetNumberValue().GetValue() if wrapperVal == nil { return nil, nil } return wrapperVal.GetValue(), nil - case *v2.TicketCustomField_TimestampValue: - return v.TimestampValue.GetValue(), nil + case v2.TicketCustomField_TimestampValue_case: + return field.GetTimestampValue().GetValue(), nil - case *v2.TicketCustomField_PickStringValue: - strVal := v.PickStringValue.GetValue() + case v2.TicketCustomField_PickStringValue_case: + strVal := field.GetPickStringValue().GetValue() if strVal == "" { return nil, nil } return strVal, nil - case *v2.TicketCustomField_PickMultipleStringValues: - strVals := v.PickMultipleStringValues.GetValues() + case v2.TicketCustomField_PickMultipleStringValues_case: + strVals := field.GetPickMultipleStringValues().GetValues() if len(strVals) == 0 { return nil, nil } return strVals, nil - case *v2.TicketCustomField_PickObjectValue: - return v.PickObjectValue.GetValue(), nil + case v2.TicketCustomField_PickObjectValue_case: + return field.GetPickObjectValue().GetValue(), nil - case *v2.TicketCustomField_PickMultipleObjectValues: - objVals := v.PickMultipleObjectValues.GetValues() + case v2.TicketCustomField_PickMultipleObjectValues_case: + objVals := field.GetPickMultipleObjectValues().GetValues() if len(objVals) == 0 { return nil, nil } @@ -289,53 +289,53 @@ func GetDefaultCustomFieldValue(field *v2.TicketCustomField) (interface{}, error if field == nil { return nil, nil } - switch v := field.GetValue().(type) { - case *v2.TicketCustomField_StringValue: - strVal := v.StringValue.GetDefaultValue() + switch field.WhichValue() { + case v2.TicketCustomField_StringValue_case: + strVal := field.GetStringValue().GetDefaultValue() if strVal == "" { return nil, nil } return strVal, nil - case *v2.TicketCustomField_StringValues: - strVals := v.StringValues.GetDefaultValues() + case v2.TicketCustomField_StringValues_case: + strVals := field.GetStringValues().GetDefaultValues() if len(strVals) == 0 { return nil, nil } return strVals, nil - case *v2.TicketCustomField_BoolValue: - return v.BoolValue.GetValue(), nil + case v2.TicketCustomField_BoolValue_case: + return field.GetBoolValue().GetValue(), nil - case *v2.TicketCustomField_NumberValue: - defaultWrapper := v.NumberValue.GetDefaultValue() + case v2.TicketCustomField_NumberValue_case: + defaultWrapper := field.GetNumberValue().GetDefaultValue() if defaultWrapper == nil { return nil, nil } return defaultWrapper.GetValue(), nil - case *v2.TicketCustomField_TimestampValue: - return 
v.TimestampValue.GetDefaultValue(), nil + case v2.TicketCustomField_TimestampValue_case: + return field.GetTimestampValue().GetDefaultValue(), nil - case *v2.TicketCustomField_PickStringValue: - strVal := v.PickStringValue.GetDefaultValue() + case v2.TicketCustomField_PickStringValue_case: + strVal := field.GetPickStringValue().GetDefaultValue() if strVal == "" { return nil, nil } return strVal, nil - case *v2.TicketCustomField_PickMultipleStringValues: - strVals := v.PickMultipleStringValues.GetDefaultValues() + case v2.TicketCustomField_PickMultipleStringValues_case: + strVals := field.GetPickMultipleStringValues().GetDefaultValues() if len(strVals) == 0 { return nil, nil } return strVals, nil - case *v2.TicketCustomField_PickObjectValue: - return v.PickObjectValue.GetDefaultValue(), nil + case v2.TicketCustomField_PickObjectValue_case: + return field.GetPickObjectValue().GetDefaultValue(), nil - case *v2.TicketCustomField_PickMultipleObjectValues: - objVals := v.PickMultipleObjectValues.GetDefaultValues() + case v2.TicketCustomField_PickMultipleObjectValues_case: + objVals := field.GetPickMultipleObjectValues().GetDefaultValues() if len(objVals) == 0 { return nil, nil } @@ -365,17 +365,17 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic // Validate the ticket status is one defined in the schema // Ticket status is not required so if a ticket doesn't have a status // we don't need to validate, skip the loop in this case - validTicketStatus := ticket.Status == nil + validTicketStatus := !ticket.HasStatus() if !validTicketStatus { for _, status := range schema.GetStatuses() { - if ticket.Status.GetId() == status.GetId() { + if ticket.GetStatus().GetId() == status.GetId() { validTicketStatus = true break } } } if !validTicketStatus { - l.Debug("error: invalid ticket: could not find status", zap.String("status_id", ticket.Status.GetId())) + l.Debug("error: invalid ticket: could not find status", zap.String("status_id", ticket.GetStatus().GetId())) return false, nil } @@ -385,8 +385,8 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic for id, cf := range schemaCustomFields { ticketCf, ok := ticketCustomFields[id] if !ok { - if cf.Required { - l.Debug("error: invalid ticket: missing custom field", zap.String("custom_field_id", cf.Id)) + if cf.GetRequired() { + l.Debug("error: invalid ticket: missing custom field", zap.String("custom_field_id", cf.GetId())) return false, nil } else { // field not present but not required, so skip it @@ -394,11 +394,11 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic } } - switch v := cf.GetValue().(type) { - case *v2.TicketCustomField_StringValue: + switch cf.WhichValue() { + case v2.TicketCustomField_StringValue_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_StringValue) if !tok { - l.Debug("error: invalid ticket: expected string value for field", zap.String("custom_field_id", cf.Id), zap.Any("value", tv)) + l.Debug("error: invalid ticket: expected string value for field", zap.String("custom_field_id", cf.GetId()), zap.Any("value", tv)) return false, nil } @@ -407,15 +407,15 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic stringValue = tv.StringValue.GetDefaultValue() } - if cf.Required && stringValue == "" { - l.Debug("error: invalid ticket: string value is required but was empty", zap.String("custom_field_id", cf.Id)) + if cf.GetRequired() && stringValue == "" { + l.Debug("error: invalid ticket: string value 
is required but was empty", zap.String("custom_field_id", cf.GetId())) return false, nil } - case *v2.TicketCustomField_StringValues: + case v2.TicketCustomField_StringValues_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_StringValues) if !tok { - l.Debug("error: invalid ticket: expected string values for field", zap.String("custom_field_id", cf.Id), zap.Any("values", tv)) + l.Debug("error: invalid ticket: expected string values for field", zap.String("custom_field_id", cf.GetId()), zap.Any("values", tv)) return false, nil } @@ -424,28 +424,28 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic stringValues = tv.StringValues.GetDefaultValues() } - if cf.Required && len(stringValues) == 0 { - l.Debug("error: invalid ticket: string values is required but was empty", zap.String("custom_field_id", cf.Id)) + if cf.GetRequired() && len(stringValues) == 0 { + l.Debug("error: invalid ticket: string values is required but was empty", zap.String("custom_field_id", cf.GetId())) return false, nil } - case *v2.TicketCustomField_BoolValue: + case v2.TicketCustomField_BoolValue_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_BoolValue) if !tok { - l.Debug("error: invalid ticket: expected bool value for field", zap.String("custom_field_id", cf.Id), zap.Any("value", tv)) + l.Debug("error: invalid ticket: expected bool value for field", zap.String("custom_field_id", cf.GetId()), zap.Any("value", tv)) return false, nil } - case *v2.TicketCustomField_NumberValue: + case v2.TicketCustomField_NumberValue_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_NumberValue) if !tok { - l.Debug("error: invalid ticket: expected number value for field", zap.String("custom_field_id", cf.Id), zap.Any("value", tv)) + l.Debug("error: invalid ticket: expected number value for field", zap.String("custom_field_id", cf.GetId()), zap.Any("value", tv)) return false, nil } - case *v2.TicketCustomField_TimestampValue: + case v2.TicketCustomField_TimestampValue_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_TimestampValue) if !tok { - l.Debug("error: invalid ticket: expected timestamp value for field", zap.String("custom_field_id", cf.Id), zap.Any("value", tv)) + l.Debug("error: invalid ticket: expected timestamp value for field", zap.String("custom_field_id", cf.GetId()), zap.Any("value", tv)) return false, nil } @@ -454,20 +454,20 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic timestampValue = tv.TimestampValue.GetDefaultValue() } - if cf.Required && timestampValue == nil { - l.Debug("error: invalid ticket: expected timestamp value for field but was empty", zap.String("custom_field_id", cf.Id)) + if cf.GetRequired() && timestampValue == nil { + l.Debug("error: invalid ticket: expected timestamp value for field but was empty", zap.String("custom_field_id", cf.GetId())) return false, nil } - case *v2.TicketCustomField_PickStringValue: + case v2.TicketCustomField_PickStringValue_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_PickStringValue) if !tok { - l.Debug("error: invalid ticket: expected string value for field", zap.String("custom_field_id", cf.Id), zap.Any("value", tv)) + l.Debug("error: invalid ticket: expected string value for field", zap.String("custom_field_id", cf.GetId()), zap.Any("value", tv)) return false, nil } ticketValue := tv.PickStringValue.GetValue() - allowedValues := v.PickStringValue.GetAllowedValues() + allowedValues := cf.GetPickStringValue().GetAllowedValues() defaultTicketValue := 
tv.PickStringValue.GetDefaultValue() if ticketValue == "" { @@ -475,18 +475,18 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic } // String value is empty but custom field is not required, skip further validation - if !cf.Required && ticketValue == "" { + if !cf.GetRequired() && ticketValue == "" { continue } // Custom field is required, check if string is empty if ticketValue == "" { - l.Debug("error: invalid ticket: expected string value for field but was empty", zap.String("custom_field_id", cf.Id)) + l.Debug("error: invalid ticket: expected string value for field but was empty", zap.String("custom_field_id", cf.GetId())) return false, nil } if len(allowedValues) == 0 { - l.Debug("error: invalid schema: expected schema to specify at least one allowed value", zap.String("custom_field_id", cf.Id)) + l.Debug("error: invalid schema: expected schema to specify at least one allowed value", zap.String("custom_field_id", cf.GetId())) return false, nil } @@ -500,40 +500,40 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic if !foundMatch { l.Debug( "error: invalid ticket: expected value from schema", - zap.String("custom_field_id", cf.Id), + zap.String("custom_field_id", cf.GetId()), zap.String("value", ticketValue), zap.Strings("allowed_values", allowedValues), ) return false, nil } - case *v2.TicketCustomField_PickMultipleStringValues: + case v2.TicketCustomField_PickMultipleStringValues_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_PickMultipleStringValues) if !tok { - l.Debug("error: invalid ticket: expected string values for field", zap.String("custom_field_id", cf.Id), zap.Any("values", tv)) + l.Debug("error: invalid ticket: expected string values for field", zap.String("custom_field_id", cf.GetId()), zap.Any("values", tv)) return false, nil } ticketValues := tv.PickMultipleStringValues.GetValues() - allowedValues := v.PickMultipleStringValues.GetAllowedValues() + allowedValues := cf.GetPickMultipleStringValues().GetAllowedValues() if len(ticketValues) == 0 { ticketValues = tv.PickMultipleStringValues.GetDefaultValues() } // String values are empty but custom field is not required, skip further validation - if !cf.Required && len(ticketValues) == 0 { + if !cf.GetRequired() && len(ticketValues) == 0 { continue } // Custom field is required so check if string values are empty if len(ticketValues) == 0 { - l.Debug("error: invalid ticket: string values is required but was empty", zap.String("custom_field_id", cf.Id)) + l.Debug("error: invalid ticket: string values is required but was empty", zap.String("custom_field_id", cf.GetId())) return false, nil } if len(allowedValues) == 0 { - l.Debug("error: invalid schema: expected schema to specify at least one allowed value", zap.String("custom_field_id", cf.Id)) + l.Debug("error: invalid schema: expected schema to specify at least one allowed value", zap.String("custom_field_id", cf.GetId())) return false, nil } @@ -548,40 +548,40 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic if len(ticketValues) != foundMatches { l.Debug( "error: invalid ticket: expected value from schema", - zap.String("custom_field_id", cf.Id), + zap.String("custom_field_id", cf.GetId()), zap.Strings("values", ticketValues), zap.Strings("allowed_values", allowedValues), ) return false, nil } - case *v2.TicketCustomField_PickObjectValue: + case v2.TicketCustomField_PickObjectValue_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_PickObjectValue) if 
!tok { - l.Debug("error: invalid ticket: expected object value for field", zap.String("custom_field_id", cf.Id), zap.Any("value", tv)) + l.Debug("error: invalid ticket: expected object value for field", zap.String("custom_field_id", cf.GetId()), zap.Any("value", tv)) return false, nil } ticketValue := tv.PickObjectValue.GetValue() - allowedValues := v.PickObjectValue.GetAllowedValues() + allowedValues := cf.GetPickObjectValue().GetAllowedValues() if ticketValue == nil || ticketValue.GetId() == "" { ticketValue = tv.PickObjectValue.GetDefaultValue() } // Object value for field is nil, but custom field is not required, skip further validation - if !cf.Required && (ticketValue == nil || ticketValue.GetId() == "") { + if !cf.GetRequired() && (ticketValue == nil || ticketValue.GetId() == "") { continue } // Custom field is required so check if object value for field is nil if ticketValue == nil || ticketValue.GetId() == "" { - l.Debug("error: invalid ticket: expected object value for field but was nil", zap.String("custom_field_id", cf.Id)) + l.Debug("error: invalid ticket: expected object value for field but was nil", zap.String("custom_field_id", cf.GetId())) return false, nil } if len(allowedValues) == 0 { - l.Debug("error: invalid schema: expected schema to specify at least one allowed value", zap.String("custom_field_id", cf.Id)) + l.Debug("error: invalid schema: expected schema to specify at least one allowed value", zap.String("custom_field_id", cf.GetId())) return false, nil } @@ -595,40 +595,40 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic if !foundMatch { l.Debug( "error: invalid ticket: expected value from schema", - zap.String("custom_field_id", cf.Id), + zap.String("custom_field_id", cf.GetId()), zap.String("value_id", ticketValue.GetId()), zap.Any("allowed_values", allowedValues), ) return false, nil } - case *v2.TicketCustomField_PickMultipleObjectValues: + case v2.TicketCustomField_PickMultipleObjectValues_case: tv, tok := ticketCf.GetValue().(*v2.TicketCustomField_PickMultipleObjectValues) if !tok { - l.Debug("error: invalid ticket: expected object values for field", zap.String("custom_field_id", cf.Id), zap.Any("values", tv)) + l.Debug("error: invalid ticket: expected object values for field", zap.String("custom_field_id", cf.GetId()), zap.Any("values", tv)) return false, nil } ticketValues := tv.PickMultipleObjectValues.GetValues() if len(ticketValues) == 0 { - ticketValues = v.PickMultipleObjectValues.GetDefaultValues() + ticketValues = cf.GetPickMultipleObjectValues().GetDefaultValues() } - allowedValues := v.PickMultipleObjectValues.GetAllowedValues() + allowedValues := cf.GetPickMultipleObjectValues().GetAllowedValues() // Object values are empty but custom field is not required, skip further validation - if !cf.Required && len(ticketValues) == 0 { + if !cf.GetRequired() && len(ticketValues) == 0 { continue } // Custom field is required so check if object values are empty if len(ticketValues) == 0 { - l.Debug("error: invalid ticket: object values is required but was empty", zap.String("custom_field_id", cf.Id)) + l.Debug("error: invalid ticket: object values is required but was empty", zap.String("custom_field_id", cf.GetId())) return false, nil } if len(allowedValues) == 0 { - l.Debug("error: invalid schema: expected schema to specify at least one allowed value", zap.String("custom_field_id", cf.Id)) + l.Debug("error: invalid schema: expected schema to specify at least one allowed value", zap.String("custom_field_id", cf.GetId())) 
return false, nil } @@ -643,7 +643,7 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic if len(ticketValues) != foundMatches { l.Debug( "error: invalid ticket: expected value from schema", - zap.String("custom_field_id", cf.Id), + zap.String("custom_field_id", cf.GetId()), zap.Any("values", ticketValues), zap.Any("allowed_values", allowedValues), ) @@ -651,7 +651,7 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic } default: - l.Debug("error: invalid schema: unknown custom field type", zap.Any("custom_field_type", v)) + l.Debug("error: invalid schema: unknown custom field type", zap.Any("custom_field_type", cf.WhichValue())) return false, errors.New("error: invalid schema: unknown custom field type") } } @@ -660,207 +660,171 @@ func ValidateTicket(ctx context.Context, schema *v2.TicketSchema, ticket *v2.Tic } func StringFieldSchema(id, displayName string, required bool) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, DisplayName: displayName, Required: required, - Value: &v2.TicketCustomField_StringValue{ - StringValue: &v2.TicketCustomFieldStringValue{}, - }, - } + StringValue: &v2.TicketCustomFieldStringValue{}, + }.Build() } func StringField(id, value string) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_StringValue{ - StringValue: &v2.TicketCustomFieldStringValue{ - Value: value, - }, - }, - } + StringValue: v2.TicketCustomFieldStringValue_builder{ + Value: value, + }.Build(), + }.Build() } func StringsFieldSchema(id, displayName string, required bool) *v2.TicketCustomField { - return &v2.TicketCustomField{ - Id: id, - DisplayName: displayName, - Required: required, - Value: &v2.TicketCustomField_StringValues{ - StringValues: &v2.TicketCustomFieldStringValues{}, - }, - } + return v2.TicketCustomField_builder{ + Id: id, + DisplayName: displayName, + Required: required, + StringValues: &v2.TicketCustomFieldStringValues{}, + }.Build() } func StringsField(id string, values []string) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_StringValues{ - StringValues: &v2.TicketCustomFieldStringValues{ - Values: values, - }, - }, - } + StringValues: v2.TicketCustomFieldStringValues_builder{ + Values: values, + }.Build(), + }.Build() } func BoolFieldSchema(id, displayName string, required bool) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, DisplayName: displayName, Required: required, - Value: &v2.TicketCustomField_BoolValue{ - BoolValue: &v2.TicketCustomFieldBoolValue{}, - }, - } + BoolValue: &v2.TicketCustomFieldBoolValue{}, + }.Build() } func NumberFieldSchema(id, displayName string, required bool) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, DisplayName: displayName, Required: required, - Value: &v2.TicketCustomField_NumberValue{ - NumberValue: &v2.TicketCustomFieldNumberValue{}, - }, - } + NumberValue: &v2.TicketCustomFieldNumberValue{}, + }.Build() } func BoolField(id string, value bool) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_BoolValue{ - BoolValue: &v2.TicketCustomFieldBoolValue{ - Value: value, - }, - }, - } + BoolValue: v2.TicketCustomFieldBoolValue_builder{ + Value: value, + }.Build(), + 
}.Build() } func NumberField(id string, value float32) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_NumberValue{ - NumberValue: &v2.TicketCustomFieldNumberValue{ - Value: wrapperspb.Float(value), - }, - }, - } + NumberValue: v2.TicketCustomFieldNumberValue_builder{ + Value: wrapperspb.Float(value), + }.Build(), + }.Build() } func TimestampFieldSchema(id, displayName string, required bool) *v2.TicketCustomField { - return &v2.TicketCustomField{ - Id: id, - DisplayName: displayName, - Required: required, - Value: &v2.TicketCustomField_TimestampValue{ - TimestampValue: &v2.TicketCustomFieldTimestampValue{}, - }, - } + return v2.TicketCustomField_builder{ + Id: id, + DisplayName: displayName, + Required: required, + TimestampValue: &v2.TicketCustomFieldTimestampValue{}, + }.Build() } func TimestampField(id string, value time.Time) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_TimestampValue{ - TimestampValue: &v2.TicketCustomFieldTimestampValue{ - Value: timestamppb.New(value), - }, - }, - } + TimestampValue: v2.TicketCustomFieldTimestampValue_builder{ + Value: timestamppb.New(value), + }.Build(), + }.Build() } func PickStringFieldSchema(id, displayName string, required bool, allowedValues []string) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, DisplayName: displayName, Required: required, - Value: &v2.TicketCustomField_PickStringValue{ - PickStringValue: &v2.TicketCustomFieldPickStringValue{ - AllowedValues: allowedValues, - }, - }, - } + PickStringValue: v2.TicketCustomFieldPickStringValue_builder{ + AllowedValues: allowedValues, + }.Build(), + }.Build() } func PickStringField(id string, value string) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_PickStringValue{ - PickStringValue: &v2.TicketCustomFieldPickStringValue{ - Value: value, - }, - }, - } + PickStringValue: v2.TicketCustomFieldPickStringValue_builder{ + Value: value, + }.Build(), + }.Build() } func PickMultipleStringsFieldSchema(id, displayName string, required bool, allowedValues []string) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, DisplayName: displayName, Required: required, - Value: &v2.TicketCustomField_PickMultipleStringValues{ - PickMultipleStringValues: &v2.TicketCustomFieldPickMultipleStringValues{ - AllowedValues: allowedValues, - }, - }, - } + PickMultipleStringValues: v2.TicketCustomFieldPickMultipleStringValues_builder{ + AllowedValues: allowedValues, + }.Build(), + }.Build() } func PickMultipleStringsField(id string, values []string) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_PickMultipleStringValues{ - PickMultipleStringValues: &v2.TicketCustomFieldPickMultipleStringValues{ - Values: values, - }, - }, - } + PickMultipleStringValues: v2.TicketCustomFieldPickMultipleStringValues_builder{ + Values: values, + }.Build(), + }.Build() } func PickObjectValueFieldSchema(id, displayName string, required bool, allowedValues []*v2.TicketCustomFieldObjectValue) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, DisplayName: displayName, Required: required, - Value: &v2.TicketCustomField_PickObjectValue{ - 
PickObjectValue: &v2.TicketCustomFieldPickObjectValue{ - AllowedValues: allowedValues, - }, - }, - } + PickObjectValue: v2.TicketCustomFieldPickObjectValue_builder{ + AllowedValues: allowedValues, + }.Build(), + }.Build() } func PickObjectValueField(id string, value *v2.TicketCustomFieldObjectValue) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_PickObjectValue{ - PickObjectValue: &v2.TicketCustomFieldPickObjectValue{ - Value: value, - }, - }, - } + PickObjectValue: v2.TicketCustomFieldPickObjectValue_builder{ + Value: value, + }.Build(), + }.Build() } func PickMultipleObjectValuesFieldSchema(id, displayName string, required bool, allowedValues []*v2.TicketCustomFieldObjectValue) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, DisplayName: displayName, Required: required, - Value: &v2.TicketCustomField_PickMultipleObjectValues{ - PickMultipleObjectValues: &v2.TicketCustomFieldPickMultipleObjectValues{ - AllowedValues: allowedValues, - }, - }, - } + PickMultipleObjectValues: v2.TicketCustomFieldPickMultipleObjectValues_builder{ + AllowedValues: allowedValues, + }.Build(), + }.Build() } func PickMultipleObjectValuesField(id string, values []*v2.TicketCustomFieldObjectValue) *v2.TicketCustomField { - return &v2.TicketCustomField{ + return v2.TicketCustomField_builder{ Id: id, - Value: &v2.TicketCustomField_PickMultipleObjectValues{ - PickMultipleObjectValues: &v2.TicketCustomFieldPickMultipleObjectValues{ - Values: values, - }, - }, - } + PickMultipleObjectValues: v2.TicketCustomFieldPickMultipleObjectValues_builder{ + Values: values, + }.Build(), + }.Build() } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/ugrpc/interceptors.go b/vendor/github.com/conductorone/baton-sdk/pkg/ugrpc/interceptors.go index 9b96e079..f2a4b979 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/ugrpc/interceptors.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/ugrpc/interceptors.go @@ -2,10 +2,7 @@ package ugrpc import ( "context" - "reflect" - "github.com/conductorone/baton-sdk/pkg/annotations" - "github.com/conductorone/baton-sdk/pkg/types" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" @@ -14,7 +11,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/anypb" ) func ChainUnaryInterceptors(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { @@ -32,62 +28,6 @@ func ChainUnaryInterceptors(interceptors ...grpc.UnaryServerInterceptor) grpc.Un } } -/* -SessionCacheUnaryInterceptor creates a unary interceptor that: -1. Propagates the session cache from the server context to the handler context. -2. Extracts annotations from requests and adds syncID to context (for the session manager). 
-*/ -func SessionCacheUnaryInterceptor(serverCtx context.Context) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - // Propagate session cache from server context to handler context - ctx = ContextWithSyncID(ctx, req) - - if sessionCache, ok := serverCtx.Value(types.SessionCacheKey{}).(types.SessionCache); ok { - ctx = context.WithValue(ctx, types.SessionCacheKey{}, sessionCache) - } - - return handler(ctx, req) - } -} - -func SessionCacheStreamInterceptor(serverCtx context.Context) grpc.StreamServerInterceptor { - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - // Start with the original stream context - ctx := ss.Context() - - // Propagate session cache from server context to stream context - if sessionCache, ok := serverCtx.Value(types.SessionCacheKey{}).(types.SessionCache); ok { - ctx = context.WithValue(ctx, types.SessionCacheKey{}, sessionCache) - } - - // Create a wrapped stream that handles both session cache and annotation extraction - wrappedStream := &sessionCacheServerStream{ - ServerStream: ss, - ctx: ctx, - } - - return handler(srv, wrappedStream) - } -} - -type sessionCacheServerStream struct { - grpc.ServerStream - ctx context.Context -} - -func (s *sessionCacheServerStream) Context() context.Context { - return s.ctx -} - -func (s *sessionCacheServerStream) RecvMsg(m interface{}) error { - err := s.ServerStream.RecvMsg(m) - if err != nil { - return err - } - s.ctx = ContextWithSyncID(s.ctx, m) - return nil -} - // StreamServerInterceptors returns a slice of interceptors that includes the default interceptors, // plus any interceptors passed in as arguments. func StreamServerInterceptors(ctx context.Context, interceptors ...grpc.StreamServerInterceptor) []grpc.StreamServerInterceptor { @@ -96,7 +36,6 @@ func StreamServerInterceptors(ctx context.Context, interceptors ...grpc.StreamSe LoggingStreamServerInterceptor(ctxzap.Extract(ctx)), grpc_recovery.StreamServerInterceptor(grpc_recovery.WithRecoveryHandlerContext(recoveryHandler)), grpc_validator.StreamServerInterceptor(), - SessionCacheStreamInterceptor(ctx), } rv = append(rv, interceptors...) @@ -111,7 +50,6 @@ func UnaryServerInterceptor(ctx context.Context, interceptors ...grpc.UnaryServe LoggingUnaryServerInterceptor(ctxzap.Extract(ctx)), grpc_recovery.UnaryServerInterceptor(grpc_recovery.WithRecoveryHandlerContext(recoveryHandler)), grpc_validator.UnaryServerInterceptor(), - SessionCacheUnaryInterceptor(ctx), } rv = append(rv, interceptors...) @@ -132,59 +70,3 @@ func recoveryHandler(ctx context.Context, p interface{}) error { ) return err } - -// ContextWithSyncID extracts syncID from a request annotations and adds it to the context. 
-func ContextWithSyncID(ctx context.Context, req interface{}) context.Context { - if ctx == nil || req == nil { - return ctx - } - - // Use reflection to check if the request has an Annotations field - reqValue := reflect.ValueOf(req) - if reqValue.Kind() == reflect.Ptr { - reqValue = reqValue.Elem() - } - - annotationsField := reqValue.FieldByName("Annotations") - if !annotationsField.IsValid() { - return ctx - } - - // Check if the field is of the correct type - if annotationsField.Type() != reflect.TypeOf(annotations.Annotations{}) && - annotationsField.Type() != reflect.TypeOf([]*anypb.Any{}) { - return ctx - } - - // Get the annotations from the request - if annotationsField.IsNil() { - return ctx - } - - // Handle both annotations.Annotations and []*anypb.Any types - var annos annotations.Annotations - if annotationsField.Type() == reflect.TypeOf(annotations.Annotations{}) { - annos = annotationsField.Interface().(annotations.Annotations) - } else { - // Convert []*anypb.Any to annotations.Annotations - anySlice := annotationsField.Interface().([]*anypb.Any) - annos = annotations.Annotations(anySlice) - } - - if len(annos) == 0 { - return ctx - } - - syncID, err := annotations.GetActiveSyncIdFromAnnotations(annos) - if err != nil { - l := ctxzap.Extract(ctx) - l.Warn("error getting active sync id from annotations", zap.Error(err)) - return ctx - } - - if syncID == "" { - return ctx - } - - return context.WithValue(ctx, types.SyncIDKey{}, syncID) -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/dbcache.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/dbcache.go index 459990df..a7d91ed5 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/dbcache.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/dbcache.go @@ -52,13 +52,13 @@ const ( failRollback = "Failed to rollback transaction" failInsert = "Failed to insert response data into cache table" failScanResponse = "Failed to scan rows for cached response" - cacheTTLThreshold = 60 + cacheTTLThreshold = time.Duration(60) * time.Second cacheTTLMultiplier uint64 = 5 ) var errNilConnection = errors.New("database connection is nil") -var defaultWaitDuration = cacheTTLThreshold * time.Second // Default Cleanup interval, 60 seconds +var defaultWaitDuration = cacheTTLThreshold // Default Cleanup interval, 60 seconds const tableName = "http_cache" @@ -67,10 +67,9 @@ func NewDBCache(ctx context.Context, cfg CacheConfig) (*DBCache, error) { var ( err error dc = &DBCache{ - waitDuration: defaultWaitDuration, // Default Cleanup interval, 60 seconds - stats: true, - //nolint:gosec // disable G115 - expirationTime: time.Duration(cfg.TTL) * time.Second, + waitDuration: defaultWaitDuration, // Default Cleanup interval, 60 seconds + stats: true, + expirationTime: cfg.TTL, } ) l := ctxzap.Extract(ctx) @@ -111,8 +110,7 @@ func NewDBCache(ctx context.Context, cfg CacheConfig) (*DBCache, error) { } if cfg.TTL > cacheTTLThreshold { - //nolint:gosec // disable G115 - dc.waitDuration = time.Duration(cfg.TTL*cacheTTLMultiplier) * time.Second // set as a fraction of the Cache TTL + dc.waitDuration = cfg.TTL * time.Duration(cacheTTLMultiplier) // set as a fraction of the Cache TTL } go func(waitDuration, expirationTime time.Duration) { @@ -429,8 +427,8 @@ func (d *DBCache) updateStats(ctx context.Context, field, key string) error { func (d *DBCache) getStats(ctx context.Context) (CacheStats, error) { var ( - hits int64 - misses int64 + hits uint64 + misses uint64 ) if d.db == nil { return CacheStats{}, errNilConnection @@ 
-456,6 +454,9 @@ func (d *DBCache) getStats(ctx context.Context) (CacheStats, error) { return CacheStats{}, err } } + if rows.Err() != nil { + return CacheStats{}, rows.Err() + } return CacheStats{ Hits: hits, diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/gocache.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/gocache.go index 63f769db..81b8a8e0 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/gocache.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/gocache.go @@ -13,14 +13,15 @@ import ( "time" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "github.com/maypok86/otter" + "github.com/maypok86/otter/v2" + "github.com/maypok86/otter/v2/stats" "go.uber.org/zap" ) const ( - cacheTTLMaximum uint64 = 31536000 // 31536000 seconds = one year - cacheTTLDefault uint64 = 3600 // 3600 seconds = one hour - defaultCacheSize uint = 5 // MB + cacheTTLMaximum time.Duration = 31536000 * time.Second // 31536000 seconds = one year + cacheTTLDefault time.Duration = 3600 * time.Second // 3600 seconds = one hour + defaultCacheSizeMb uint64 = 5 // MB ) type CacheBackend string @@ -32,15 +33,15 @@ const ( ) type CacheConfig struct { - LogDebug bool - TTL uint64 // If 0, cache is disabled - MaxSize uint // MB - Backend CacheBackend // If noop, cache is disabled + LogDebug bool + TTL time.Duration // If 0, cache is disabled + MaxSizeMb uint64 // MB + Backend CacheBackend // If noop, cache is disabled } type CacheStats struct { - Hits int64 - Misses int64 + Hits uint64 + Misses uint64 } type ContextKey struct{} @@ -50,7 +51,7 @@ type GoCache struct { } type NoopCache struct { - counter int64 + counter uint64 } func NewNoopCache(ctx context.Context) *NoopCache { @@ -79,15 +80,15 @@ func (n *NoopCache) Stats(ctx context.Context) CacheStats { } func (cc *CacheConfig) ToString() string { - return fmt.Sprintf("Backend: %v, TTL: %d, MaxSize: %dMB, LogDebug: %t", cc.Backend, cc.TTL, cc.MaxSize, cc.LogDebug) + return fmt.Sprintf("Backend: %v, TTL: %d, MaxSize: %dMB, LogDebug: %t", cc.Backend, cc.TTL, cc.MaxSizeMb, cc.LogDebug) } func DefaultCacheConfig() CacheConfig { return CacheConfig{ - TTL: cacheTTLDefault, - MaxSize: defaultCacheSize, - LogDebug: false, - Backend: CacheBackendMemory, + TTL: cacheTTLDefault, + MaxSizeMb: defaultCacheSizeMb, + LogDebug: false, + Backend: CacheBackendMemory, } } @@ -96,12 +97,12 @@ func NewCacheConfigFromEnv() *CacheConfig { cacheMaxSize, err := strconv.ParseInt(os.Getenv("BATON_HTTP_CACHE_MAX_SIZE"), 10, 64) if err == nil && cacheMaxSize >= 0 { - config.MaxSize = uint(cacheMaxSize) + config.MaxSizeMb = uint64(cacheMaxSize) } - cacheTTL, err := strconv.ParseUint(os.Getenv("BATON_HTTP_CACHE_TTL"), 10, 64) + cacheTTL, err := strconv.ParseInt(os.Getenv("BATON_HTTP_CACHE_TTL"), 10, 64) if err == nil { - config.TTL = min(cacheTTLMaximum, max(0, cacheTTL)) + config.TTL = min(cacheTTLMaximum, max(0, time.Duration(cacheTTL)*time.Second)) } cacheBackend := os.Getenv("BATON_HTTP_CACHE_BACKEND") @@ -147,7 +148,7 @@ func NewHttpCache(ctx context.Context, config *CacheConfig) (icache, error) { l.Info("http cache config", zap.String("config", config.ToString())) if config.TTL == 0 { - l.Debug("CacheTTL is 0, disabling cache.", zap.Uint64("CacheTTL", config.TTL)) + l.Debug("NewHttpCache: Cache TTL is 0, disabling cache.", zap.Duration("cache_ttl", config.TTL)) return NewNoopCache(ctx), nil } @@ -179,26 +180,30 @@ func NewHttpCache(ctx context.Context, config *CacheConfig) (icache, error) { func NewGoCache(ctx context.Context, cfg 
CacheConfig) (*GoCache, error) { l := ctxzap.Extract(ctx) gc := GoCache{} - maxSize := cfg.MaxSize * 1024 * 1024 + maxSize := cfg.MaxSizeMb * 1024 * 1024 if maxSize > math.MaxInt { return nil, fmt.Errorf("error converting max size to bytes") } - //nolint:gosec // disable G115: we check the max size - cache, err := otter.MustBuilder[string, []byte](int(maxSize)). - CollectStats(). - Cost(func(key string, value []byte) uint32 { - return uint32(len(key) + len(value)) - }). - WithTTL(time.Duration(cfg.TTL) * time.Second). - Build() + cache, err := otter.New(&otter.Options[string, []byte]{ + MaximumWeight: maxSize, + StatsRecorder: stats.NewCounter(), + Weigher: func(key string, value []byte) uint32 { + weight64 := uint64(len(key)) + uint64(len(value)) + if weight64 > uint64(math.MaxUint32) { + return math.MaxUint32 + } + return uint32(weight64) + }, + ExpiryCalculator: otter.ExpiryWriting[string, []byte](cfg.TTL), + }) if err != nil { l.Error("cache initialization error", zap.Error(err)) return nil, err } - l.Debug("otter cache initialized", zap.Int("capacity", cache.Capacity())) - gc.rootLibrary = &cache + l.Debug("otter cache initialized", zap.Uint64("capacity", cache.GetMaximum())) + gc.rootLibrary = cache return &gc, nil } @@ -209,8 +214,8 @@ func (g *GoCache) Stats(ctx context.Context) CacheStats { } stats := g.rootLibrary.Stats() return CacheStats{ - Hits: stats.Hits(), - Misses: stats.Misses(), + Hits: stats.Hits, + Misses: stats.Misses, } } @@ -224,8 +229,8 @@ func (g *GoCache) Get(req *http.Request) (*http.Response, error) { return nil, err } - entry, ok := g.rootLibrary.Get(key) - if !ok { + entry, found := g.rootLibrary.GetIfPresent(key) + if !found { return nil, nil } @@ -259,7 +264,7 @@ func (g *GoCache) Set(req *http.Request, value *http.Response) error { // Otter's cost function rejects large responses if there's not enough room // TODO: return some error or warning that we couldn't set? 
- _ = g.rootLibrary.Set(key, newValue) + _, _ = g.rootLibrary.Set(key, newValue) return nil } @@ -269,7 +274,7 @@ func (g *GoCache) Delete(key string) error { return nil } - g.rootLibrary.Delete(key) + g.rootLibrary.Invalidate(key) return nil } @@ -281,7 +286,7 @@ func (g *GoCache) Clear(ctx context.Context) error { return nil } - g.rootLibrary.Clear() + g.rootLibrary.InvalidateAll() l.Debug("reset cache") return nil @@ -291,6 +296,6 @@ func (g *GoCache) Has(key string) bool { if g.rootLibrary == nil { return false } - _, found := g.rootLibrary.Get(key) + _, found := g.rootLibrary.GetIfPresent(key) return found } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go index 0073f1c1..3a8d4d27 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go @@ -242,10 +242,10 @@ func WithRatelimitData(resource *v2.RateLimitDescription) DoOption { return err } - resource.Limit = rl.Limit - resource.Remaining = rl.Remaining - resource.ResetAt = rl.ResetAt - resource.Status = rl.Status + resource.SetLimit(rl.GetLimit()) + resource.SetRemaining(rl.GetRemaining()) + resource.SetResetAt(rl.GetResetAt()) + resource.SetStatus(rl.GetStatus()) return nil } @@ -433,7 +433,7 @@ func (c *BaseHttpClient) Do(req *http.Request, options ...DoOption) (*http.Respo // Log response headers directly for certain errors if resp.StatusCode >= 400 { - redactedHeaders := redactHeaders(resp.Header) + redactedHeaders := RedactSensitiveHeaders(resp.Header) l.Error("base-http-client: HTTP error status", zap.Int("status_code", resp.StatusCode), zap.String("status", resp.Status), @@ -476,13 +476,34 @@ func (c *BaseHttpClient) Do(req *http.Request, options ...DoOption) (*http.Respo return resp, errors.Join(optErrs...) } -func redactHeaders(h http.Header) http.Header { +var sensitiveStrings = []string{ + "api-key", + "auth", + "cookie", + "proxy-authorization", + "set-cookie", + "x-forwarded-for", + "x-forwarded-proto", +} + +func RedactSensitiveHeaders(h http.Header) http.Header { + if h == nil { + return nil + } safe := make(http.Header, len(h)) for k, v := range h { - switch strings.ToLower(k) { - case "authorization", "set-cookie", "cookie": + sensitive := false + headerKey := strings.ToLower(k) + for _, sensitiveString := range sensitiveStrings { + if strings.Contains(headerKey, sensitiveString) { + sensitive = true + break + } + } + + if sensitive { safe[k] = []string{"REDACTED"} - default: + } else { safe[k] = v } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uotel/config.go b/vendor/github.com/conductorone/baton-sdk/pkg/uotel/config.go index 14082a35..9d107225 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/uotel/config.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/uotel/config.go @@ -251,7 +251,7 @@ func (c *otelConfig) initLogging(ctx context.Context, cc *grpc.ClientConn) (cont return nil, fmt.Errorf("failed to initialize otlp exporter: %w", err) } // TODO(morgabra): Whole bunch of tunables _here_... - processor := log.NewBatchProcessor(exp, log.WithExportInterval(time.Second*5)) + processor := log.NewBatchProcessor(exp, log.WithExportInterval(time.Second*1)) // TODO(morgabra): Whole bunch of tunables ALSO HERE... 
provider := log.NewLoggerProvider( log.WithResource(res), diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/utls/certs.go b/vendor/github.com/conductorone/baton-sdk/pkg/utls/certs.go index e70de8fd..86b072a6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/utls/certs.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/utls/certs.go @@ -56,10 +56,10 @@ func generateCredential(ctx context.Context, name string, caCert *x509.Certifica return nil, err } - return &v1.Credential{ + return v1.Credential_builder{ Key: privateKey, Cert: signedCert, - }, nil + }.Build(), nil } // GenerateClientServerCredentials generates a new CA and two sets of credentials for use in a client/server configuration. @@ -105,13 +105,13 @@ func GenerateClientServerCredentials(ctx context.Context) (*v1.Credential, *v1.C if err != nil { return nil, nil, err } - clientCreds.CaCert = caCert + clientCreds.SetCaCert(caCert) serverCreds, err := generateCredential(ctx, "c1-connector-server", ca, caKey) if err != nil { return nil, nil, err } - serverCreds.CaCert = caCert + serverCreds.SetCaCert(caCert) return clientCreds, serverCreds, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/utls/client.go b/vendor/github.com/conductorone/baton-sdk/pkg/utls/client.go index 7426a882..d751a9d0 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/utls/client.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/utls/client.go @@ -11,7 +11,7 @@ import ( // ClientConfig takes a credential and returns a TLS configuration that can be used to connect to a server. func ClientConfig(ctx context.Context, cred *v1.Credential) (*tls.Config, error) { - caCert, err := x509.ParseCertificate(cred.CaCert) + caCert, err := x509.ParseCertificate(cred.GetCaCert()) if err != nil { return nil, err } @@ -20,15 +20,15 @@ func ClientConfig(ctx context.Context, cred *v1.Credential) (*tls.Config, error) pool.AddCert(caCert) // Validate that we have a valid certificate - _, err = x509.ParseCertificate(cred.Cert) + _, err = x509.ParseCertificate(cred.GetCert()) if err != nil { return nil, err } var tlsCert tls.Certificate - tlsCert.Certificate = append(tlsCert.Certificate, cred.Cert) - tlsCert.PrivateKey = ed25519.PrivateKey(cred.Key) + tlsCert.Certificate = append(tlsCert.Certificate, cred.GetCert()) + tlsCert.PrivateKey = ed25519.PrivateKey(cred.GetKey()) return &tls.Config{ ServerName: "127.0.0.1", diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/utls/listener.go b/vendor/github.com/conductorone/baton-sdk/pkg/utls/listener.go index b1a935b4..05fa3d32 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/utls/listener.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/utls/listener.go @@ -12,7 +12,7 @@ import ( // ListenerConfig takes a credential and returns a TLS configuration that can be used to create a TLS listener. 
func ListenerConfig(ctx context.Context, cred *v1.Credential) (*tls.Config, error) { - caCert, err := x509.ParseCertificate(cred.CaCert) + caCert, err := x509.ParseCertificate(cred.GetCaCert()) if err != nil { return nil, err } @@ -21,15 +21,15 @@ func ListenerConfig(ctx context.Context, cred *v1.Credential) (*tls.Config, erro pool.AddCert(caCert) // Validate that we have a valid certificate - _, err = x509.ParseCertificate(cred.Cert) + _, err = x509.ParseCertificate(cred.GetCert()) if err != nil { return nil, err } var tlsCert tls.Certificate - tlsCert.Certificate = append(tlsCert.Certificate, cred.Cert) - tlsCert.PrivateKey = ed25519.PrivateKey(cred.Key) + tlsCert.Certificate = append(tlsCert.Certificate, cred.GetCert()) + tlsCert.PrivateKey = ed25519.PrivateKey(cred.GetKey()) return &tls.Config{ MinVersion: tls.VersionTLS12, diff --git a/vendor/github.com/dolthub/maphash/.gitignore b/vendor/github.com/dolthub/maphash/.gitignore deleted file mode 100644 index 977a7cad..00000000 --- a/vendor/github.com/dolthub/maphash/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.idea -*.test \ No newline at end of file diff --git a/vendor/github.com/dolthub/maphash/README.md b/vendor/github.com/dolthub/maphash/README.md deleted file mode 100644 index d91530f9..00000000 --- a/vendor/github.com/dolthub/maphash/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# maphash - -Hash any `comparable` type using Golang's fast runtime hash. -Uses [AES](https://en.wikipedia.org/wiki/AES_instruction_set) instructions when available. \ No newline at end of file diff --git a/vendor/github.com/dolthub/maphash/hasher.go b/vendor/github.com/dolthub/maphash/hasher.go deleted file mode 100644 index ef53596a..00000000 --- a/vendor/github.com/dolthub/maphash/hasher.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2022 Dolthub, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package maphash - -import "unsafe" - -// Hasher hashes values of type K. -// Uses runtime AES-based hashing. -type Hasher[K comparable] struct { - hash hashfn - seed uintptr -} - -// NewHasher creates a new Hasher[K] with a random seed. -func NewHasher[K comparable]() Hasher[K] { - return Hasher[K]{ - hash: getRuntimeHasher[K](), - seed: newHashSeed(), - } -} - -// NewSeed returns a copy of |h| with a new hash seed. -func NewSeed[K comparable](h Hasher[K]) Hasher[K] { - return Hasher[K]{ - hash: h.hash, - seed: newHashSeed(), - } -} - -// Hash hashes |key|. -func (h Hasher[K]) Hash(key K) uint64 { - // promise to the compiler that pointer - // |p| does not escape the stack. - p := noescape(unsafe.Pointer(&key)) - return uint64(h.hash(p, h.seed)) -} diff --git a/vendor/github.com/dolthub/maphash/runtime.go b/vendor/github.com/dolthub/maphash/runtime.go deleted file mode 100644 index 29cd6a8e..00000000 --- a/vendor/github.com/dolthub/maphash/runtime.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2022 Dolthub, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// This file incorporates work covered by the following copyright and -// permission notice: -// -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 || go1.19 -// +build go1.18 go1.19 - -package maphash - -import ( - "math/rand" - "unsafe" -) - -type hashfn func(unsafe.Pointer, uintptr) uintptr - -func getRuntimeHasher[K comparable]() (h hashfn) { - a := any(make(map[K]struct{})) - i := (*mapiface)(unsafe.Pointer(&a)) - h = i.typ.hasher - return -} - -func newHashSeed() uintptr { - return uintptr(rand.Int()) -} - -// noescape hides a pointer from escape analysis. It is the identity function -// but escape analysis doesn't think the output depends on the input. -// noescape is inlined and currently compiles down to zero instructions. -// USE CAREFULLY! -// This was copied from the runtime (via pkg "strings"); see issues 23382 and 7921. -// -//go:nosplit -//go:nocheckptr -func noescape(p unsafe.Pointer) unsafe.Pointer { - x := uintptr(p) - return unsafe.Pointer(x ^ 0) -} - -type mapiface struct { - typ *maptype - val *hmap -} - -// go/src/runtime/type.go -type maptype struct { - typ _type - key *_type - elem *_type - bucket *_type - // function for hashing keys (ptr to key, seed) -> hash - hasher func(unsafe.Pointer, uintptr) uintptr - keysize uint8 - elemsize uint8 - bucketsize uint16 - flags uint32 -} - -// go/src/runtime/map.go -type hmap struct { - count int - flags uint8 - B uint8 - noverflow uint16 - // hash seed - hash0 uint32 - buckets unsafe.Pointer - oldbuckets unsafe.Pointer - nevacuate uintptr - // true type is *mapextra - // but we don't need this data - extra unsafe.Pointer -} - -// go/src/runtime/type.go -type tflag uint8 -type nameOff int32 -type typeOff int32 - -// go/src/runtime/type.go -type _type struct { - size uintptr - ptrdata uintptr - hash uint32 - tflag tflag - align uint8 - fieldAlign uint8 - kind uint8 - equal func(unsafe.Pointer, unsafe.Pointer) bool - gcdata *byte - str nameOff - ptrToThis typeOff -} diff --git a/vendor/github.com/ebitengine/purego/.gitignore b/vendor/github.com/ebitengine/purego/.gitignore new file mode 100644 index 00000000..b25c15b8 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/.gitignore @@ -0,0 +1 @@ +*~ diff --git a/vendor/github.com/dolthub/maphash/LICENSE b/vendor/github.com/ebitengine/purego/LICENSE similarity index 99% rename from vendor/github.com/dolthub/maphash/LICENSE rename to vendor/github.com/ebitengine/purego/LICENSE index 261eeb9e..8dada3ed 100644 --- a/vendor/github.com/dolthub/maphash/LICENSE +++ b/vendor/github.com/ebitengine/purego/LICENSE @@ -178,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. 
We also recommend that a @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/ebitengine/purego/README.md b/vendor/github.com/ebitengine/purego/README.md new file mode 100644 index 00000000..523e9118 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/README.md @@ -0,0 +1,113 @@ +# purego +[![Go Reference](https://pkg.go.dev/badge/github.com/ebitengine/purego?GOOS=darwin.svg)](https://pkg.go.dev/github.com/ebitengine/purego?GOOS=darwin) + +A library for calling C functions from Go without Cgo. + +> This is beta software so expect bugs and potentially API breaking changes +> but each release will be tagged to avoid breaking people's code. +> Bug reports are encouraged. + +## Motivation + +The [Ebitengine](https://github.com/hajimehoshi/ebiten) game engine was ported to use only Go on Windows. This enabled +cross-compiling to Windows from any other operating system simply by setting `GOOS=windows`. The purego project was +born to bring that same vision to the other platforms supported by Ebitengine. + +## Benefits + +- **Simple Cross-Compilation**: No C means you can build for other platforms easily without a C compiler. +- **Faster Compilation**: Efficiently cache your entirely Go builds. +- **Smaller Binaries**: Using Cgo generates a C wrapper function for each C function called. Purego doesn't! +- **Dynamic Linking**: Load symbols at runtime and use it as a plugin system. +- **Foreign Function Interface**: Call into other languages that are compiled into shared objects. +- **Cgo Fallback**: Works even with CGO_ENABLED=1 so incremental porting is possible. +This also means unsupported GOARCHs (freebsd/riscv64, linux/mips, etc.) will still work +except for float arguments and return values. + +## Supported Platforms + +### Tier 1 + +Tier 1 platforms are the primary targets officially supported by PureGo. When a new version of PureGo is released, any critical bugs found on Tier 1 platforms are treated as release blockers. The release will be postponed until such issues are resolved. + +- **Android**: amd64, arm64 +- **iOS**: amd64, arm64 +- **Linux**: amd64, arm64 +- **macOS**: amd64, arm64 +- **Windows**: amd64, arm64 + +### Tier 2 + +Tier 2 platforms are supported by PureGo on a best-effort basis. Critical bugs on Tier 2 platforms do not block new PureGo releases. However, fixes contributed by external contributors are very welcome and encouraged. + +- **Android**: 386, arm +- **FreeBSD**: amd64, arm64 +- **Linux**: 386, arm, loong64 +- **Windows**: 386*, arm* + +`*` These architectures only support `SyscallN` and `NewCallback` + +## Example + +The example below only showcases purego use for macOS and Linux. The other platforms require special handling which can +be seen in the complete example at [examples/libc](https://github.com/ebitengine/purego/tree/main/examples/libc) which supports FreeBSD and Windows. 
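For contrast, here is a minimal sketch of the lower-level route the same call can take: resolving the symbol by hand with `Dlopen`/`Dlsym` and binding it with `RegisterFunc` instead of `RegisterLibFunc`. The library path and the `puts` symbol follow the Linux case from the example below; treat them as illustrative assumptions, not part of this patch.

```go
package main

import "github.com/ebitengine/purego"

func main() {
	// Load libc and keep the handle so Dlopen/Dlclose calls stay balanced.
	libc, err := purego.Dlopen("libc.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}
	defer purego.Dlclose(libc)

	// Resolve the symbol address explicitly...
	putsAddr, err := purego.Dlsym(libc, "puts")
	if err != nil {
		panic(err)
	}

	// ...and bind it to a typed Go function; purego converts string to char*.
	var puts func(string)
	purego.RegisterFunc(&puts, putsAddr)
	puts("resolved by hand with Dlsym")
}
```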
+ +```go +package main + +import ( + "fmt" + "runtime" + + "github.com/ebitengine/purego" +) + +func getSystemLibrary() string { + switch runtime.GOOS { + case "darwin": + return "/usr/lib/libSystem.B.dylib" + case "linux": + return "libc.so.6" + default: + panic(fmt.Errorf("GOOS=%s is not supported", runtime.GOOS)) + } +} + +func main() { + libc, err := purego.Dlopen(getSystemLibrary(), purego.RTLD_NOW|purego.RTLD_GLOBAL) + if err != nil { + panic(err) + } + var puts func(string) + purego.RegisterLibFunc(&puts, libc, "puts") + puts("Calling C from Go without Cgo!") +} +``` + +Then to run: `CGO_ENABLED=0 go run main.go` + +## Questions + +If you have questions about how to incorporate purego in your project or want to discuss +how it works join the [Discord](https://discord.gg/HzGZVD6BkY)! + +### External Code + +Purego uses code that originates from the Go runtime. These files are under the BSD-3 +License that can be found [in the Go Source](https://github.com/golang/go/blob/master/LICENSE). +This is a list of the copied files: + +* `abi_*.h` from package `runtime/cgo` +* `wincallback.go` from package `runtime` +* `zcallback_darwin_*.s` from package `runtime` +* `internal/fakecgo/abi_*.h` from package `runtime/cgo` +* `internal/fakecgo/asm_GOARCH.s` from package `runtime/cgo` +* `internal/fakecgo/callbacks.go` from package `runtime/cgo` +* `internal/fakecgo/go_GOOS_GOARCH.go` from package `runtime/cgo` +* `internal/fakecgo/iscgo.go` from package `runtime/cgo` +* `internal/fakecgo/setenv.go` from package `runtime/cgo` +* `internal/fakecgo/freebsd.go` from package `runtime/cgo` +* `internal/fakecgo/netbsd.go` from package `runtime/cgo` + +The files `abi_*.h` and `internal/fakecgo/abi_*.h` are the same because Bazel does not support cross-package use of +`#include` so we need each one once per package. (cf. [issue](https://github.com/bazelbuild/rules_go/issues/3636)) diff --git a/vendor/github.com/ebitengine/purego/abi_amd64.h b/vendor/github.com/ebitengine/purego/abi_amd64.h new file mode 100644 index 00000000..9949435f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/abi_amd64.h @@ -0,0 +1,99 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These save the frame pointer, so in general, functions that use +// these should have zero frame size to suppress the automatic frame +// pointer, though it's harmless to not do this. + +#ifdef GOOS_windows + +// REGS_HOST_TO_ABI0_STACK is the stack bytes used by +// PUSH_REGS_HOST_TO_ABI0. +#define REGS_HOST_TO_ABI0_STACK (28*8 + 8) + +// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from +// the host ABI to Go ABI0 code. It saves all registers that are +// callee-save in the host ABI and caller-save in Go ABI0 and prepares +// for entry to Go. +// +// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag. +// Clear the DF flag for the Go ABI. +// MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. 
+#define PUSH_REGS_HOST_TO_ABI0() \ + PUSHFQ \ + CLD \ + ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \ + MOVQ DI, (0*0)(SP) \ + MOVQ SI, (1*8)(SP) \ + MOVQ BP, (2*8)(SP) \ + MOVQ BX, (3*8)(SP) \ + MOVQ R12, (4*8)(SP) \ + MOVQ R13, (5*8)(SP) \ + MOVQ R14, (6*8)(SP) \ + MOVQ R15, (7*8)(SP) \ + MOVUPS X6, (8*8)(SP) \ + MOVUPS X7, (10*8)(SP) \ + MOVUPS X8, (12*8)(SP) \ + MOVUPS X9, (14*8)(SP) \ + MOVUPS X10, (16*8)(SP) \ + MOVUPS X11, (18*8)(SP) \ + MOVUPS X12, (20*8)(SP) \ + MOVUPS X13, (22*8)(SP) \ + MOVUPS X14, (24*8)(SP) \ + MOVUPS X15, (26*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*0)(SP), DI \ + MOVQ (1*8)(SP), SI \ + MOVQ (2*8)(SP), BP \ + MOVQ (3*8)(SP), BX \ + MOVQ (4*8)(SP), R12 \ + MOVQ (5*8)(SP), R13 \ + MOVQ (6*8)(SP), R14 \ + MOVQ (7*8)(SP), R15 \ + MOVUPS (8*8)(SP), X6 \ + MOVUPS (10*8)(SP), X7 \ + MOVUPS (12*8)(SP), X8 \ + MOVUPS (14*8)(SP), X9 \ + MOVUPS (16*8)(SP), X10 \ + MOVUPS (18*8)(SP), X11 \ + MOVUPS (20*8)(SP), X12 \ + MOVUPS (22*8)(SP), X13 \ + MOVUPS (24*8)(SP), X14 \ + MOVUPS (26*8)(SP), X15 \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \ + POPFQ + +#else +// SysV ABI + +#define REGS_HOST_TO_ABI0_STACK (6*8) + +// SysV MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +// Both SysV and Go require DF to be cleared, so that's already clear. +// The SysV and Go frame pointer conventions are compatible. +#define PUSH_REGS_HOST_TO_ABI0() \ + ADJSP $(REGS_HOST_TO_ABI0_STACK) \ + MOVQ BP, (5*8)(SP) \ + LEAQ (5*8)(SP), BP \ + MOVQ BX, (0*8)(SP) \ + MOVQ R12, (1*8)(SP) \ + MOVQ R13, (2*8)(SP) \ + MOVQ R14, (3*8)(SP) \ + MOVQ R15, (4*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*8)(SP), BX \ + MOVQ (1*8)(SP), R12 \ + MOVQ (2*8)(SP), R13 \ + MOVQ (3*8)(SP), R14 \ + MOVQ (4*8)(SP), R15 \ + MOVQ (5*8)(SP), BP \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK) + +#endif diff --git a/vendor/github.com/ebitengine/purego/abi_arm64.h b/vendor/github.com/ebitengine/purego/abi_arm64.h new file mode 100644 index 00000000..5d5061ec --- /dev/null +++ b/vendor/github.com/ebitengine/purego/abi_arm64.h @@ -0,0 +1,39 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP). +// +// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP). +// +// R29 is not saved because Go will save and restore it. 
+ +#define SAVE_R19_TO_R28(offset) \ + STP (R19, R20), ((offset)+0*8)(RSP) \ + STP (R21, R22), ((offset)+2*8)(RSP) \ + STP (R23, R24), ((offset)+4*8)(RSP) \ + STP (R25, R26), ((offset)+6*8)(RSP) \ + STP (R27, g), ((offset)+8*8)(RSP) +#define RESTORE_R19_TO_R28(offset) \ + LDP ((offset)+0*8)(RSP), (R19, R20) \ + LDP ((offset)+2*8)(RSP), (R21, R22) \ + LDP ((offset)+4*8)(RSP), (R23, R24) \ + LDP ((offset)+6*8)(RSP), (R25, R26) \ + LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */ +#define SAVE_F8_TO_F15(offset) \ + FSTPD (F8, F9), ((offset)+0*8)(RSP) \ + FSTPD (F10, F11), ((offset)+2*8)(RSP) \ + FSTPD (F12, F13), ((offset)+4*8)(RSP) \ + FSTPD (F14, F15), ((offset)+6*8)(RSP) +#define RESTORE_F8_TO_F15(offset) \ + FLDPD ((offset)+0*8)(RSP), (F8, F9) \ + FLDPD ((offset)+2*8)(RSP), (F10, F11) \ + FLDPD ((offset)+4*8)(RSP), (F12, F13) \ + FLDPD ((offset)+6*8)(RSP), (F14, F15) diff --git a/vendor/github.com/ebitengine/purego/abi_loong64.h b/vendor/github.com/ebitengine/purego/abi_loong64.h new file mode 100644 index 00000000..b10d8373 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/abi_loong64.h @@ -0,0 +1,60 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R22_TO_R31(offset) saves R22 ~ R31 to the stack space +// of ((offset)+0*8)(R3) ~ ((offset)+9*8)(R3). +// +// SAVE_F24_TO_F31(offset) saves F24 ~ F31 to the stack space +// of ((offset)+0*8)(R3) ~ ((offset)+7*8)(R3). +// +// Note: g is R22 + +#define SAVE_R22_TO_R31(offset) \ + MOVV g, ((offset)+(0*8))(R3) \ + MOVV R23, ((offset)+(1*8))(R3) \ + MOVV R24, ((offset)+(2*8))(R3) \ + MOVV R25, ((offset)+(3*8))(R3) \ + MOVV R26, ((offset)+(4*8))(R3) \ + MOVV R27, ((offset)+(5*8))(R3) \ + MOVV R28, ((offset)+(6*8))(R3) \ + MOVV R29, ((offset)+(7*8))(R3) \ + MOVV R30, ((offset)+(8*8))(R3) \ + MOVV R31, ((offset)+(9*8))(R3) + +#define SAVE_F24_TO_F31(offset) \ + MOVD F24, ((offset)+(0*8))(R3) \ + MOVD F25, ((offset)+(1*8))(R3) \ + MOVD F26, ((offset)+(2*8))(R3) \ + MOVD F27, ((offset)+(3*8))(R3) \ + MOVD F28, ((offset)+(4*8))(R3) \ + MOVD F29, ((offset)+(5*8))(R3) \ + MOVD F30, ((offset)+(6*8))(R3) \ + MOVD F31, ((offset)+(7*8))(R3) + +#define RESTORE_R22_TO_R31(offset) \ + MOVV ((offset)+(0*8))(R3), g \ + MOVV ((offset)+(1*8))(R3), R23 \ + MOVV ((offset)+(2*8))(R3), R24 \ + MOVV ((offset)+(3*8))(R3), R25 \ + MOVV ((offset)+(4*8))(R3), R26 \ + MOVV ((offset)+(5*8))(R3), R27 \ + MOVV ((offset)+(6*8))(R3), R28 \ + MOVV ((offset)+(7*8))(R3), R29 \ + MOVV ((offset)+(8*8))(R3), R30 \ + MOVV ((offset)+(9*8))(R3), R31 + +#define RESTORE_F24_TO_F31(offset) \ + MOVD ((offset)+(0*8))(R3), F24 \ + MOVD ((offset)+(1*8))(R3), F25 \ + MOVD ((offset)+(2*8))(R3), F26 \ + MOVD ((offset)+(3*8))(R3), F27 \ + MOVD ((offset)+(4*8))(R3), F28 \ + MOVD ((offset)+(5*8))(R3), F29 \ + MOVD ((offset)+(6*8))(R3), F30 \ + MOVD ((offset)+(7*8))(R3), F31 diff --git a/vendor/github.com/ebitengine/purego/cgo.go b/vendor/github.com/ebitengine/purego/cgo.go new file mode 100644 index 00000000..32bb256a --- /dev/null +++ b/vendor/github.com/ebitengine/purego/cgo.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build cgo && (darwin || freebsd || linux || 
netbsd) + +package purego + +// if CGO_ENABLED=1 import the Cgo runtime to ensure that it is set up properly. +// This is required since some frameworks need TLS setup the C way which Go doesn't do. +// We currently don't support ios in fakecgo mode so force Cgo or fail +// Even if CGO_ENABLED=1 the Cgo runtime is not imported unless `import "C"` is used. +// which will import this package automatically. Normally this isn't an issue since it +// usually isn't possible to call into C without using that import. However, with purego +// it is since we don't use `import "C"`! +import ( + _ "runtime/cgo" + + _ "github.com/ebitengine/purego/internal/cgo" +) diff --git a/vendor/github.com/ebitengine/purego/dlerror.go b/vendor/github.com/ebitengine/purego/dlerror.go new file mode 100644 index 00000000..ad52b436 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlerror.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 The Ebitengine Authors + +//go:build darwin || freebsd || linux || netbsd + +package purego + +// Dlerror represents an error value returned from Dlopen, Dlsym, or Dlclose. +// +// This type is not available on Windows as there is no counterpart to it on Windows. +type Dlerror struct { + s string +} + +func (e Dlerror) Error() string { + return e.s +} diff --git a/vendor/github.com/ebitengine/purego/dlfcn.go b/vendor/github.com/ebitengine/purego/dlfcn.go new file mode 100644 index 00000000..2730d82c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn.go @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build (darwin || freebsd || linux || netbsd) && !android && !faketime + +package purego + +import ( + "unsafe" +) + +// Unix Specification for dlfcn.h: https://pubs.opengroup.org/onlinepubs/7908799/xsh/dlfcn.h.html + +var ( + fnDlopen func(path string, mode int) uintptr + fnDlsym func(handle uintptr, name string) uintptr + fnDlerror func() string + fnDlclose func(handle uintptr) bool +) + +func init() { + RegisterFunc(&fnDlopen, dlopenABI0) + RegisterFunc(&fnDlsym, dlsymABI0) + RegisterFunc(&fnDlerror, dlerrorABI0) + RegisterFunc(&fnDlclose, dlcloseABI0) +} + +// Dlopen examines the dynamic library or bundle file specified by path. If the file is compatible +// with the current process and has not already been loaded into the +// current process, it is loaded and linked. After being linked, if it contains +// any initializer functions, they are called, before Dlopen +// returns. It returns a handle that can be used with Dlsym and Dlclose. +// A second call to Dlopen with the same path will return the same handle, but the internal +// reference count for the handle will be incremented. Therefore, all +// Dlopen calls should be balanced with a Dlclose call. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.LoadLibrary], [golang.org/x/sys/windows.LoadLibraryEx], +// [golang.org/x/sys/windows.NewLazyDLL], or [golang.org/x/sys/windows.NewLazySystemDLL] for Windows instead. +func Dlopen(path string, mode int) (uintptr, error) { + u := fnDlopen(path, mode) + if u == 0 { + return 0, Dlerror{fnDlerror()} + } + return u, nil +} + +// Dlsym takes a "handle" of a dynamic library returned by Dlopen and the symbol name. +// It returns the address where that symbol is loaded into memory. 
If the symbol is not found, +// in the specified library or any of the libraries that were automatically loaded by Dlopen +// when that library was loaded, Dlsym returns zero. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.GetProcAddress] for Windows instead. +func Dlsym(handle uintptr, name string) (uintptr, error) { + u := fnDlsym(handle, name) + if u == 0 { + return 0, Dlerror{fnDlerror()} + } + return u, nil +} + +// Dlclose decrements the reference count on the dynamic library handle. +// If the reference count drops to zero and no other loaded libraries +// use symbols in it, then the dynamic library is unloaded. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.FreeLibrary] for Windows instead. +func Dlclose(handle uintptr) error { + if fnDlclose(handle) { + return Dlerror{fnDlerror()} + } + return nil +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return Dlsym(handle, name) +} + +// these functions exist in dlfcn_stubs.s and are calling C functions linked to in dlfcn_GOOS.go +// the indirection is necessary because a function is actually a pointer to the pointer to the code. +// sadly, I do not know of anyway to remove the assembly stubs entirely because //go:linkname doesn't +// appear to work if you link directly to the C function on darwin arm64. + +//go:linkname dlopen dlopen +var dlopen uint8 +var dlopenABI0 = uintptr(unsafe.Pointer(&dlopen)) + +//go:linkname dlsym dlsym +var dlsym uint8 +var dlsymABI0 = uintptr(unsafe.Pointer(&dlsym)) + +//go:linkname dlclose dlclose +var dlclose uint8 +var dlcloseABI0 = uintptr(unsafe.Pointer(&dlclose)) + +//go:linkname dlerror dlerror +var dlerror uint8 +var dlerrorABI0 = uintptr(unsafe.Pointer(&dlerror)) diff --git a/vendor/github.com/ebitengine/purego/dlfcn_android.go b/vendor/github.com/ebitengine/purego/dlfcn_android.go new file mode 100644 index 00000000..0d534176 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_android.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import "github.com/ebitengine/purego/internal/cgo" + +// Source for constants: https://android.googlesource.com/platform/bionic/+/refs/heads/main/libc/include/dlfcn.h + +const ( + is64bit = 1 << (^uintptr(0) >> 63) / 2 + is32bit = 1 - is64bit + RTLD_DEFAULT = is32bit * 0xffffffff + RTLD_LAZY = 0x00000001 + RTLD_NOW = is64bit * 0x00000002 + RTLD_LOCAL = 0x00000000 + RTLD_GLOBAL = is64bit*0x00100 | is32bit*0x00000002 +) + +func Dlopen(path string, mode int) (uintptr, error) { + return cgo.Dlopen(path, mode) +} + +func Dlsym(handle uintptr, name string) (uintptr, error) { + return cgo.Dlsym(handle, name) +} + +func Dlclose(handle uintptr) error { + return cgo.Dlclose(handle) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return Dlsym(handle, name) +} diff --git a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go new file mode 100644 index 00000000..27f56071 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +// Source for constants: https://opensource.apple.com/source/dyld/dyld-360.14/include/dlfcn.h.auto.html + +const ( + RTLD_DEFAULT = 1<<64 - 2 // Pseudo-handle for dlsym so search for any loaded symbol + RTLD_LAZY = 0x1 // Relocations are 
performed at an implementation-dependent time. + RTLD_NOW = 0x2 // Relocations are performed when the object is loaded. + RTLD_LOCAL = 0x4 // All symbols are not made available for relocation processing by other modules. + RTLD_GLOBAL = 0x8 // All symbols are available for relocation processing of other modules. +) + +//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib" diff --git a/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go new file mode 100644 index 00000000..6b371620 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +// Constants as defined in https://github.com/freebsd/freebsd-src/blob/main/include/dlfcn.h +const ( + intSize = 32 << (^uint(0) >> 63) // 32 or 64 + RTLD_DEFAULT = 1<> 63) // 32 or 64 + RTLD_DEFAULT = 1< C) +// +// string <=> char* +// bool <=> _Bool +// uintptr <=> uintptr_t +// uint <=> uint32_t or uint64_t +// uint8 <=> uint8_t +// uint16 <=> uint16_t +// uint32 <=> uint32_t +// uint64 <=> uint64_t +// int <=> int32_t or int64_t +// int8 <=> int8_t +// int16 <=> int16_t +// int32 <=> int32_t +// int64 <=> int64_t +// float32 <=> float +// float64 <=> double +// struct <=> struct (WIP - darwin only) +// func <=> C function +// unsafe.Pointer, *T <=> void* +// []T => void* +// +// There is a special case when the last argument of fptr is a variadic interface (or []interface} +// it will be expanded into a call to the C function as if it had the arguments in that slice. +// This means that using arg ...any is like a cast to the function with the arguments inside arg. +// This is not the same as C variadic. +// +// # Memory +// +// In general it is not possible for purego to guarantee the lifetimes of objects returned or received from +// calling functions using RegisterFunc. For arguments to a C function it is important that the C function doesn't +// hold onto a reference to Go memory. This is the same as the [Cgo rules]. +// +// However, there are some special cases. When passing a string as an argument if the string does not end in a null +// terminated byte (\x00) then the string will be copied into memory maintained by purego. The memory is only valid for +// that specific call. Therefore, if the C code keeps a reference to that string it may become invalid at some +// undefined time. However, if the string does already contain a null-terminated byte then no copy is done. +// It is then the responsibility of the caller to ensure the string stays alive as long as it's needed in C memory. +// This can be done using runtime.KeepAlive or allocating the string in C memory using malloc. When a C function +// returns a null-terminated pointer to char a Go string can be used. Purego will allocate a new string in Go memory +// and copy the data over. This string will be garbage collected whenever Go decides it's no longer referenced. +// This C created string will not be freed by purego. If the pointer to char is not null-terminated or must continue +// to point to C memory (because it's a buffer for example) then use a pointer to byte and then convert that to a slice +// using unsafe.Slice. 
Doing this means that it becomes the responsibility of the caller to care about the lifetime +// of the pointer +// +// # Structs +// +// Purego can handle the most common structs that have fields of builtin types like int8, uint16, float32, etc. However, +// it does not support aligning fields properly. It is therefore the responsibility of the caller to ensure +// that all padding is added to the Go struct to match the C one. See `BoolStructFn` in struct_test.go for an example. +// +// # Example +// +// All functions below call this C function: +// +// char *foo(char *str); +// +// // Let purego convert types +// var foo func(s string) string +// goString := foo("copied") +// // Go will garbage collect this string +// +// // Manually, handle allocations +// var foo2 func(b string) *byte +// mustFree := foo2("not copied\x00") +// defer free(mustFree) +// +// [Cgo rules]: https://pkg.go.dev/cmd/cgo#hdr-Go_references_to_C +func RegisterFunc(fptr any, cfn uintptr) { + fn := reflect.ValueOf(fptr).Elem() + ty := fn.Type() + if ty.Kind() != reflect.Func { + panic("purego: fptr must be a function pointer") + } + if ty.NumOut() > 1 { + panic("purego: function can only return zero or one values") + } + if cfn == 0 { + panic("purego: cfn is nil") + } + if ty.NumOut() == 1 && (ty.Out(0).Kind() == reflect.Float32 || ty.Out(0).Kind() == reflect.Float64) && + runtime.GOARCH != "arm64" && runtime.GOARCH != "amd64" && runtime.GOARCH != "loong64" { + panic("purego: float returns are not supported") + } + { + // this code checks how many registers and stack this function will use + // to avoid crashing with too many arguments + var ints int + var floats int + var stack int + for i := 0; i < ty.NumIn(); i++ { + arg := ty.In(i) + switch arg.Kind() { + case reflect.Func: + // This only does preliminary testing to ensure the CDecl argument + // is the first argument. Full testing is done when the callback is actually + // created in NewCallback. 
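The conversion and memory rules in the RegisterFunc documentation above are easier to follow with a concrete binding. A small sketch, assuming a Linux libc obtained with Dlopen as in the README example; `strlen` and `getenv` are standard libc symbols used purely for illustration.

```go
package main

import (
	"fmt"

	"github.com/ebitengine/purego"
)

func main() {
	// Assumes Linux; see the README example for per-OS library paths.
	libc, err := purego.Dlopen("libc.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}

	// size_t strlen(const char *s): the Go string is passed as char*
	// (copied, since it has no trailing NUL) and the result maps to int.
	var strlen func(string) int
	addr, err := purego.Dlsym(libc, "strlen")
	if err != nil {
		panic(err)
	}
	purego.RegisterFunc(&strlen, addr)

	// char *getenv(const char *name): a returned null-terminated char* can be
	// declared as a Go string; purego copies it into Go-managed memory.
	var getenv func(string) string
	addr, err = purego.Dlsym(libc, "getenv")
	if err != nil {
		panic(err)
	}
	purego.RegisterFunc(&getenv, addr)

	// PATH is assumed to be set in the environment.
	fmt.Println(strlen("hello"), getenv("PATH"))
}
```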
+ for j := 0; j < arg.NumIn(); j++ { + in := arg.In(j) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if j != 0 { + panic("purego: CDecl must be the first argument") + } + } + case reflect.String, reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Ptr, reflect.UnsafePointer, + reflect.Slice, reflect.Bool: + if ints < numOfIntegerRegisters() { + ints++ + } else { + stack++ + } + case reflect.Float32, reflect.Float64: + const is32bit = unsafe.Sizeof(uintptr(0)) == 4 + if is32bit { + panic("purego: floats only supported on 64bit platforms") + } + if floats < numOfFloatRegisters { + floats++ + } else { + stack++ + } + case reflect.Struct: + if runtime.GOOS != "darwin" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "arm64") { + panic("purego: struct arguments are only supported on darwin amd64 & arm64") + } + if arg.Size() == 0 { + continue + } + addInt := func(u uintptr) { + ints++ + } + addFloat := func(u uintptr) { + floats++ + } + addStack := func(u uintptr) { + stack++ + } + _ = addStruct(reflect.New(arg).Elem(), &ints, &floats, &stack, addInt, addFloat, addStack, nil) + default: + panic("purego: unsupported kind " + arg.Kind().String()) + } + } + if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct { + if runtime.GOOS != "darwin" { + panic("purego: struct return values only supported on darwin arm64 & amd64") + } + outType := ty.Out(0) + checkStructFieldsSupported(outType) + if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize { + // on amd64 if struct is bigger than 16 bytes allocate the return struct + // and pass it in as a hidden first argument. + ints++ + } + } + sizeOfStack := maxArgs - numOfIntegerRegisters() + if stack > sizeOfStack { + panic("purego: too many arguments") + } + } + v := reflect.MakeFunc(ty, func(args []reflect.Value) (results []reflect.Value) { + var sysargs [maxArgs]uintptr + var floats [numOfFloatRegisters]uintptr + var numInts int + var numFloats int + var numStack int + var addStack, addInt, addFloat func(x uintptr) + if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" { + // Windows arm64 uses the same calling convention as macOS and Linux + addStack = func(x uintptr) { + sysargs[numOfIntegerRegisters()+numStack] = x + numStack++ + } + addInt = func(x uintptr) { + if numInts >= numOfIntegerRegisters() { + addStack(x) + } else { + sysargs[numInts] = x + numInts++ + } + } + addFloat = func(x uintptr) { + if numFloats < len(floats) { + floats[numFloats] = x + numFloats++ + } else { + addStack(x) + } + } + } else { + // On Windows amd64 the arguments are passed in the numbered registered. + // So the first int is in the first integer register and the first float + // is in the second floating register if there is already a first int. + // This is in contrast to how macOS and Linux pass arguments which + // tries to use as many registers as possible in the calling convention. 
+ addStack = func(x uintptr) { + sysargs[numStack] = x + numStack++ + } + addInt = addStack + addFloat = addStack + } + + var keepAlive []any + defer func() { + runtime.KeepAlive(keepAlive) + runtime.KeepAlive(args) + }() + + var arm64_r8 uintptr + if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct { + outType := ty.Out(0) + if (runtime.GOARCH == "amd64" || runtime.GOARCH == "loong64") && outType.Size() > maxRegAllocStructSize { + val := reflect.New(outType) + keepAlive = append(keepAlive, val) + addInt(val.Pointer()) + } else if runtime.GOARCH == "arm64" && outType.Size() > maxRegAllocStructSize { + isAllFloats, numFields := isAllSameFloat(outType) + if !isAllFloats || numFields > 4 { + val := reflect.New(outType) + keepAlive = append(keepAlive, val) + arm64_r8 = val.Pointer() + } + } + } + for i, v := range args { + if variadic, ok := args[i].Interface().([]any); ok { + if i != len(args)-1 { + panic("purego: can only expand last parameter") + } + for _, x := range variadic { + keepAlive = addValue(reflect.ValueOf(x), keepAlive, addInt, addFloat, addStack, &numInts, &numFloats, &numStack) + } + continue + } + if runtime.GOARCH == "arm64" && runtime.GOOS == "darwin" && + (numInts >= numOfIntegerRegisters() || numFloats >= numOfFloatRegisters) && v.Kind() != reflect.Struct { // hit the stack + fields := make([]reflect.StructField, len(args[i:])) + + for j, val := range args[i:] { + if val.Kind() == reflect.String { + ptr := strings.CString(val.String()) + keepAlive = append(keepAlive, ptr) + val = reflect.ValueOf(ptr) + args[i+j] = val + } + fields[j] = reflect.StructField{ + Name: "X" + strconv.Itoa(j), + Type: val.Type(), + } + } + structType := reflect.StructOf(fields) + structInstance := reflect.New(structType).Elem() + for j, val := range args[i:] { + structInstance.Field(j).Set(val) + } + placeRegisters(structInstance, addFloat, addInt) + break + } + keepAlive = addValue(v, keepAlive, addInt, addFloat, addStack, &numInts, &numFloats, &numStack) + } + + syscall := thePool.Get().(*syscall15Args) + defer thePool.Put(syscall) + + if runtime.GOARCH == "loong64" { + *syscall = syscall15Args{ + cfn, + sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], sysargs[5], + sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14], + floats[0], floats[1], floats[2], floats[3], floats[4], floats[5], floats[6], floats[7], + 0, + } + runtime_cgocall(syscall15XABI0, unsafe.Pointer(syscall)) + } else if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" { + // Use the normal arm64 calling convention even on Windows + *syscall = syscall15Args{ + cfn, + sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], sysargs[5], + sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14], + floats[0], floats[1], floats[2], floats[3], floats[4], floats[5], floats[6], floats[7], + arm64_r8, + } + runtime_cgocall(syscall15XABI0, unsafe.Pointer(syscall)) + } else { + *syscall = syscall15Args{} + // This is a fallback for Windows amd64, 386, and arm. Note this may not support floats + syscall.a1, syscall.a2, _ = syscall_syscall15X(cfn, sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], + sysargs[5], sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14]) + syscall.f1 = syscall.a2 // on amd64 a2 stores the float return. 
On 32bit platforms floats aren't support + } + if ty.NumOut() == 0 { + return nil + } + outType := ty.Out(0) + v := reflect.New(outType).Elem() + switch outType.Kind() { + case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v.SetUint(uint64(syscall.a1)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v.SetInt(int64(syscall.a1)) + case reflect.Bool: + v.SetBool(byte(syscall.a1) != 0) + case reflect.UnsafePointer: + // We take the address and then dereference it to trick go vet from creating a possible miss-use of unsafe.Pointer + v.SetPointer(*(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1))) + case reflect.Ptr: + v = reflect.NewAt(outType, unsafe.Pointer(&syscall.a1)).Elem() + case reflect.Func: + // wrap this C function in a nicely typed Go function + v = reflect.New(outType) + RegisterFunc(v.Interface(), syscall.a1) + case reflect.String: + v.SetString(strings.GoString(syscall.a1)) + case reflect.Float32: + // NOTE: syscall.r2 is only the floating return value on 64bit platforms. + // On 32bit platforms syscall.r2 is the upper part of a 64bit return. + v.SetFloat(float64(math.Float32frombits(uint32(syscall.f1)))) + case reflect.Float64: + // NOTE: syscall.r2 is only the floating return value on 64bit platforms. + // On 32bit platforms syscall.r2 is the upper part of a 64bit return. + v.SetFloat(math.Float64frombits(uint64(syscall.f1))) + case reflect.Struct: + v = getStruct(outType, *syscall) + default: + panic("purego: unsupported return kind: " + outType.Kind().String()) + } + if len(args) > 0 { + // reuse args slice instead of allocating one when possible + args[0] = v + return args[:1] + } else { + return []reflect.Value{v} + } + }) + fn.Set(v) +} + +func addValue(v reflect.Value, keepAlive []any, addInt func(x uintptr), addFloat func(x uintptr), addStack func(x uintptr), numInts *int, numFloats *int, numStack *int) []any { + switch v.Kind() { + case reflect.String: + ptr := strings.CString(v.String()) + keepAlive = append(keepAlive, ptr) + addInt(uintptr(unsafe.Pointer(ptr))) + case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + addInt(uintptr(v.Uint())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + addInt(uintptr(v.Int())) + case reflect.Ptr, reflect.UnsafePointer, reflect.Slice: + // There is no need to keepAlive this pointer separately because it is kept alive in the args variable + addInt(v.Pointer()) + case reflect.Func: + addInt(NewCallback(v.Interface())) + case reflect.Bool: + if v.Bool() { + addInt(1) + } else { + addInt(0) + } + case reflect.Float32: + addFloat(uintptr(math.Float32bits(float32(v.Float())))) + case reflect.Float64: + addFloat(uintptr(math.Float64bits(v.Float()))) + case reflect.Struct: + keepAlive = addStruct(v, numInts, numFloats, numStack, addInt, addFloat, addStack, keepAlive) + default: + panic("purego: unsupported kind: " + v.Kind().String()) + } + return keepAlive +} + +// maxRegAllocStructSize is the biggest a struct can be while still fitting in registers. +// if it is bigger than this than enough space must be allocated on the heap and then passed into +// the function as the first parameter on amd64 or in R8 on arm64. 
+// +// If you change this make sure to update it in objc_runtime_darwin.go +const maxRegAllocStructSize = 16 + +func isAllSameFloat(ty reflect.Type) (allFloats bool, numFields int) { + allFloats = true + root := ty.Field(0).Type + for root.Kind() == reflect.Struct { + root = root.Field(0).Type + } + first := root.Kind() + if first != reflect.Float32 && first != reflect.Float64 { + allFloats = false + } + for i := 0; i < ty.NumField(); i++ { + f := ty.Field(i).Type + if f.Kind() == reflect.Struct { + var structNumFields int + allFloats, structNumFields = isAllSameFloat(f) + numFields += structNumFields + continue + } + numFields++ + if f.Kind() != first { + allFloats = false + } + } + return allFloats, numFields +} + +func checkStructFieldsSupported(ty reflect.Type) { + for i := 0; i < ty.NumField(); i++ { + f := ty.Field(i).Type + if f.Kind() == reflect.Array { + f = f.Elem() + } else if f.Kind() == reflect.Struct { + checkStructFieldsSupported(f) + continue + } + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Uintptr, reflect.Ptr, reflect.UnsafePointer, reflect.Float64, reflect.Float32: + default: + panic(fmt.Sprintf("purego: struct field type %s is not supported", f)) + } + } +} + +func roundUpTo8(val uintptr) uintptr { + return (val + 7) &^ 7 +} + +func numOfIntegerRegisters() int { + switch runtime.GOARCH { + case "arm64", "loong64": + return 8 + case "amd64": + return 6 + default: + // since this platform isn't supported and can therefore only access + // integer registers it is fine to return the maxArgs + return maxArgs + } +} diff --git a/vendor/github.com/ebitengine/purego/gen.go b/vendor/github.com/ebitengine/purego/gen.go new file mode 100644 index 00000000..9cb7c453 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/gen.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2025 The Ebitengine Authors + +package purego + +//go:generate go run wincallback.go diff --git a/vendor/github.com/ebitengine/purego/go_runtime.go b/vendor/github.com/ebitengine/purego/go_runtime.go new file mode 100644 index 00000000..b327f786 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/go_runtime.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || netbsd || windows + +package purego + +import ( + "unsafe" +) + +//go:linkname runtime_cgocall runtime.cgocall +func runtime_cgocall(fn uintptr, arg unsafe.Pointer) int32 // from runtime/sys_libc.go diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go new file mode 100644 index 00000000..6d0571ab --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +//go:build freebsd || linux || netbsd + +package cgo + +/* +#cgo !netbsd LDFLAGS: -ldl + +#include +#include +*/ +import "C" + +import ( + "errors" + "unsafe" +) + +func Dlopen(filename string, flag int) (uintptr, error) { + cfilename := C.CString(filename) + defer C.free(unsafe.Pointer(cfilename)) + handle := C.dlopen(cfilename, C.int(flag)) + if handle == nil { + return 0, errors.New(C.GoString(C.dlerror())) + } + return uintptr(handle), nil +} + +func Dlsym(handle uintptr, symbol 
string) (uintptr, error) { + csymbol := C.CString(symbol) + defer C.free(unsafe.Pointer(csymbol)) + symbolAddr := C.dlsym(*(*unsafe.Pointer)(unsafe.Pointer(&handle)), csymbol) + if symbolAddr == nil { + return 0, errors.New(C.GoString(C.dlerror())) + } + return uintptr(symbolAddr), nil +} + +func Dlclose(handle uintptr) error { + result := C.dlclose(*(*unsafe.Pointer)(unsafe.Pointer(&handle))) + if result != 0 { + return errors.New(C.GoString(C.dlerror())) + } + return nil +} + +// all that is needed is to assign each dl function because then its +// symbol will then be made available to the linker and linked to inside dlfcn.go +var ( + _ = C.dlopen + _ = C.dlsym + _ = C.dlerror + _ = C.dlclose +) diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/empty.go b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go new file mode 100644 index 00000000..1d7cffe2 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package cgo + +// Empty so that importing this package doesn't cause issue for certain platforms. diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go new file mode 100644 index 00000000..10393fec --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build freebsd || (linux && !(arm64 || amd64 || loong64)) || netbsd + +package cgo + +// this file is placed inside internal/cgo and not package purego +// because Cgo and assembly files can't be in the same package. + +/* +#cgo !netbsd LDFLAGS: -ldl + +#include +#include +#include +#include + +typedef struct syscall15Args { + uintptr_t fn; + uintptr_t a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15; + uintptr_t f1, f2, f3, f4, f5, f6, f7, f8; + uintptr_t err; +} syscall15Args; + +void syscall15(struct syscall15Args *args) { + assert((args->f1|args->f2|args->f3|args->f4|args->f5|args->f6|args->f7|args->f8) == 0); + uintptr_t (*func_name)(uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, + uintptr_t a7, uintptr_t a8, uintptr_t a9, uintptr_t a10, uintptr_t a11, uintptr_t a12, + uintptr_t a13, uintptr_t a14, uintptr_t a15); + *(void**)(&func_name) = (void*)(args->fn); + uintptr_t r1 = func_name(args->a1,args->a2,args->a3,args->a4,args->a5,args->a6,args->a7,args->a8,args->a9, + args->a10,args->a11,args->a12,args->a13,args->a14,args->a15); + args->a1 = r1; + args->err = errno; +} + +*/ +import "C" +import "unsafe" + +// assign purego.syscall15XABI0 to the C version of this function. 
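Besides RegisterFunc, a call can also be made at the lowest level by handing raw uintptr arguments to SyscallN, which the README lists as the only supported entry point on Windows 386/arm. A sketch under the same Linux/libc assumptions as the README example; the explicit NUL terminator and KeepAlive make the string-lifetime rules from func.go explicit.

```go
package main

import (
	"runtime"
	"unsafe"

	"github.com/ebitengine/purego"
)

func main() {
	libc, err := purego.Dlopen("libc.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}
	putsAddr, err := purego.Dlsym(libc, "puts")
	if err != nil {
		panic(err)
	}

	// SyscallN takes raw uintptr arguments, so the C string is built by hand
	// with an explicit NUL terminator and kept alive across the call.
	msg := append([]byte("hello from SyscallN"), 0)
	purego.SyscallN(putsAddr, uintptr(unsafe.Pointer(&msg[0])))
	runtime.KeepAlive(msg)
}
```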
+var Syscall15XABI0 = unsafe.Pointer(C.syscall15) + +//go:nosplit +func Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + args := C.syscall15Args{ + C.uintptr_t(fn), C.uintptr_t(a1), C.uintptr_t(a2), C.uintptr_t(a3), + C.uintptr_t(a4), C.uintptr_t(a5), C.uintptr_t(a6), + C.uintptr_t(a7), C.uintptr_t(a8), C.uintptr_t(a9), C.uintptr_t(a10), C.uintptr_t(a11), C.uintptr_t(a12), + C.uintptr_t(a13), C.uintptr_t(a14), C.uintptr_t(a15), 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + C.syscall15(&args) + return uintptr(args.a1), 0, uintptr(args.err) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h new file mode 100644 index 00000000..9949435f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h @@ -0,0 +1,99 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These save the frame pointer, so in general, functions that use +// these should have zero frame size to suppress the automatic frame +// pointer, though it's harmless to not do this. + +#ifdef GOOS_windows + +// REGS_HOST_TO_ABI0_STACK is the stack bytes used by +// PUSH_REGS_HOST_TO_ABI0. +#define REGS_HOST_TO_ABI0_STACK (28*8 + 8) + +// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from +// the host ABI to Go ABI0 code. It saves all registers that are +// callee-save in the host ABI and caller-save in Go ABI0 and prepares +// for entry to Go. +// +// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag. +// Clear the DF flag for the Go ABI. +// MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +#define PUSH_REGS_HOST_TO_ABI0() \ + PUSHFQ \ + CLD \ + ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \ + MOVQ DI, (0*0)(SP) \ + MOVQ SI, (1*8)(SP) \ + MOVQ BP, (2*8)(SP) \ + MOVQ BX, (3*8)(SP) \ + MOVQ R12, (4*8)(SP) \ + MOVQ R13, (5*8)(SP) \ + MOVQ R14, (6*8)(SP) \ + MOVQ R15, (7*8)(SP) \ + MOVUPS X6, (8*8)(SP) \ + MOVUPS X7, (10*8)(SP) \ + MOVUPS X8, (12*8)(SP) \ + MOVUPS X9, (14*8)(SP) \ + MOVUPS X10, (16*8)(SP) \ + MOVUPS X11, (18*8)(SP) \ + MOVUPS X12, (20*8)(SP) \ + MOVUPS X13, (22*8)(SP) \ + MOVUPS X14, (24*8)(SP) \ + MOVUPS X15, (26*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*0)(SP), DI \ + MOVQ (1*8)(SP), SI \ + MOVQ (2*8)(SP), BP \ + MOVQ (3*8)(SP), BX \ + MOVQ (4*8)(SP), R12 \ + MOVQ (5*8)(SP), R13 \ + MOVQ (6*8)(SP), R14 \ + MOVQ (7*8)(SP), R15 \ + MOVUPS (8*8)(SP), X6 \ + MOVUPS (10*8)(SP), X7 \ + MOVUPS (12*8)(SP), X8 \ + MOVUPS (14*8)(SP), X9 \ + MOVUPS (16*8)(SP), X10 \ + MOVUPS (18*8)(SP), X11 \ + MOVUPS (20*8)(SP), X12 \ + MOVUPS (22*8)(SP), X13 \ + MOVUPS (24*8)(SP), X14 \ + MOVUPS (26*8)(SP), X15 \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \ + POPFQ + +#else +// SysV ABI + +#define REGS_HOST_TO_ABI0_STACK (6*8) + +// SysV MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +// Both SysV and Go require DF to be cleared, so that's already clear. +// The SysV and Go frame pointer conventions are compatible. 
+#define PUSH_REGS_HOST_TO_ABI0() \ + ADJSP $(REGS_HOST_TO_ABI0_STACK) \ + MOVQ BP, (5*8)(SP) \ + LEAQ (5*8)(SP), BP \ + MOVQ BX, (0*8)(SP) \ + MOVQ R12, (1*8)(SP) \ + MOVQ R13, (2*8)(SP) \ + MOVQ R14, (3*8)(SP) \ + MOVQ R15, (4*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*8)(SP), BX \ + MOVQ (1*8)(SP), R12 \ + MOVQ (2*8)(SP), R13 \ + MOVQ (3*8)(SP), R14 \ + MOVQ (4*8)(SP), R15 \ + MOVQ (5*8)(SP), BP \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK) + +#endif diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h new file mode 100644 index 00000000..5d5061ec --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h @@ -0,0 +1,39 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP). +// +// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP). +// +// R29 is not saved because Go will save and restore it. + +#define SAVE_R19_TO_R28(offset) \ + STP (R19, R20), ((offset)+0*8)(RSP) \ + STP (R21, R22), ((offset)+2*8)(RSP) \ + STP (R23, R24), ((offset)+4*8)(RSP) \ + STP (R25, R26), ((offset)+6*8)(RSP) \ + STP (R27, g), ((offset)+8*8)(RSP) +#define RESTORE_R19_TO_R28(offset) \ + LDP ((offset)+0*8)(RSP), (R19, R20) \ + LDP ((offset)+2*8)(RSP), (R21, R22) \ + LDP ((offset)+4*8)(RSP), (R23, R24) \ + LDP ((offset)+6*8)(RSP), (R25, R26) \ + LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */ +#define SAVE_F8_TO_F15(offset) \ + FSTPD (F8, F9), ((offset)+0*8)(RSP) \ + FSTPD (F10, F11), ((offset)+2*8)(RSP) \ + FSTPD (F12, F13), ((offset)+4*8)(RSP) \ + FSTPD (F14, F15), ((offset)+6*8)(RSP) +#define RESTORE_F8_TO_F15(offset) \ + FLDPD ((offset)+0*8)(RSP), (F8, F9) \ + FLDPD ((offset)+2*8)(RSP), (F10, F11) \ + FLDPD ((offset)+4*8)(RSP), (F12, F13) \ + FLDPD ((offset)+6*8)(RSP), (F14, F15) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_loong64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_loong64.h new file mode 100644 index 00000000..b10d8373 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_loong64.h @@ -0,0 +1,60 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R22_TO_R31(offset) saves R22 ~ R31 to the stack space +// of ((offset)+0*8)(R3) ~ ((offset)+9*8)(R3). +// +// SAVE_F24_TO_F31(offset) saves F24 ~ F31 to the stack space +// of ((offset)+0*8)(R3) ~ ((offset)+7*8)(R3). 
+// +// Note: g is R22 + +#define SAVE_R22_TO_R31(offset) \ + MOVV g, ((offset)+(0*8))(R3) \ + MOVV R23, ((offset)+(1*8))(R3) \ + MOVV R24, ((offset)+(2*8))(R3) \ + MOVV R25, ((offset)+(3*8))(R3) \ + MOVV R26, ((offset)+(4*8))(R3) \ + MOVV R27, ((offset)+(5*8))(R3) \ + MOVV R28, ((offset)+(6*8))(R3) \ + MOVV R29, ((offset)+(7*8))(R3) \ + MOVV R30, ((offset)+(8*8))(R3) \ + MOVV R31, ((offset)+(9*8))(R3) + +#define SAVE_F24_TO_F31(offset) \ + MOVD F24, ((offset)+(0*8))(R3) \ + MOVD F25, ((offset)+(1*8))(R3) \ + MOVD F26, ((offset)+(2*8))(R3) \ + MOVD F27, ((offset)+(3*8))(R3) \ + MOVD F28, ((offset)+(4*8))(R3) \ + MOVD F29, ((offset)+(5*8))(R3) \ + MOVD F30, ((offset)+(6*8))(R3) \ + MOVD F31, ((offset)+(7*8))(R3) + +#define RESTORE_R22_TO_R31(offset) \ + MOVV ((offset)+(0*8))(R3), g \ + MOVV ((offset)+(1*8))(R3), R23 \ + MOVV ((offset)+(2*8))(R3), R24 \ + MOVV ((offset)+(3*8))(R3), R25 \ + MOVV ((offset)+(4*8))(R3), R26 \ + MOVV ((offset)+(5*8))(R3), R27 \ + MOVV ((offset)+(6*8))(R3), R28 \ + MOVV ((offset)+(7*8))(R3), R29 \ + MOVV ((offset)+(8*8))(R3), R30 \ + MOVV ((offset)+(9*8))(R3), R31 + +#define RESTORE_F24_TO_F31(offset) \ + MOVD ((offset)+(0*8))(R3), F24 \ + MOVD ((offset)+(1*8))(R3), F25 \ + MOVD ((offset)+(2*8))(R3), F26 \ + MOVD ((offset)+(3*8))(R3), F27 \ + MOVD ((offset)+(4*8))(R3), F28 \ + MOVD ((offset)+(5*8))(R3), F29 \ + MOVD ((offset)+(6*8))(R3), F30 \ + MOVD ((offset)+(7*8))(R3), F31 diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s new file mode 100644 index 00000000..2b7eb57f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_amd64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. +// This signature is known to SWIG, so we can't change it. +TEXT crosscall2(SB), NOSPLIT, $0-0 + PUSH_REGS_HOST_TO_ABI0() + + // Make room for arguments to cgocallback. + ADJSP $0x18 + +#ifndef GOOS_windows + MOVQ DI, 0x0(SP) // fn + MOVQ SI, 0x8(SP) // arg + + // Skip n in DX. + MOVQ CX, 0x10(SP) // ctxt + +#else + MOVQ CX, 0x0(SP) // fn + MOVQ DX, 0x8(SP) // arg + + // Skip n in R8. + MOVQ R9, 0x10(SP) // ctxt + +#endif + + CALL runtime·cgocallback(SB) + + ADJSP $-0x18 + POP_REGS_HOST_TO_ABI0() + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s new file mode 100644 index 00000000..50e5261d --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_arm64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. 
+TEXT crosscall2(SB), NOSPLIT|NOFRAME, $0 +/* + * We still need to save all callee save register as before, and then + * push 3 args for fn (R0, R1, R3), skipping R2. + * Also note that at procedure entry in gc world, 8(RSP) will be the + * first arg. + */ + SUB $(8*24), RSP + STP (R0, R1), (8*1)(RSP) + MOVD R3, (8*3)(RSP) + + SAVE_R19_TO_R28(8*4) + SAVE_F8_TO_F15(8*14) + STP (R29, R30), (8*22)(RSP) + + // Initialize Go ABI environment + BL runtime·load_g(SB) + BL runtime·cgocallback(SB) + + RESTORE_R19_TO_R28(8*4) + RESTORE_F8_TO_F15(8*14) + LDP (8*22)(RSP), (R29, R30) + + ADD $(8*24), RSP + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_loong64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_loong64.s new file mode 100644 index 00000000..aea4f8e6 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_loong64.s @@ -0,0 +1,40 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_loong64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. +TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0 + /* + * We still need to save all callee save register as before, and then + * push 3 args for fn (R4, R5, R7), skipping R6. + * Also note that at procedure entry in gc world, 8(R29) will be the + * first arg. + */ + + ADDV $(-23*8), R3 + MOVV R4, (1*8)(R3) // fn unsafe.Pointer + MOVV R5, (2*8)(R3) // a unsafe.Pointer + MOVV R7, (3*8)(R3) // ctxt uintptr + + SAVE_R22_TO_R31((4*8)) + SAVE_F24_TO_F31((14*8)) + MOVV R1, (22*8)(R3) + + // Initialize Go ABI environment + JAL runtime·load_g(SB) + + JAL runtime·cgocallback(SB) + + RESTORE_R22_TO_R31((4*8)) + RESTORE_F24_TO_F31((14*8)) + MOVV (22*8)(R3), R1 + + ADDV $(23*8), R3 + + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go new file mode 100644 index 00000000..27d4c98c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +package fakecgo + +import ( + _ "unsafe" +) + +// TODO: decide if we need _runtime_cgo_panic_internal + +//go:linkname x_cgo_init_trampoline x_cgo_init_trampoline +//go:linkname _cgo_init _cgo_init +var x_cgo_init_trampoline byte +var _cgo_init = &x_cgo_init_trampoline + +// Creates a new system thread without updating any Go state. +// +// This method is invoked during shared library loading to create a new OS +// thread to perform the runtime initialization. This method is similar to +// _cgo_sys_thread_start except that it doesn't update any Go state. + +//go:linkname x_cgo_thread_start_trampoline x_cgo_thread_start_trampoline +//go:linkname _cgo_thread_start _cgo_thread_start +var x_cgo_thread_start_trampoline byte +var _cgo_thread_start = &x_cgo_thread_start_trampoline + +// Notifies that the runtime has been initialized. 
+// +// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done) +// to ensure that the runtime has been initialized before the CGO call is +// executed. This is necessary for shared libraries where we kickoff runtime +// initialization in a separate thread and return without waiting for this +// thread to complete the init. + +//go:linkname x_cgo_notify_runtime_init_done_trampoline x_cgo_notify_runtime_init_done_trampoline +//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done +var x_cgo_notify_runtime_init_done_trampoline byte +var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done_trampoline + +// Indicates whether a dummy thread key has been created or not. +// +// When calling go exported function from C, we register a destructor +// callback, for a dummy thread key, by using pthread_key_create. + +//go:linkname _cgo_pthread_key_created _cgo_pthread_key_created +var x_cgo_pthread_key_created uintptr +var _cgo_pthread_key_created = &x_cgo_pthread_key_created + +// Set the x_crosscall2_ptr C function pointer variable point to crosscall2. +// It's for the runtime package to call at init time. +func set_crosscall2() { + // nothing needs to be done here for fakecgo + // because it's possible to just call cgocallback directly +} + +//go:linkname _set_crosscall2 runtime.set_crosscall2 +var _set_crosscall2 = set_crosscall2 + +// Store the g into the thread-specific value. +// So that pthread_key_destructor will dropm when the thread is exiting. + +//go:linkname x_cgo_bindm_trampoline x_cgo_bindm_trampoline +//go:linkname _cgo_bindm _cgo_bindm +var x_cgo_bindm_trampoline byte +var _cgo_bindm = &x_cgo_bindm_trampoline + +// TODO: decide if we need x_cgo_set_context_function +// TODO: decide if we need _cgo_yield + +var ( + // In Go 1.20 the race detector was rewritten to pure Go + // on darwin. This means that when CGO_ENABLED=0 is set + // fakecgo is built with race detector code. This is not + // good since this code is pretending to be C. The go:norace + // pragma is not enough, since it only applies to the native + // ABIInternal function. The ABIO wrapper (which is necessary, + // since all references to text symbols from assembly will use it) + // does not inherit the go:norace pragma, so it will still be + // instrumented by the race detector. + // + // To circumvent this issue, using closure calls in the + // assembly, which forces the compiler to use the ABIInternal + // native implementation (which has go:norace) instead. + threadentry_call = threadentry + x_cgo_init_call = x_cgo_init + x_cgo_setenv_call = x_cgo_setenv + x_cgo_unsetenv_call = x_cgo_unsetenv + x_cgo_thread_start_call = x_cgo_thread_start +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go new file mode 100644 index 00000000..e482c120 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +// Package fakecgo implements the Cgo runtime (runtime/cgo) entirely in Go. +// This allows code that calls into C to function properly when CGO_ENABLED=0. +// +// # Goals +// +// fakecgo attempts to replicate the same naming structure as in the runtime. +// For example, functions that have the prefix "gcc_*" are named "go_*". 
+// This makes it easier to port other GOOSs and GOARCHs as well as to keep +// it in sync with runtime/cgo. +// +// # Support +// +// Currently, fakecgo only supports macOS on amd64 & arm64. It also cannot +// be used with -buildmode=c-archive because that requires special initialization +// that fakecgo does not implement at the moment. +// +// # Usage +// +// Using fakecgo is easy just import _ "github.com/ebitengine/purego" and then +// set the environment variable CGO_ENABLED=0. +// The recommended usage for fakecgo is to prefer using runtime/cgo if possible +// but if cross-compiling or fast build times are important fakecgo is available. +// Purego will pick which ever Cgo runtime is available and prefer the one that +// comes with Go (runtime/cgo). +package fakecgo + +//go:generate go run gen.go diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go new file mode 100644 index 00000000..bb73a709 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go @@ -0,0 +1,27 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd && !cgo + +package fakecgo + +import _ "unsafe" // for go:linkname + +// Supply environ and __progname, because we don't +// link against the standard FreeBSD crt0.o and the +// libc dynamic library needs them. + +// Note: when building with cross-compiling or CGO_ENABLED=0, add +// the following argument to `go` so that these symbols are defined by +// making fakecgo the Cgo. +// -gcflags="github.com/ebitengine/purego/internal/fakecgo=-std" + +//go:linkname _environ environ +//go:linkname _progname __progname + +//go:cgo_export_dynamic environ +//go:cgo_export_dynamic __progname + +var _environ uintptr +var _progname uintptr diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go new file mode 100644 index 00000000..39f5ff1f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +//go:norace +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + size = pthread_get_stacksize_np(pthread_self()) + pthread_attr_init(&attr) + pthread_attr_setstacksize(&attr, size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. 
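+	// stackhi is the full pthread stack size; the darwin port takes it from
+	// the calling thread via pthread_get_stacksize_np rather than from the
+	// attribute defaults used by the other ports.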
+ ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +//go:norace +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +//go:norace +func x_cgo_init(g *G, setg uintptr) { + var size size_t + + setg_func = setg + + size = pthread_get_stacksize_np(pthread_self()) + g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go new file mode 100644 index 00000000..d0868f0f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go @@ -0,0 +1,88 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +//go:norace +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + size = pthread_get_stacksize_np(pthread_self()) + pthread_attr_init(&attr) + pthread_attr_setstacksize(&attr, size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +//go:norace +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + // TODO: support ios + //#if TARGET_OS_IPHONE + // darwin_arm_init_thread_exception_port(); + //#endif + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. 
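+// stacklo is estimated from the address of a local variable: roughly the
+// current stack pointer minus the pthread stack size, plus a 4096-byte margin.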
+// +//go:nosplit +//go:norace +func x_cgo_init(g *G, setg uintptr) { + var size size_t + + setg_func = setg + size = pthread_get_stacksize_np(pthread_self()) + g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096)) + + //TODO: support ios + //#if TARGET_OS_IPHONE + // darwin_arm_init_mach_exception_handler(); + // darwin_arm_init_thread_exception_port(); + // init_working_dir(); + //#endif +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go new file mode 100644 index 00000000..c9ff7156 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. 
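+	// stacklo is roughly the current stack pointer minus the default stack
+	// size, keeping 4096 bytes in reserve.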
+ g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go new file mode 100644 index 00000000..e3a060b9 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + // fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. 
*/ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go new file mode 100644 index 00000000..0c463066 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +package fakecgo + +import ( + "syscall" + "unsafe" +) + +var ( + pthread_g pthread_key_t + + runtime_init_cond = PTHREAD_COND_INITIALIZER + runtime_init_mu = PTHREAD_MUTEX_INITIALIZER + runtime_init_done int +) + +//go:nosplit +//go:norace +func x_cgo_notify_runtime_init_done() { + pthread_mutex_lock(&runtime_init_mu) + runtime_init_done = 1 + pthread_cond_broadcast(&runtime_init_cond) + pthread_mutex_unlock(&runtime_init_mu) +} + +// Store the g into a thread-specific value associated with the pthread key pthread_g. +// And pthread_key_destructor will dropm when the thread is exiting. +// +//go:norace +func x_cgo_bindm(g unsafe.Pointer) { + // We assume this will always succeed, otherwise, there might be extra M leaking, + // when a C thread exits after a cgo call. + // We only invoke this function once per thread in runtime.needAndBindM, + // and the next calls just reuse the bound m. + pthread_setspecific(pthread_g, g) +} + +// _cgo_try_pthread_create retries pthread_create if it fails with +// EAGAIN. +// +//go:nosplit +//go:norace +func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe.Pointer, arg *ThreadStart) int { + var ts syscall.Timespec + // tries needs to be the same type as syscall.Timespec.Nsec + // but the fields are int32 on 32bit and int64 on 64bit. + // tries is assigned to syscall.Timespec.Nsec in order to match its type. + tries := ts.Nsec + var err int + + for tries = 0; tries < 20; tries++ { + // inlined this call because it ran out of stack when inlining was disabled + err = int(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(pfn), uintptr(unsafe.Pointer(arg)), 0)) + if err == 0 { + // inlined this call because it ran out of stack when inlining was disabled + call5(pthread_detachABI0, uintptr(*thread), 0, 0, 0, 0) + return 0 + } + if err != int(syscall.EAGAIN) { + return err + } + ts.Sec = 0 + ts.Nsec = (tries + 1) * 1000 * 1000 // Milliseconds. + // inlined this call because it ran out of stack when inlining was disabled + call5(nanosleepABI0, uintptr(unsafe.Pointer(&ts)), 0, 0, 0, 0) + } + return int(syscall.EAGAIN) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go new file mode 100644 index 00000000..c9ff7156 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go @@ -0,0 +1,95 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
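+//
+// The function bodies below match go_freebsd_amd64.go (the diff records the
+// same blob hash for both files); the Linux-specific pieces are the type
+// sizes in libcgo_linux.go and the libc.so.6 / libpthread.so.0 imports in
+// symbols_linux.go.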
+ +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go new file mode 100644 index 00000000..a3b1cca5 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_loong64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_loong64.go new file mode 100644 index 00000000..65293914 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_loong64.go @@ -0,0 +1,92 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_netbsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_netbsd.go new file mode 100644 index 00000000..935a334f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_netbsd.go @@ -0,0 +1,106 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo && (amd64 || arm64) + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + // fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. 
+ ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + var ss stack_t + ts := *(*ThreadStart)(v) + free(v) + + // On NetBSD, a new thread inherits the signal stack of the + // creating thread. That confuses minit, so we remove that + // signal stack here before calling the regular mstart. It's + // a bit baroque to remove a signal stack here only to add one + // in minit, but it's a simple change that keeps NetBSD + // working like other OS's. At this point all signals are + // blocked, so there is no race. + ss.ss_flags = SS_DISABLE + sigaltstack(&ss, nil) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. 
+ g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go new file mode 100644 index 00000000..dfc6629e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +package fakecgo + +//go:nosplit +//go:norace +func x_cgo_setenv(arg *[2]*byte) { + setenv(arg[0], arg[1], 1) +} + +//go:nosplit +//go:norace +func x_cgo_unsetenv(arg *[1]*byte) { + unsetenv(arg[0]) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go new file mode 100644 index 00000000..771cb525 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +package fakecgo + +import "unsafe" + +// _cgo_thread_start is split into three parts in cgo since only one part is system dependent (keep it here for easier handling) + +// _cgo_thread_start(ThreadStart *arg) (runtime/cgo/gcc_util.c) +// This get's called instead of the go code for creating new threads +// -> pthread_* stuff is used, so threads are setup correctly for C +// If this is missing, TLS is only setup correctly on thread 1! +// This function should be go:systemstack instead of go:nosplit (but that requires runtime) +// +//go:nosplit +//go:norace +func x_cgo_thread_start(arg *ThreadStart) { + var ts *ThreadStart + // Make our own copy that can persist after we return. + // _cgo_tsan_acquire(); + ts = (*ThreadStart)(malloc(unsafe.Sizeof(*ts))) + // _cgo_tsan_release(); + if ts == nil { + println("fakecgo: out of memory in thread_start") + abort() + } + // *ts = *arg would cause a writebarrier so copy using slices + s1 := unsafe.Slice((*uintptr)(unsafe.Pointer(ts)), unsafe.Sizeof(*ts)/8) + s2 := unsafe.Slice((*uintptr)(unsafe.Pointer(arg)), unsafe.Sizeof(*arg)/8) + for i := range s2 { + s1[i] = s2[i] + } + _cgo_sys_thread_start(ts) // OS-dependent half +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go new file mode 100644 index 00000000..12e52147 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go @@ -0,0 +1,19 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +// The runtime package contains an uninitialized definition +// for runtime·iscgo. Override it to tell the runtime we're here. +// There are various function pointers that should be set too, +// but those depend on dynamic linker magic to get initialized +// correctly, and sometimes they break. This variable is a +// backup: it depends only on old C style static linking rules. 
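+//
+// With runtime.iscgo set, the runtime treats this as a cgo program: new OS
+// threads for Ms are started through _cgo_thread_start (supplied by this
+// package) rather than by the raw thread-creation syscalls.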
+ +package fakecgo + +import _ "unsafe" // for go:linkname + +//go:linkname _iscgo runtime.iscgo +var _iscgo bool = true diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go new file mode 100644 index 00000000..94fd8bea --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +package fakecgo + +type ( + size_t uintptr + // Sources: + // Darwin (32 bytes) - https://github.com/apple/darwin-xnu/blob/2ff845c2e033bd0ff64b5b6aa6063a1f8f65aa32/bsd/sys/_types.h#L74 + // FreeBSD (32 bytes) - https://github.com/DoctorWkt/xv6-freebsd/blob/d2a294c2a984baed27676068b15ed9a29b06ab6f/include/signal.h#L98C9-L98C21 + // Linux (128 bytes) - https://github.com/torvalds/linux/blob/ab75170520d4964f3acf8bb1f91d34cbc650688e/arch/x86/include/asm/signal.h#L25 + sigset_t [128]byte + pthread_attr_t [64]byte + pthread_t int + pthread_key_t uint64 +) + +// for pthread_sigmask: + +type sighow int32 + +const ( + SIG_BLOCK sighow = 0 + SIG_UNBLOCK sighow = 1 + SIG_SETMASK sighow = 2 +) + +type G struct { + stacklo uintptr + stackhi uintptr +} + +type ThreadStart struct { + g *G + tls *uintptr + fn uintptr +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go new file mode 100644 index 00000000..ecdcb2e7 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_mutex_t struct { + sig int64 + opaque [56]byte + } + pthread_cond_t struct { + sig int64 + opaque [40]byte + } +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t{sig: 0x3CB0B1BB} + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{sig: 0x32AAABA7} +) + +type stack_t struct { + /* not implemented */ +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go new file mode 100644 index 00000000..4bfb70c3 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t uintptr + pthread_mutex_t uintptr +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t(0) + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t(0) +) + +type stack_t struct { + /* not implemented */ +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go new file mode 100644 index 00000000..b08a44a1 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t [48]byte + pthread_mutex_t [48]byte +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t{} + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{} +) + +type stack_t struct { + /* not implemented */ +} diff --git 
a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_netbsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_netbsd.go new file mode 100644 index 00000000..650f6953 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_netbsd.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2025 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t uintptr + pthread_mutex_t uintptr +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t(0) + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t(0) +) + +// Source: https://github.com/NetBSD/src/blob/613e27c65223fd2283b6ed679da1197e12f50e27/sys/compat/linux/arch/m68k/linux_signal.h#L133 +type stack_t struct { + ss_sp uintptr + ss_flags int32 + ss_size uintptr +} + +// Source: https://github.com/NetBSD/src/blob/613e27c65223fd2283b6ed679da1197e12f50e27/sys/sys/signal.h#L261 +const SS_DISABLE = 0x004 diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/netbsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/netbsd.go new file mode 100644 index 00000000..2d499814 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/netbsd.go @@ -0,0 +1,23 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build netbsd + +package fakecgo + +import _ "unsafe" // for go:linkname + +// Supply environ and __progname, because we don't +// link against the standard NetBSD crt0.o and the +// libc dynamic library needs them. + +//go:linkname _environ environ +//go:linkname _progname __progname +//go:linkname ___ps_strings __ps_strings + +var ( + _environ uintptr + _progname uintptr + ___ps_strings uintptr +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go new file mode 100644 index 00000000..82308b8c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go @@ -0,0 +1,19 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +package fakecgo + +import _ "unsafe" // for go:linkname + +//go:linkname x_cgo_setenv_trampoline x_cgo_setenv_trampoline +//go:linkname _cgo_setenv runtime._cgo_setenv +var x_cgo_setenv_trampoline byte +var _cgo_setenv = &x_cgo_setenv_trampoline + +//go:linkname x_cgo_unsetenv_trampoline x_cgo_unsetenv_trampoline +//go:linkname _cgo_unsetenv runtime._cgo_unsetenv +var x_cgo_unsetenv_trampoline byte +var _cgo_unsetenv = &x_cgo_unsetenv_trampoline diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go new file mode 100644 index 00000000..135f6d21 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go @@ -0,0 +1,231 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +package fakecgo + +import ( + "syscall" + "unsafe" +) + +// setg_trampoline calls setg with the G provided +func setg_trampoline(setg uintptr, G uintptr) + +// call5 takes fn the C function and 5 arguments and calls the function with those arguments +func call5(fn, a1, a2, a3, a4, a5 uintptr) uintptr + +//go:nosplit +//go:norace +func malloc(size uintptr) unsafe.Pointer { + ret := call5(mallocABI0, uintptr(size), 0, 0, 0, 0) + // this indirection is to avoid go vet complaining about possible misuse of unsafe.Pointer + return *(*unsafe.Pointer)(unsafe.Pointer(&ret)) +} + +//go:nosplit +//go:norace +func free(ptr unsafe.Pointer) { + call5(freeABI0, uintptr(ptr), 0, 0, 0, 0) +} + +//go:nosplit +//go:norace +func setenv(name *byte, value *byte, overwrite int32) int32 { + return int32(call5(setenvABI0, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), uintptr(overwrite), 0, 0)) +} + +//go:nosplit +//go:norace +func unsetenv(name *byte) int32 { + return int32(call5(unsetenvABI0, uintptr(unsafe.Pointer(name)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func sigfillset(set *sigset_t) int32 { + return int32(call5(sigfillsetABI0, uintptr(unsafe.Pointer(set)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func nanosleep(ts *syscall.Timespec, rem *syscall.Timespec) int32 { + return int32(call5(nanosleepABI0, uintptr(unsafe.Pointer(ts)), uintptr(unsafe.Pointer(rem)), 0, 0, 0)) +} + +//go:nosplit +//go:norace +func abort() { + call5(abortABI0, 0, 0, 0, 0, 0) +} + +//go:nosplit +//go:norace +func sigaltstack(ss *stack_t, old_ss *stack_t) int32 { + return int32(call5(sigaltstackABI0, uintptr(unsafe.Pointer(ss)), uintptr(unsafe.Pointer(old_ss)), 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_attr_init(attr *pthread_attr_t) int32 { + return int32(call5(pthread_attr_initABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_create(thread *pthread_t, attr *pthread_attr_t, start unsafe.Pointer, arg unsafe.Pointer) int32 { + return int32(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(start), uintptr(arg), 0)) +} + +//go:nosplit +//go:norace +func pthread_detach(thread pthread_t) int32 { + return int32(call5(pthread_detachABI0, uintptr(thread), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_sigmask(how sighow, ign *sigset_t, oset *sigset_t) int32 { + return int32(call5(pthread_sigmaskABI0, uintptr(how), uintptr(unsafe.Pointer(ign)), uintptr(unsafe.Pointer(oset)), 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_self() pthread_t { + return pthread_t(call5(pthread_selfABI0, 0, 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_get_stacksize_np(thread pthread_t) size_t { + return size_t(call5(pthread_get_stacksize_npABI0, uintptr(thread), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_attr_getstacksize(attr *pthread_attr_t, stacksize *size_t) int32 { + return int32(call5(pthread_attr_getstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(stacksize)), 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_attr_setstacksize(attr *pthread_attr_t, size size_t) int32 { + return int32(call5(pthread_attr_setstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(size), 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_attr_destroy(attr *pthread_attr_t) int32 { + return 
int32(call5(pthread_attr_destroyABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_mutex_lock(mutex *pthread_mutex_t) int32 { + return int32(call5(pthread_mutex_lockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_mutex_unlock(mutex *pthread_mutex_t) int32 { + return int32(call5(pthread_mutex_unlockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_cond_broadcast(cond *pthread_cond_t) int32 { + return int32(call5(pthread_cond_broadcastABI0, uintptr(unsafe.Pointer(cond)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_setspecific(key pthread_key_t, value unsafe.Pointer) int32 { + return int32(call5(pthread_setspecificABI0, uintptr(key), uintptr(value), 0, 0, 0)) +} + +//go:linkname _malloc _malloc +var _malloc uint8 +var mallocABI0 = uintptr(unsafe.Pointer(&_malloc)) + +//go:linkname _free _free +var _free uint8 +var freeABI0 = uintptr(unsafe.Pointer(&_free)) + +//go:linkname _setenv _setenv +var _setenv uint8 +var setenvABI0 = uintptr(unsafe.Pointer(&_setenv)) + +//go:linkname _unsetenv _unsetenv +var _unsetenv uint8 +var unsetenvABI0 = uintptr(unsafe.Pointer(&_unsetenv)) + +//go:linkname _sigfillset _sigfillset +var _sigfillset uint8 +var sigfillsetABI0 = uintptr(unsafe.Pointer(&_sigfillset)) + +//go:linkname _nanosleep _nanosleep +var _nanosleep uint8 +var nanosleepABI0 = uintptr(unsafe.Pointer(&_nanosleep)) + +//go:linkname _abort _abort +var _abort uint8 +var abortABI0 = uintptr(unsafe.Pointer(&_abort)) + +//go:linkname _sigaltstack _sigaltstack +var _sigaltstack uint8 +var sigaltstackABI0 = uintptr(unsafe.Pointer(&_sigaltstack)) + +//go:linkname _pthread_attr_init _pthread_attr_init +var _pthread_attr_init uint8 +var pthread_attr_initABI0 = uintptr(unsafe.Pointer(&_pthread_attr_init)) + +//go:linkname _pthread_create _pthread_create +var _pthread_create uint8 +var pthread_createABI0 = uintptr(unsafe.Pointer(&_pthread_create)) + +//go:linkname _pthread_detach _pthread_detach +var _pthread_detach uint8 +var pthread_detachABI0 = uintptr(unsafe.Pointer(&_pthread_detach)) + +//go:linkname _pthread_sigmask _pthread_sigmask +var _pthread_sigmask uint8 +var pthread_sigmaskABI0 = uintptr(unsafe.Pointer(&_pthread_sigmask)) + +//go:linkname _pthread_self _pthread_self +var _pthread_self uint8 +var pthread_selfABI0 = uintptr(unsafe.Pointer(&_pthread_self)) + +//go:linkname _pthread_get_stacksize_np _pthread_get_stacksize_np +var _pthread_get_stacksize_np uint8 +var pthread_get_stacksize_npABI0 = uintptr(unsafe.Pointer(&_pthread_get_stacksize_np)) + +//go:linkname _pthread_attr_getstacksize _pthread_attr_getstacksize +var _pthread_attr_getstacksize uint8 +var pthread_attr_getstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_getstacksize)) + +//go:linkname _pthread_attr_setstacksize _pthread_attr_setstacksize +var _pthread_attr_setstacksize uint8 +var pthread_attr_setstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_setstacksize)) + +//go:linkname _pthread_attr_destroy _pthread_attr_destroy +var _pthread_attr_destroy uint8 +var pthread_attr_destroyABI0 = uintptr(unsafe.Pointer(&_pthread_attr_destroy)) + +//go:linkname _pthread_mutex_lock _pthread_mutex_lock +var _pthread_mutex_lock uint8 +var pthread_mutex_lockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_lock)) + +//go:linkname _pthread_mutex_unlock _pthread_mutex_unlock +var _pthread_mutex_unlock uint8 +var pthread_mutex_unlockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_unlock)) + 
+//go:linkname _pthread_cond_broadcast _pthread_cond_broadcast +var _pthread_cond_broadcast uint8 +var pthread_cond_broadcastABI0 = uintptr(unsafe.Pointer(&_pthread_cond_broadcast)) + +//go:linkname _pthread_setspecific _pthread_setspecific +var _pthread_setspecific uint8 +var pthread_setspecificABI0 = uintptr(unsafe.Pointer(&_pthread_setspecific)) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go new file mode 100644 index 00000000..8c4489f0 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go @@ -0,0 +1,30 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_free free "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_setenv setenv "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_unsetenv unsetenv "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_sigfillset sigfillset "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_nanosleep nanosleep "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_abort abort "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_sigaltstack sigaltstack "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_create pthread_create "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_self pthread_self "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "/usr/lib/libSystem.B.dylib" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go new file mode 100644 index 00000000..bbe1bd57 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go @@ -0,0 +1,30 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so.7" +//go:cgo_import_dynamic purego_free free "libc.so.7" +//go:cgo_import_dynamic purego_setenv setenv "libc.so.7" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.7" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.7" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.7" +//go:cgo_import_dynamic purego_abort abort "libc.so.7" +//go:cgo_import_dynamic purego_sigaltstack sigaltstack "libc.so.7" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go new file mode 100644 index 00000000..21652650 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go @@ -0,0 +1,30 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so.6" +//go:cgo_import_dynamic purego_free free "libc.so.6" +//go:cgo_import_dynamic purego_setenv setenv "libc.so.6" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.6" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.6" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.6" +//go:cgo_import_dynamic purego_abort abort "libc.so.6" +//go:cgo_import_dynamic purego_sigaltstack sigaltstack "libc.so.6" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so.0" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_netbsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_netbsd.go new file mode 100644 index 00000000..7c92bb0b --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_netbsd.go @@ -0,0 +1,30 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so" +//go:cgo_import_dynamic purego_free free "libc.so" +//go:cgo_import_dynamic purego_setenv setenv "libc.so" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so" +//go:cgo_import_dynamic purego_abort abort "libc.so" +//go:cgo_import_dynamic purego_sigaltstack sigaltstack "libc.so" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s new file mode 100644 index 00000000..c9a3cc09 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || linux || freebsd) + +/* +trampoline for emulating required C functions for cgo in go (see cgo.go) +(we convert cdecl calling convention to go and vice-versa) + +Since we're called from go and call into C we can cheat a bit with the calling conventions: + - in go all the registers are caller saved + - in C we have a couple of callee saved registers + +=> we can use BX, R12, R13, R14, R15 instead of the stack + +C Calling convention cdecl used here (we only need integer args): +1. arg: DI +2. arg: SI +3. arg: DX +4. arg: CX +5. arg: R8 +6. arg: R9 +We don't need floats with these functions -> AX=0 +return value will be in AX +*/ +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. 
+ +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $16 + MOVQ DI, AX + MOVQ SI, BX + MOVQ ·x_cgo_init_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_thread_start_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_setenv_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_unsetenv_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0-16 + MOVQ G+8(FP), DI + MOVQ setg+0(FP), BX + XORL AX, AX + CALL BX + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $16 + MOVQ DI, AX + MOVQ ·threadentry_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT ·call5(SB), NOSPLIT, $0-56 + MOVQ fn+0(FP), BX + MOVQ a1+8(FP), DI + MOVQ a2+16(FP), SI + MOVQ a3+24(FP), DX + MOVQ a4+32(FP), CX + MOVQ a5+40(FP), R8 + + XORL AX, AX // no floats + + PUSHQ BP // save BP + MOVQ SP, BP // save SP inside BP bc BP is callee-saved + SUBQ $16, SP // allocate space for alignment + ANDQ $-16, SP // align on 16 bytes for SSE + + CALL BX + + MOVQ BP, SP // get SP back + POPQ BP // restore BP + + MOVQ AX, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s new file mode 100644 index 00000000..9dbdbc01 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. 
+ +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD R1, 16(RSP) + MOVD ·x_cgo_init_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_thread_start_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_setenv_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_unsetenv_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0-0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0-16 + MOVD G+8(FP), R0 + MOVD setg+0(FP), R1 + CALL R1 + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·threadentry_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + MOVD $0, R0 // TODO: get the return value from threadentry + RET + +TEXT ·call5(SB), NOSPLIT, $0-0 + MOVD fn+0(FP), R6 + MOVD a1+8(FP), R0 + MOVD a2+16(FP), R1 + MOVD a3+24(FP), R2 + MOVD a4+32(FP), R3 + MOVD a5+40(FP), R4 + CALL R6 + MOVD R0, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_loong64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_loong64.s new file mode 100644 index 00000000..15b33543 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_loong64.s @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2025 The Ebitengine Authors + +//go:build !cgo && linux + +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. + +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $16 + MOVV R4, 8(R3) + MOVV R5, 16(R3) + MOVV ·x_cgo_init_call(SB), R6 + MOVV (R6), R7 + CALL (R7) + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $8 + MOVV R4, 8(R3) + MOVV ·x_cgo_thread_start_call(SB), R5 + MOVV (R5), R6 + CALL (R6) + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $8 + MOVV R4, 8(R3) + MOVV ·x_cgo_setenv_call(SB), R5 + MOVV (R5), R6 + CALL (R6) + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $8 + MOVV R4, 8(R3) + MOVV ·x_cgo_unsetenv_call(SB), R5 + MOVV (R5), R6 + CALL (R6) + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0 + MOVV G+8(FP), R4 + MOVV setg+0(FP), R5 + CALL (R5) + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $16 + MOVV R4, 8(R3) + MOVV ·threadentry_call(SB), R5 + MOVV (R5), R6 + CALL (R6) + RET + +TEXT ·call5(SB), NOSPLIT, $0-0 + MOVV fn+0(FP), R9 + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV a4+32(FP), R7 + MOVV a5+40(FP), R8 + CALL (R9) + MOVV R4, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s new file mode 100644 index 00000000..c93d783d --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s @@ -0,0 +1,94 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +#include "textflag.h" + +// these stubs are here because it is not possible to go:linkname directly the C functions on darwin arm64 + +TEXT _malloc(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_malloc(SB) + RET + +TEXT _free(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_free(SB) + RET + +TEXT _setenv(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_setenv(SB) + RET + +TEXT _unsetenv(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_unsetenv(SB) + RET + +TEXT _sigfillset(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_sigfillset(SB) + RET + +TEXT _nanosleep(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_nanosleep(SB) + RET + +TEXT _abort(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_abort(SB) + RET + +TEXT _sigaltstack(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_sigaltstack(SB) + RET + +TEXT _pthread_attr_init(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_init(SB) + RET + +TEXT _pthread_create(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_create(SB) + RET + +TEXT _pthread_detach(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_detach(SB) + RET + +TEXT _pthread_sigmask(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_sigmask(SB) + RET + +TEXT _pthread_self(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_self(SB) + RET + +TEXT _pthread_get_stacksize_np(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_get_stacksize_np(SB) + RET + +TEXT _pthread_attr_getstacksize(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_getstacksize(SB) + RET + +TEXT _pthread_attr_setstacksize(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_setstacksize(SB) + RET + +TEXT _pthread_attr_destroy(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_destroy(SB) + RET + +TEXT _pthread_mutex_lock(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_mutex_lock(SB) + RET + +TEXT _pthread_mutex_unlock(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_mutex_unlock(SB) + RET + +TEXT _pthread_cond_broadcast(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_cond_broadcast(SB) + RET + +TEXT _pthread_setspecific(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_setspecific(SB) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/strings/strings.go b/vendor/github.com/ebitengine/purego/internal/strings/strings.go new file mode 100644 index 00000000..5b0d2522 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/strings/strings.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package strings + +import ( + "unsafe" +) + +// hasSuffix tests whether the string s ends with suffix. +func hasSuffix(s, suffix string) bool { + return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix +} + +// CString converts a go string to *byte that can be passed to C code. +func CString(name string) *byte { + if hasSuffix(name, "\x00") { + return &(*(*[]byte)(unsafe.Pointer(&name)))[0] + } + b := make([]byte, len(name)+1) + copy(b, name) + return &b[0] +} + +// GoString copies a null-terminated char* to a Go string. 
+func GoString(c uintptr) string { + // We take the address and then dereference it to trick go vet from creating a possible misuse of unsafe.Pointer + ptr := *(*unsafe.Pointer)(unsafe.Pointer(&c)) + if ptr == nil { + return "" + } + var length int + for { + if *(*byte)(unsafe.Add(ptr, uintptr(length))) == '\x00' { + break + } + length++ + } + return string(unsafe.Slice((*byte)(ptr), length)) +} diff --git a/vendor/github.com/ebitengine/purego/is_ios.go b/vendor/github.com/ebitengine/purego/is_ios.go new file mode 100644 index 00000000..ed31da97 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/is_ios.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package purego + +// if you are getting this error it means that you have +// CGO_ENABLED=0 while trying to build for ios. +// purego does not support this mode yet. +// the fix is to set CGO_ENABLED=1 which will require +// a C compiler. +var _ = _PUREGO_REQUIRES_CGO_ON_IOS diff --git a/vendor/github.com/ebitengine/purego/nocgo.go b/vendor/github.com/ebitengine/purego/nocgo.go new file mode 100644 index 00000000..b91b9796 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/nocgo.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux || netbsd) + +package purego + +// if CGO_ENABLED=0 import fakecgo to setup the Cgo runtime correctly. +// This is required since some frameworks need TLS setup the C way which Go doesn't do. +// We currently don't support ios in fakecgo mode so force Cgo or fail +// +// The way that the Cgo runtime (runtime/cgo) works is by setting some variables found +// in runtime with non-null GCC compiled functions. The variables that are replaced are +// var ( +// iscgo bool // in runtime/cgo.go +// _cgo_init unsafe.Pointer // in runtime/cgo.go +// _cgo_thread_start unsafe.Pointer // in runtime/cgo.go +// _cgo_notify_runtime_init_done unsafe.Pointer // in runtime/cgo.go +// _cgo_setenv unsafe.Pointer // in runtime/env_posix.go +// _cgo_unsetenv unsafe.Pointer // in runtime/env_posix.go +// ) +// importing fakecgo will set these (using //go:linkname) with functions written +// entirely in Go (except for some assembly trampolines to change GCC ABI to Go ABI). +// Doing so makes it possible to build applications that call into C without CGO_ENABLED=1. 
+import _ "github.com/ebitengine/purego/internal/fakecgo" diff --git a/vendor/github.com/ebitengine/purego/struct_amd64.go b/vendor/github.com/ebitengine/purego/struct_amd64.go new file mode 100644 index 00000000..c4c2ad8f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_amd64.go @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import ( + "math" + "reflect" + "unsafe" +) + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + outSize := outType.Size() + switch { + case outSize == 0: + return reflect.New(outType).Elem() + case outSize <= 8: + if isAllFloats(outType) { + // 2 float32s or 1 float64s are return in the float register + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.f1})).Elem() + } + // up to 8 bytes is returned in RAX + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.a1})).Elem() + case outSize <= 16: + r1, r2 := syscall.a1, syscall.a2 + if isAllFloats(outType) { + r1 = syscall.f1 + r2 = syscall.f2 + } else { + // check first 8 bytes if it's floats + hasFirstFloat := false + f1 := outType.Field(0).Type + if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && outType.Field(1).Type.Kind() == reflect.Float32 { + r1 = syscall.f1 + hasFirstFloat = true + } + + // find index of the field that starts the second 8 bytes + var i int + for i = 0; i < outType.NumField(); i++ { + if outType.Field(i).Offset == 8 { + break + } + } + + // check last 8 bytes if they are floats + f1 = outType.Field(i).Type + if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && i+1 == outType.NumField() { + r2 = syscall.f1 + } else if hasFirstFloat { + // if the first field was a float then that means the second integer field + // comes from the first integer register + r2 = syscall.a1 + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem() + default: + // create struct from the Go pointer created above + // weird pointer dereference to circumvent go vet + return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1))).Elem() + } +} + +func isAllFloats(ty reflect.Type) bool { + for i := 0; i < ty.NumField(); i++ { + f := ty.Field(i) + switch f.Type.Kind() { + case reflect.Float64, reflect.Float32: + default: + return false + } + } + return true +} + +// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf +// https://gitlab.com/x86-psABIs/x86-64-ABI +// Class determines where the 8 byte value goes. 
+// Higher value classes win over lower value classes +const ( + _NO_CLASS = 0b0000 + _SSE = 0b0001 + _X87 = 0b0011 // long double not used in Go + _INTEGER = 0b0111 + _MEMORY = 0b1111 +) + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []any) []any { + if v.Type().Size() == 0 { + return keepAlive + } + + // if greater than 64 bytes place on stack + if v.Type().Size() > 8*8 { + placeStack(v, addStack) + return keepAlive + } + var ( + savedNumFloats = *numFloats + savedNumInts = *numInts + savedNumStack = *numStack + ) + placeOnStack := postMerger(v.Type()) || !tryPlaceRegister(v, addFloat, addInt) + if placeOnStack { + // reset any values placed in registers + *numFloats = savedNumFloats + *numInts = savedNumInts + *numStack = savedNumStack + placeStack(v, addStack) + } + return keepAlive +} + +func postMerger(t reflect.Type) (passInMemory bool) { + // (c) If the size of the aggregate exceeds two eightbytes and the first eight- byte isn’t SSE or any other + // eightbyte isn’t SSEUP, the whole argument is passed in memory. + if t.Kind() != reflect.Struct { + return false + } + if t.Size() <= 2*8 { + return false + } + return true // Go does not have an SSE/SSEUP type so this is always true +} + +func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) (ok bool) { + ok = true + var val uint64 + var shift byte // # of bits to shift + var flushed bool + class := _NO_CLASS + flushIfNeeded := func() { + if flushed { + return + } + flushed = true + if class == _SSE { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + val = 0 + shift = 0 + class = _NO_CLASS + } + var place func(v reflect.Value) + place = func(v reflect.Value) { + var numFields int + if v.Kind() == reflect.Struct { + numFields = v.Type().NumField() + } else { + numFields = v.Type().Len() + } + + for i := 0; i < numFields; i++ { + flushed = false + var f reflect.Value + if v.Kind() == reflect.Struct { + f = v.Field(i) + } else { + f = v.Index(i) + } + switch f.Kind() { + case reflect.Struct: + place(f) + case reflect.Bool: + if f.Bool() { + val |= 1 << shift + } + shift += 8 + class |= _INTEGER + case reflect.Pointer: + ok = false + return + case reflect.Int8: + val |= uint64(f.Int()&0xFF) << shift + shift += 8 + class |= _INTEGER + case reflect.Int16: + val |= uint64(f.Int()&0xFFFF) << shift + shift += 16 + class |= _INTEGER + case reflect.Int32: + val |= uint64(f.Int()&0xFFFF_FFFF) << shift + shift += 32 + class |= _INTEGER + case reflect.Int64, reflect.Int: + val = uint64(f.Int()) + shift = 64 + class = _INTEGER + case reflect.Uint8: + val |= f.Uint() << shift + shift += 8 + class |= _INTEGER + case reflect.Uint16: + val |= f.Uint() << shift + shift += 16 + class |= _INTEGER + case reflect.Uint32: + val |= f.Uint() << shift + shift += 32 + class |= _INTEGER + case reflect.Uint64, reflect.Uint, reflect.Uintptr: + val = f.Uint() + shift = 64 + class = _INTEGER + case reflect.Float32: + val |= uint64(math.Float32bits(float32(f.Float()))) << shift + shift += 32 + class |= _SSE + case reflect.Float64: + if v.Type().Size() > 16 { + ok = false + return + } + val = uint64(math.Float64bits(f.Float())) + shift = 64 + class = _SSE + case reflect.Array: + place(f) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + + if shift == 64 { + flushIfNeeded() + } else if shift > 64 { + // Should never happen, but may if we forget to reset shift after flush (or forget to flush), + // better fall apart here, than 
corrupt arguments. + panic("purego: tryPlaceRegisters shift > 64") + } + } + } + + place(v) + flushIfNeeded() + return ok +} + +func placeStack(v reflect.Value, addStack func(uintptr)) { + for i := 0; i < v.Type().NumField(); i++ { + f := v.Field(i) + switch f.Kind() { + case reflect.Pointer: + addStack(f.Pointer()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + addStack(uintptr(f.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + addStack(uintptr(f.Uint())) + case reflect.Float32: + addStack(uintptr(math.Float32bits(float32(f.Float())))) + case reflect.Float64: + addStack(uintptr(math.Float64bits(f.Float()))) + case reflect.Struct: + placeStack(f, addStack) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } +} + +func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) { + panic("purego: not needed on amd64") +} diff --git a/vendor/github.com/ebitengine/purego/struct_arm64.go b/vendor/github.com/ebitengine/purego/struct_arm64.go new file mode 100644 index 00000000..8605e77b --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_arm64.go @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import ( + "math" + "reflect" + "unsafe" +) + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + outSize := outType.Size() + switch { + case outSize == 0: + return reflect.New(outType).Elem() + case outSize <= 8: + r1 := syscall.a1 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + r1 = syscall.f1 + if numFields == 2 { + r1 = syscall.f2<<32 | syscall.f1 + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{r1})).Elem() + case outSize <= 16: + r1, r2 := syscall.a1, syscall.a2 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + switch numFields { + case 4: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f4<<32 | syscall.f3 + case 3: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f3 + case 2: + r1 = syscall.f1 + r2 = syscall.f2 + default: + panic("unreachable") + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem() + default: + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats && numFields <= 4 { + switch numFields { + case 4: + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c, d uintptr }{syscall.f1, syscall.f2, syscall.f3, syscall.f4})).Elem() + case 3: + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c uintptr }{syscall.f1, syscall.f2, syscall.f3})).Elem() + default: + panic("unreachable") + } + } + // create struct from the Go pointer created in arm64_r8 + // weird pointer dereference to circumvent go vet + return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.arm64_r8))).Elem() + } +} + +// https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +const ( + _NO_CLASS = 0b00 + _FLOAT = 0b01 + _INT = 0b11 +) + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []any) []any { + if v.Type().Size() == 0 { + return keepAlive + } + + if hva, hfa, size := isHVA(v.Type()), isHFA(v.Type()), v.Type().Size(); hva || hfa || size <= 16 { + // if this doesn't fit entirely in registers then + // each element goes onto the stack + if hfa && *numFloats+v.NumField() > numOfFloatRegisters { + *numFloats = 
numOfFloatRegisters + } else if hva && *numInts+v.NumField() > numOfIntegerRegisters() { + *numInts = numOfIntegerRegisters() + } + + placeRegisters(v, addFloat, addInt) + } else { + keepAlive = placeStack(v, keepAlive, addInt) + } + return keepAlive // the struct was allocated so don't panic +} + +func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) { + var val uint64 + var shift byte + var flushed bool + class := _NO_CLASS + var place func(v reflect.Value) + place = func(v reflect.Value) { + var numFields int + if v.Kind() == reflect.Struct { + numFields = v.Type().NumField() + } else { + numFields = v.Type().Len() + } + for k := 0; k < numFields; k++ { + flushed = false + var f reflect.Value + if v.Kind() == reflect.Struct { + f = v.Field(k) + } else { + f = v.Index(k) + } + align := byte(f.Type().Align()*8 - 1) + shift = (shift + align) &^ align + if shift >= 64 { + shift = 0 + flushed = true + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + val = 0 + class = _NO_CLASS + } + switch f.Type().Kind() { + case reflect.Struct: + place(f) + case reflect.Bool: + if f.Bool() { + val |= 1 << shift + } + shift += 8 + class |= _INT + case reflect.Uint8: + val |= f.Uint() << shift + shift += 8 + class |= _INT + case reflect.Uint16: + val |= f.Uint() << shift + shift += 16 + class |= _INT + case reflect.Uint32: + val |= f.Uint() << shift + shift += 32 + class |= _INT + case reflect.Uint64, reflect.Uint, reflect.Uintptr: + addInt(uintptr(f.Uint())) + shift = 0 + flushed = true + class = _NO_CLASS + case reflect.Int8: + val |= uint64(f.Int()&0xFF) << shift + shift += 8 + class |= _INT + case reflect.Int16: + val |= uint64(f.Int()&0xFFFF) << shift + shift += 16 + class |= _INT + case reflect.Int32: + val |= uint64(f.Int()&0xFFFF_FFFF) << shift + shift += 32 + class |= _INT + case reflect.Int64, reflect.Int: + addInt(uintptr(f.Int())) + shift = 0 + flushed = true + class = _NO_CLASS + case reflect.Float32: + if class == _FLOAT { + addFloat(uintptr(val)) + val = 0 + shift = 0 + } + val |= uint64(math.Float32bits(float32(f.Float()))) << shift + shift += 32 + class |= _FLOAT + case reflect.Float64: + addFloat(uintptr(math.Float64bits(float64(f.Float())))) + shift = 0 + flushed = true + class = _NO_CLASS + case reflect.Ptr: + addInt(f.Pointer()) + shift = 0 + flushed = true + class = _NO_CLASS + case reflect.Array: + place(f) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } + } + place(v) + if !flushed { + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } +} + +func placeStack(v reflect.Value, keepAlive []any, addInt func(uintptr)) []any { + // Struct is too big to be placed in registers. + // Copy to heap and place the pointer in register + ptrStruct := reflect.New(v.Type()) + ptrStruct.Elem().Set(v) + ptr := ptrStruct.Elem().Addr().UnsafePointer() + keepAlive = append(keepAlive, ptr) + addInt(uintptr(ptr)) + return keepAlive +} + +// isHFA reports a Homogeneous Floating-point Aggregate (HFA) which is a Fundamental Data Type that is a +// Floating-Point type and at most four uniquely addressable members (5.9.5.1 in [Arm64 Calling Convention]). +// This type of struct will be placed more compactly than the individual fields. 
+// +// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +func isHFA(t reflect.Type) bool { + // round up struct size to nearest 8 see section B.4 + structSize := roundUpTo8(t.Size()) + if structSize == 0 || t.NumField() > 4 { + return false + } + first := t.Field(0) + switch first.Type.Kind() { + case reflect.Float32, reflect.Float64: + firstKind := first.Type.Kind() + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() != firstKind { + return false + } + } + return true + case reflect.Array: + switch first.Type.Elem().Kind() { + case reflect.Float32, reflect.Float64: + return true + default: + return false + } + case reflect.Struct: + for i := 0; i < first.Type.NumField(); i++ { + if !isHFA(first.Type) { + return false + } + } + return true + default: + return false + } +} + +// isHVA reports a Homogeneous Aggregate with a Fundamental Data Type that is a Short-Vector type +// and at most four uniquely addressable members (5.9.5.2 in [Arm64 Calling Convention]). +// A short vector is a machine type that is composed of repeated instances of one fundamental integral or +// floating-point type. It may be 8 or 16 bytes in total size (5.4 in [Arm64 Calling Convention]). +// This type of struct will be placed more compactly than the individual fields. +// +// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +func isHVA(t reflect.Type) bool { + // round up struct size to nearest 8 see section B.4 + structSize := roundUpTo8(t.Size()) + if structSize == 0 || (structSize != 8 && structSize != 16) { + return false + } + first := t.Field(0) + switch first.Type.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32: + firstKind := first.Type.Kind() + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() != firstKind { + return false + } + } + return true + case reflect.Array: + switch first.Type.Elem().Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32: + return true + default: + return false + } + default: + return false + } +} diff --git a/vendor/github.com/ebitengine/purego/struct_loong64.go b/vendor/github.com/ebitengine/purego/struct_loong64.go new file mode 100644 index 00000000..da7f1a15 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_loong64.go @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2025 The Ebitengine Authors + +package purego + +import ( + "math" + "reflect" + "unsafe" +) + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + outSize := outType.Size() + switch { + case outSize == 0: + return reflect.New(outType).Elem() + case outSize <= 8: + r1 := syscall.a1 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + r1 = syscall.f1 + if numFields == 2 { + r1 = syscall.f2<<32 | syscall.f1 + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{r1})).Elem() + case outSize <= 16: + r1, r2 := syscall.a1, syscall.a2 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + switch numFields { + case 4: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f4<<32 | syscall.f3 + case 3: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f3 + case 2: + r1 = syscall.f1 + r2 = syscall.f2 + default: + panic("unreachable") + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem() + default: + // create 
struct from the Go pointer created above + // weird pointer dereference to circumvent go vet + return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1))).Elem() + } +} + +const ( + _NO_CLASS = 0b00 + _FLOAT = 0b01 + _INT = 0b11 +) + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []any) []any { + if v.Type().Size() == 0 { + return keepAlive + } + + if size := v.Type().Size(); size <= 16 { + placeRegisters(v, addFloat, addInt) + } else { + keepAlive = placeStack(v, keepAlive, addInt) + } + return keepAlive // the struct was allocated so don't panic +} + +func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) { + var val uint64 + var shift byte + var flushed bool + class := _NO_CLASS + var place func(v reflect.Value) + place = func(v reflect.Value) { + var numFields int + if v.Kind() == reflect.Struct { + numFields = v.Type().NumField() + } else { + numFields = v.Type().Len() + } + for k := 0; k < numFields; k++ { + flushed = false + var f reflect.Value + if v.Kind() == reflect.Struct { + f = v.Field(k) + } else { + f = v.Index(k) + } + align := byte(f.Type().Align()*8 - 1) + shift = (shift + align) &^ align + if shift >= 64 { + shift = 0 + flushed = true + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } + switch f.Type().Kind() { + case reflect.Struct: + place(f) + case reflect.Bool: + if f.Bool() { + val |= 1 << shift + } + shift += 8 + class |= _INT + case reflect.Uint8: + val |= f.Uint() << shift + shift += 8 + class |= _INT + case reflect.Uint16: + val |= f.Uint() << shift + shift += 16 + class |= _INT + case reflect.Uint32: + val |= f.Uint() << shift + shift += 32 + class |= _INT + case reflect.Uint64, reflect.Uint, reflect.Uintptr: + addInt(uintptr(f.Uint())) + shift = 0 + flushed = true + class = _NO_CLASS + case reflect.Int8: + val |= uint64(f.Int()&0xFF) << shift + shift += 8 + class |= _INT + case reflect.Int16: + val |= uint64(f.Int()&0xFFFF) << shift + shift += 16 + class |= _INT + case reflect.Int32: + val |= uint64(f.Int()&0xFFFF_FFFF) << shift + shift += 32 + class |= _INT + case reflect.Int64, reflect.Int: + addInt(uintptr(f.Int())) + shift = 0 + flushed = true + class = _NO_CLASS + case reflect.Float32: + if class == _FLOAT { + addFloat(uintptr(val)) + val = 0 + shift = 0 + } + val |= uint64(math.Float32bits(float32(f.Float()))) << shift + shift += 32 + class |= _FLOAT + case reflect.Float64: + addFloat(uintptr(math.Float64bits(float64(f.Float())))) + shift = 0 + flushed = true + class = _NO_CLASS + case reflect.Ptr: + addInt(f.Pointer()) + shift = 0 + flushed = true + class = _NO_CLASS + case reflect.Array: + place(f) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } + } + place(v) + if !flushed { + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } +} + +func placeStack(v reflect.Value, keepAlive []any, addInt func(uintptr)) []any { + // Struct is too big to be placed in registers. 
+ // Copy to heap and place the pointer in register + ptrStruct := reflect.New(v.Type()) + ptrStruct.Elem().Set(v) + ptr := ptrStruct.Elem().Addr().UnsafePointer() + keepAlive = append(keepAlive, ptr) + addInt(uintptr(ptr)) + return keepAlive +} diff --git a/vendor/github.com/ebitengine/purego/struct_other.go b/vendor/github.com/ebitengine/purego/struct_other.go new file mode 100644 index 00000000..58ccc973 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_other.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +//go:build !amd64 && !arm64 && !loong64 + +package purego + +import "reflect" + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []any) []any { + panic("purego: struct arguments are not supported") +} + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + panic("purego: struct returns are not supported") +} + +func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) { + panic("purego: not needed on other platforms") +} diff --git a/vendor/github.com/ebitengine/purego/sys_amd64.s b/vendor/github.com/ebitengine/purego/sys_amd64.s new file mode 100644 index 00000000..a364dd0c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_amd64.s @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || netbsd + +#include "textflag.h" +#include "abi_amd64.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 80 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of the syscall package. +// syscall15X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// a10 uintptr +// a11 uintptr +// a12 uintptr +// a13 uintptr +// a14 uintptr +// a15 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall15X must be called on the g0 stack with the +// C calling convention (use libcCall). 
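The syscall15Args_a1, syscall15Args_f1, and similar offsets used in the assembly below are not hand-maintained numbers: go_asm.h, which the Go toolchain generates for this package, exposes the field offsets of the Go-side argument struct. For reference, the declaration those offsets come from (it appears verbatim later in syscall.go) looks like this:

// Mirrored by the assembly through go_asm.h field-offset constants, so the
// register loads below stay in sync with the Go struct layout.
type syscall15Args struct {
	fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr
	f1, f2, f3, f4, f5, f6, f7, f8                                       uintptr
	arm64_r8                                                             uintptr
}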
+GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8 +DATA ·syscall15XABI0(SB)/8, $syscall15X(SB) +TEXT syscall15X(SB), NOSPLIT|NOFRAME, $0 + PUSHQ BP + MOVQ SP, BP + SUBQ $STACK_SIZE, SP + MOVQ DI, PTR_ADDRESS(BP) // save the pointer + MOVQ DI, R11 + + MOVQ syscall15Args_f1(R11), X0 // f1 + MOVQ syscall15Args_f2(R11), X1 // f2 + MOVQ syscall15Args_f3(R11), X2 // f3 + MOVQ syscall15Args_f4(R11), X3 // f4 + MOVQ syscall15Args_f5(R11), X4 // f5 + MOVQ syscall15Args_f6(R11), X5 // f6 + MOVQ syscall15Args_f7(R11), X6 // f7 + MOVQ syscall15Args_f8(R11), X7 // f8 + + MOVQ syscall15Args_a1(R11), DI // a1 + MOVQ syscall15Args_a2(R11), SI // a2 + MOVQ syscall15Args_a3(R11), DX // a3 + MOVQ syscall15Args_a4(R11), CX // a4 + MOVQ syscall15Args_a5(R11), R8 // a5 + MOVQ syscall15Args_a6(R11), R9 // a6 + + // push the remaining paramters onto the stack + MOVQ syscall15Args_a7(R11), R12 + MOVQ R12, 0(SP) // push a7 + MOVQ syscall15Args_a8(R11), R12 + MOVQ R12, 8(SP) // push a8 + MOVQ syscall15Args_a9(R11), R12 + MOVQ R12, 16(SP) // push a9 + MOVQ syscall15Args_a10(R11), R12 + MOVQ R12, 24(SP) // push a10 + MOVQ syscall15Args_a11(R11), R12 + MOVQ R12, 32(SP) // push a11 + MOVQ syscall15Args_a12(R11), R12 + MOVQ R12, 40(SP) // push a12 + MOVQ syscall15Args_a13(R11), R12 + MOVQ R12, 48(SP) // push a13 + MOVQ syscall15Args_a14(R11), R12 + MOVQ R12, 56(SP) // push a14 + MOVQ syscall15Args_a15(R11), R12 + MOVQ R12, 64(SP) // push a15 + XORL AX, AX // vararg: say "no float args" + + MOVQ syscall15Args_fn(R11), R10 // fn + CALL R10 + + MOVQ PTR_ADDRESS(BP), DI // get the pointer back + MOVQ AX, syscall15Args_a1(DI) // r1 + MOVQ DX, syscall15Args_a2(DI) // r3 + MOVQ X0, syscall15Args_f1(DI) // f1 + MOVQ X1, syscall15Args_f2(DI) // f2 + + XORL AX, AX // no error (it's ignored anyway) + ADDQ $STACK_SIZE, SP + MOVQ BP, SP + POPQ BP + RET + +TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0 + MOVQ 0(SP), AX // save the return address to calculate the cb index + MOVQ 8(SP), R10 // get the return SP so that we can align register args with stack args + ADDQ $8, SP // remove return address from stack, we are not returning to callbackasm, but to its caller. + + // make space for first six int and 8 float arguments below the frame + ADJSP $14*8, SP + MOVSD X0, (1*8)(SP) + MOVSD X1, (2*8)(SP) + MOVSD X2, (3*8)(SP) + MOVSD X3, (4*8)(SP) + MOVSD X4, (5*8)(SP) + MOVSD X5, (6*8)(SP) + MOVSD X6, (7*8)(SP) + MOVSD X7, (8*8)(SP) + MOVQ DI, (9*8)(SP) + MOVQ SI, (10*8)(SP) + MOVQ DX, (11*8)(SP) + MOVQ CX, (12*8)(SP) + MOVQ R8, (13*8)(SP) + MOVQ R9, (14*8)(SP) + LEAQ 8(SP), R8 // R8 = address of args vector + + PUSHQ R10 // push the stack pointer below registers + + // Switch from the host ABI to the Go ABI. + PUSH_REGS_HOST_TO_ABI0() + + // determine index into runtime·cbs table + MOVQ $callbackasm(SB), DX + SUBQ DX, AX + MOVQ $0, DX + MOVQ $5, CX // divide by 5 because each call instruction in ·callbacks is 5 bytes long + DIVL CX + SUBQ $1, AX // subtract 1 because return PC is to the next slot + + // Create a struct callbackArgs on our stack to be passed as + // the "frame" to cgocallback and on to callbackWrap. + // $24 to make enough room for the arguments to runtime.cgocallback + SUBQ $(24+callbackArgs__size), SP + MOVQ AX, (24+callbackArgs_index)(SP) // callback index + MOVQ R8, (24+callbackArgs_args)(SP) // address of args vector + MOVQ $0, (24+callbackArgs_result)(SP) // result + LEAQ 24(SP), AX // take the address of callbackArgs + + // Call cgocallback, which will call callbackWrap(frame). 
+ MOVQ ·callbackWrap_call(SB), DI // Get the ABIInternal function pointer + MOVQ (DI), DI // without by using a closure. + MOVQ AX, SI // frame (address of callbackArgs) + MOVQ $0, CX // context + + CALL crosscall2(SB) // runtime.cgocallback(fn, frame, ctxt uintptr) + + // Get callback result. + MOVQ (24+callbackArgs_result)(SP), AX + ADDQ $(24+callbackArgs__size), SP // remove callbackArgs struct + + POP_REGS_HOST_TO_ABI0() + + POPQ R10 // get the SP back + ADJSP $-14*8, SP // remove arguments + + MOVQ R10, 0(SP) + + RET diff --git a/vendor/github.com/ebitengine/purego/sys_arm64.s b/vendor/github.com/ebitengine/purego/sys_arm64.s new file mode 100644 index 00000000..a4f5be72 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_arm64.s @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || netbsd || windows + +#include "textflag.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 64 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of the syscall package. +// syscall15X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// a10 uintptr +// a11 uintptr +// a12 uintptr +// a13 uintptr +// a14 uintptr +// a15 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall15X must be called on the g0 stack with the +// C calling convention (use libcCall). +GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8 +DATA ·syscall15XABI0(SB)/8, $syscall15X(SB) +TEXT syscall15X(SB), NOSPLIT, $0 + SUB $STACK_SIZE, RSP // push structure pointer + MOVD R0, PTR_ADDRESS(RSP) + MOVD R0, R9 + + FMOVD syscall15Args_f1(R9), F0 // f1 + FMOVD syscall15Args_f2(R9), F1 // f2 + FMOVD syscall15Args_f3(R9), F2 // f3 + FMOVD syscall15Args_f4(R9), F3 // f4 + FMOVD syscall15Args_f5(R9), F4 // f5 + FMOVD syscall15Args_f6(R9), F5 // f6 + FMOVD syscall15Args_f7(R9), F6 // f7 + FMOVD syscall15Args_f8(R9), F7 // f8 + + MOVD syscall15Args_a1(R9), R0 // a1 + MOVD syscall15Args_a2(R9), R1 // a2 + MOVD syscall15Args_a3(R9), R2 // a3 + MOVD syscall15Args_a4(R9), R3 // a4 + MOVD syscall15Args_a5(R9), R4 // a5 + MOVD syscall15Args_a6(R9), R5 // a6 + MOVD syscall15Args_a7(R9), R6 // a7 + MOVD syscall15Args_a8(R9), R7 // a8 + MOVD syscall15Args_arm64_r8(R9), R8 // r8 + + MOVD syscall15Args_a9(R9), R10 + MOVD R10, 0(RSP) // push a9 onto stack + MOVD syscall15Args_a10(R9), R10 + MOVD R10, 8(RSP) // push a10 onto stack + MOVD syscall15Args_a11(R9), R10 + MOVD R10, 16(RSP) // push a11 onto stack + MOVD syscall15Args_a12(R9), R10 + MOVD R10, 24(RSP) // push a12 onto stack + MOVD syscall15Args_a13(R9), R10 + MOVD R10, 32(RSP) // push a13 onto stack + MOVD syscall15Args_a14(R9), R10 + MOVD R10, 40(RSP) // push a14 onto stack + MOVD syscall15Args_a15(R9), R10 + MOVD R10, 48(RSP) // push a15 onto stack + + MOVD syscall15Args_fn(R9), R10 // fn + BL (R10) + + MOVD PTR_ADDRESS(RSP), R2 // pop structure pointer + ADD $STACK_SIZE, RSP + + MOVD R0, syscall15Args_a1(R2) // save r1 + MOVD R1, syscall15Args_a2(R2) // save r3 + FMOVD F0, syscall15Args_f1(R2) // save f0 + FMOVD F1, syscall15Args_f2(R2) // save f1 + FMOVD F2, syscall15Args_f3(R2) // save f2 + FMOVD F3, syscall15Args_f4(R2) // save f3 + + RET diff --git a/vendor/github.com/ebitengine/purego/sys_loong64.s b/vendor/github.com/ebitengine/purego/sys_loong64.s new file mode 100644 index 
00000000..0f34eaee --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_loong64.s @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2025 The Ebitengine Authors + +//go:build linux + +#include "textflag.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 64 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of the syscall package. +// syscall15X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// a10 uintptr +// a11 uintptr +// a12 uintptr +// a13 uintptr +// a14 uintptr +// a15 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall15X must be called on the g0 stack with the +// C calling convention (use libcCall). +GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8 +DATA ·syscall15XABI0(SB)/8, $syscall15X(SB) +TEXT syscall15X(SB), NOSPLIT, $0 + // push structure pointer + SUBV $STACK_SIZE, R3 + MOVV R4, PTR_ADDRESS(R3) + MOVV R4, R13 + + MOVD syscall15Args_f1(R13), F0 // f1 + MOVD syscall15Args_f2(R13), F1 // f2 + MOVD syscall15Args_f3(R13), F2 // f3 + MOVD syscall15Args_f4(R13), F3 // f4 + MOVD syscall15Args_f5(R13), F4 // f5 + MOVD syscall15Args_f6(R13), F5 // f6 + MOVD syscall15Args_f7(R13), F6 // f7 + MOVD syscall15Args_f8(R13), F7 // f8 + + MOVV syscall15Args_a1(R13), R4 // a1 + MOVV syscall15Args_a2(R13), R5 // a2 + MOVV syscall15Args_a3(R13), R6 // a3 + MOVV syscall15Args_a4(R13), R7 // a4 + MOVV syscall15Args_a5(R13), R8 // a5 + MOVV syscall15Args_a6(R13), R9 // a6 + MOVV syscall15Args_a7(R13), R10 // a7 + MOVV syscall15Args_a8(R13), R11 // a8 + + // push a9-a15 onto stack + MOVV syscall15Args_a9(R13), R12 + MOVV R12, 0(R3) + MOVV syscall15Args_a10(R13), R12 + MOVV R12, 8(R3) + MOVV syscall15Args_a11(R13), R12 + MOVV R12, 16(R3) + MOVV syscall15Args_a12(R13), R12 + MOVV R12, 24(R3) + MOVV syscall15Args_a13(R13), R12 + MOVV R12, 32(R3) + MOVV syscall15Args_a14(R13), R12 + MOVV R12, 40(R3) + MOVV syscall15Args_a15(R13), R12 + MOVV R12, 48(R3) + + MOVV syscall15Args_fn(R13), R12 + JAL (R12) + + // pop structure pointer + MOVV PTR_ADDRESS(R3), R13 + ADDV $STACK_SIZE, R3 + + // save R4, R5 + MOVV R4, syscall15Args_a1(R13) + MOVV R5, syscall15Args_a2(R13) + + // save f0-f3 + MOVD F0, syscall15Args_f1(R13) + MOVD F1, syscall15Args_f2(R13) + MOVD F2, syscall15Args_f3(R13) + MOVD F3, syscall15Args_f4(R13) + RET diff --git a/vendor/github.com/ebitengine/purego/sys_unix_arm64.s b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s new file mode 100644 index 00000000..cea803ef --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 The Ebitengine Authors + +//go:build darwin || freebsd || linux || netbsd + +#include "textflag.h" +#include "go_asm.h" +#include "funcdata.h" +#include "abi_arm64.h" + +TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0 + NO_LOCAL_POINTERS + + // On entry, the trampoline in zcallback_darwin_arm64.s left + // the callback index in R12 (which is volatile in the C ABI). + + // Save callback register arguments R0-R7 and F0-F7. + // We do this at the top of the frame so they're contiguous with stack arguments. 
+ SUB $(16*8), RSP, R14 + FSTPD (F0, F1), (0*8)(R14) + FSTPD (F2, F3), (2*8)(R14) + FSTPD (F4, F5), (4*8)(R14) + FSTPD (F6, F7), (6*8)(R14) + STP (R0, R1), (8*8)(R14) + STP (R2, R3), (10*8)(R14) + STP (R4, R5), (12*8)(R14) + STP (R6, R7), (14*8)(R14) + + // Adjust SP by frame size. + SUB $(26*8), RSP + + // It is important to save R27 because the go assembler + // uses it for move instructions for a variable. + // This line: + // MOVD ·callbackWrap_call(SB), R0 + // Creates the instructions: + // ADRP 14335(PC), R27 + // MOVD 388(27), R0 + // R27 is a callee saved register so we are responsible + // for ensuring its value doesn't change. So save it and + // restore it at the end of this function. + // R30 is the link register. crosscall2 doesn't save it + // so it's saved here. + STP (R27, R30), 0(RSP) + + // Create a struct callbackArgs on our stack. + MOVD $(callbackArgs__size)(RSP), R13 + MOVD R12, callbackArgs_index(R13) // callback index + MOVD R14, callbackArgs_args(R13) // address of args vector + MOVD ZR, callbackArgs_result(R13) // result + + // Move parameters into registers + // Get the ABIInternal function pointer + // without by using a closure. + MOVD ·callbackWrap_call(SB), R0 + MOVD (R0), R0 // fn unsafe.Pointer + MOVD R13, R1 // frame (&callbackArgs{...}) + MOVD $0, R3 // ctxt uintptr + + BL crosscall2(SB) + + // Get callback result. + MOVD $(callbackArgs__size)(RSP), R13 + MOVD callbackArgs_result(R13), R0 + + // Restore LR and R27 + LDP 0(RSP), (R27, R30) + ADD $(26*8), RSP + + RET diff --git a/vendor/github.com/ebitengine/purego/sys_unix_loong64.s b/vendor/github.com/ebitengine/purego/sys_unix_loong64.s new file mode 100644 index 00000000..89dbd7d1 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_unix_loong64.s @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2025 The Ebitengine Authors + +//go:build linux + +#include "textflag.h" +#include "go_asm.h" +#include "funcdata.h" +#include "abi_loong64.h" + +TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0 + NO_LOCAL_POINTERS + + SUBV $(16*8), R3, R14 + MOVD F0, 0(R14) + MOVD F1, 8(R14) + MOVD F2, 16(R14) + MOVD F3, 24(R14) + MOVD F4, 32(R14) + MOVD F5, 40(R14) + MOVD F6, 48(R14) + MOVD F7, 56(R14) + MOVV R4, 64(R14) + MOVV R5, 72(R14) + MOVV R6, 80(R14) + MOVV R7, 88(R14) + MOVV R8, 96(R14) + MOVV R9, 104(R14) + MOVV R10, 112(R14) + MOVV R11, 120(R14) + + // Adjust SP by frame size. + SUBV $(22*8), R3 + + // It is important to save R30 because the go assembler + // uses it for move instructions for a variable. + // This line: + // MOVV ·callbackWrap_call(SB), R4 + // Creates the instructions: + // PCALAU12I off1(PC), R30 + // MOVV off2(R30), R4 + // R30 is a callee saved register so we are responsible + // for ensuring its value doesn't change. So save it and + // restore it at the end of this function. + // R1 is the link register. crosscall2 doesn't save it + // so it's saved here. + MOVV R1, 0(R3) + MOVV R30, 8(R3) + + // Create a struct callbackArgs on our stack. + MOVV $(callbackArgs__size)(R3), R13 + MOVV R12, callbackArgs_index(R13) // callback index + MOVV R14, callbackArgs_args(R13) // address of args vector + MOVV $0, callbackArgs_result(R13) // result + + // Move parameters into registers + // Get the ABIInternal function pointer + // without by using a closure. + MOVV ·callbackWrap_call(SB), R4 + MOVV (R4), R4 // fn unsafe.Pointer + MOVV R13, R5 // frame (&callbackArgs{...}) + MOVV $0, R7 // ctxt uintptr + + JAL crosscall2(SB) + + // Get callback result. 
+ MOVV $(callbackArgs__size)(R3), R13 + MOVV callbackArgs_result(R13), R4 + + // Restore LR and R30 + MOVV 0(R3), R1 + MOVV 8(R3), R30 + ADDV $(22*8), R3 + + RET diff --git a/vendor/github.com/ebitengine/purego/syscall.go b/vendor/github.com/ebitengine/purego/syscall.go new file mode 100644 index 00000000..ccfc4982 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall.go @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || netbsd || windows + +package purego + +// CDecl marks a function as being called using the __cdecl calling convention as defined in +// the [MSDocs] when passed to NewCallback. It must be the first argument to the function. +// This is only useful on 386 Windows, but it is safe to use on other platforms. +// +// [MSDocs]: https://learn.microsoft.com/en-us/cpp/cpp/cdecl?view=msvc-170 +type CDecl struct{} + +const ( + maxArgs = 15 + numOfFloatRegisters = 8 // arm64 and amd64 both have 8 float registers +) + +type syscall15Args struct { + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr + f1, f2, f3, f4, f5, f6, f7, f8 uintptr + arm64_r8 uintptr +} + +// SyscallN takes fn, a C function pointer and a list of arguments as uintptr. +// There is an internal maximum number of arguments that SyscallN can take. It panics +// when the maximum is exceeded. It returns the result and the libc error code if there is one. +// +// In order to call this function properly make sure to follow all the rules specified in [unsafe.Pointer] +// especially point 4. +// +// NOTE: SyscallN does not properly call functions that have both integer and float parameters. +// See discussion comment https://github.com/ebiten/purego/pull/1#issuecomment-1128057607 +// for an explanation of why that is. +// +// On amd64, if there are more than 8 floats the 9th and so on will be placed incorrectly on the +// stack. +// +// The pragma go:nosplit is not needed at this function declaration because it uses go:uintptrescapes +// which forces all the objects that the uintptrs point to onto the heap where a stack split won't affect +// their memory location. 
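As a usage illustration (not part of this file): a minimal sketch of calling a C function through SyscallN. It assumes purego's Dlopen/Dlsym helpers, a Linux glibc at the soname shown, and a puts symbol; error handling and platform differences are glossed over.

package main

import (
	"unsafe"

	"github.com/ebitengine/purego"
)

func main() {
	// Assumed Linux soname; other platforms use different library paths.
	libc, err := purego.Dlopen("libc.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}
	puts, err := purego.Dlsym(libc, "puts")
	if err != nil {
		panic(err)
	}
	msg := append([]byte("hello via SyscallN"), 0) // NUL-terminate for C
	// Arguments are plain uintptrs; go:uintptrescapes keeps msg reachable across the call.
	purego.SyscallN(puts, uintptr(unsafe.Pointer(&msg[0])))
}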
+// +//go:uintptrescapes +func SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { + if fn == 0 { + panic("purego: fn is nil") + } + if len(args) > maxArgs { + panic("purego: too many arguments to SyscallN") + } + // add padding so there is no out-of-bounds slicing + var tmp [maxArgs]uintptr + copy(tmp[:], args) + return syscall_syscall15X(fn, tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7], tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14]) +} diff --git a/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go new file mode 100644 index 00000000..7794c263 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build cgo && !(amd64 || arm64 || loong64) + +package purego + +import ( + "github.com/ebitengine/purego/internal/cgo" +) + +var syscall15XABI0 = uintptr(cgo.Syscall15XABI0) + +//go:nosplit +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + return cgo.Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) +} + +func NewCallback(_ any) uintptr { + panic("purego: NewCallback on Linux is only supported on amd64/arm64/loong64") +} diff --git a/vendor/github.com/ebitengine/purego/syscall_sysv.go b/vendor/github.com/ebitengine/purego/syscall_sysv.go new file mode 100644 index 00000000..d794bc38 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_sysv.go @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || (linux && (amd64 || arm64 || loong64)) || netbsd + +package purego + +import ( + "reflect" + "runtime" + "sync" + "unsafe" +) + +var syscall15XABI0 uintptr + +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + args := thePool.Get().(*syscall15Args) + defer thePool.Put(args) + + *args = syscall15Args{ + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, + a1, a2, a3, a4, a5, a6, a7, a8, + 0, + } + + runtime_cgocall(syscall15XABI0, unsafe.Pointer(args)) + return args.a1, args.a2, 0 +} + +// NewCallback converts a Go function to a function pointer conforming to the C calling convention. +// This is useful when interoperating with C code requiring callbacks. The argument is expected to be a +// function with zero or one uintptr-sized result. The function must not have arguments with size larger than the size +// of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory allocated +// for these callbacks is never released. At least 2000 callbacks can always be created. Although this function +// provides similar functionality to windows.NewCallback it is distinct. 
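Continuing that illustration, a hedged sketch of NewCallback in use: a Go comparator handed to libc's qsort. The qsort symbol and the elided error handling are assumptions for the example, and the snippet would live in the same program as the sketch above.

// Errors elided for brevity; see the previous sketch for Dlopen/Dlsym usage.
libc, _ := purego.Dlopen("libc.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
qsortFn, _ := purego.Dlsym(libc, "qsort")

nums := []int32{3, 1, 2}
cmp := purego.NewCallback(func(a, b *int32) int32 { // called back from C for each comparison
	return *a - *b
})
purego.SyscallN(qsortFn,
	uintptr(unsafe.Pointer(&nums[0])), // base
	uintptr(len(nums)),                // nmemb
	unsafe.Sizeof(nums[0]),            // size
	cmp,                               // compar
)
// nums is now {1, 2, 3}.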
+func NewCallback(fn any) uintptr { + ty := reflect.TypeOf(fn) + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if i != 0 { + panic("purego: CDecl must be the first argument") + } + } + return compileCallback(fn) +} + +// maxCb is the maximum number of callbacks +// only increase this if you have added more to the callbackasm function +const maxCB = 2000 + +var cbs struct { + lock sync.Mutex + numFn int // the number of functions currently in cbs.funcs + funcs [maxCB]reflect.Value // the saved callbacks +} + +type callbackArgs struct { + index uintptr + // args points to the argument block. + // + // The structure of the arguments goes + // float registers followed by the + // integer registers followed by the stack. + // + // This variable is treated as a continuous + // block of memory containing all of the arguments + // for this callback. + args unsafe.Pointer + // Below are out-args from callbackWrap + result uintptr +} + +func compileCallback(fn any) uintptr { + val := reflect.ValueOf(fn) + if val.Kind() != reflect.Func { + panic("purego: the type must be a function but was not") + } + if val.IsNil() { + panic("purego: function must not be nil") + } + ty := val.Type() + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + switch in.Kind() { + case reflect.Struct: + if i == 0 && in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + fallthrough + case reflect.Interface, reflect.Func, reflect.Slice, + reflect.Chan, reflect.Complex64, reflect.Complex128, + reflect.String, reflect.Map, reflect.Invalid: + panic("purego: unsupported argument type: " + in.Kind().String()) + } + } +output: + switch { + case ty.NumOut() == 1: + switch ty.Out(0).Kind() { + case reflect.Pointer, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.UnsafePointer: + break output + } + panic("purego: unsupported return type: " + ty.String()) + case ty.NumOut() > 1: + panic("purego: callbacks can only have one return") + } + cbs.lock.Lock() + defer cbs.lock.Unlock() + if cbs.numFn >= maxCB { + panic("purego: the maximum number of callbacks has been reached") + } + cbs.funcs[cbs.numFn] = val + cbs.numFn++ + return callbackasmAddr(cbs.numFn - 1) +} + +const ptrSize = unsafe.Sizeof((*int)(nil)) + +const callbackMaxFrame = 64 * ptrSize + +// callbackasm is implemented in zcallback_GOOS_GOARCH.s +// +//go:linkname __callbackasm callbackasm +var __callbackasm byte +var callbackasmABI0 = uintptr(unsafe.Pointer(&__callbackasm)) + +// callbackWrap_call allows the calling of the ABIInternal wrapper +// which is required for runtime.cgocallback without the +// tag which is only allowed in the runtime. +// This closure is used inside sys_darwin_GOARCH.s +var callbackWrap_call = callbackWrap + +// callbackWrap is called by assembly code which determines which Go function to call. +// This function takes the arguments and passes them to the Go function and returns the result. +func callbackWrap(a *callbackArgs) { + cbs.lock.Lock() + fn := cbs.funcs[a.index] + cbs.lock.Unlock() + fnType := fn.Type() + args := make([]reflect.Value, fnType.NumIn()) + frame := (*[callbackMaxFrame]uintptr)(a.args) + var floatsN int // floatsN represents the number of float arguments processed + var intsN int // intsN represents the number of integer arguments processed + // stack points to the index into frame of the current stack element. 
+ // The stack begins after the float and integer registers. + stack := numOfIntegerRegisters() + numOfFloatRegisters + for i := range args { + var pos int + switch fnType.In(i).Kind() { + case reflect.Float32, reflect.Float64: + if floatsN >= numOfFloatRegisters { + pos = stack + stack++ + } else { + pos = floatsN + } + floatsN++ + case reflect.Struct: + // This is the CDecl field + args[i] = reflect.Zero(fnType.In(i)) + continue + default: + + if intsN >= numOfIntegerRegisters() { + pos = stack + stack++ + } else { + // the integers begin after the floats in frame + pos = intsN + numOfFloatRegisters + } + intsN++ + } + args[i] = reflect.NewAt(fnType.In(i), unsafe.Pointer(&frame[pos])).Elem() + } + ret := fn.Call(args) + if len(ret) > 0 { + switch k := ret[0].Kind(); k { + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uintptr: + a.result = uintptr(ret[0].Uint()) + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + a.result = uintptr(ret[0].Int()) + case reflect.Bool: + if ret[0].Bool() { + a.result = 1 + } else { + a.result = 0 + } + case reflect.Pointer: + a.result = ret[0].Pointer() + case reflect.UnsafePointer: + a.result = ret[0].Pointer() + default: + panic("purego: unsupported kind: " + k.String()) + } + } +} + +// callbackasmAddr returns address of runtime.callbackasm +// function adjusted by i. +// On x86 and amd64, runtime.callbackasm is a series of CALL instructions, +// and we want callback to arrive at +// correspondent call instruction instead of start of +// runtime.callbackasm. +// On ARM, runtime.callbackasm is a series of mov and branch instructions. +// R12 is loaded with the callback index. Each entry is two instructions, +// hence 8 bytes. +func callbackasmAddr(i int) uintptr { + var entrySize int + switch runtime.GOARCH { + default: + panic("purego: unsupported architecture") + case "386", "amd64": + entrySize = 5 + case "arm", "arm64", "loong64": + // On ARM and ARM64, each entry is a MOV instruction + // followed by a branch instruction + entrySize = 8 + } + return callbackasmABI0 + uintptr(i*entrySize) +} diff --git a/vendor/github.com/ebitengine/purego/syscall_windows.go b/vendor/github.com/ebitengine/purego/syscall_windows.go new file mode 100644 index 00000000..5afd8d83 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_windows.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +import ( + "reflect" + "syscall" +) + +var syscall15XABI0 uintptr + +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + r1, r2, errno := syscall.Syscall15(fn, 15, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) + return r1, r2, uintptr(errno) +} + +// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. +// This is useful when interoperating with Windows code requiring callbacks. The argument is expected to be a +// function with one uintptr-sized result. The function must not have arguments with size larger than the +// size of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory +// allocated for these callbacks is never released. Between NewCallback and NewCallbackCDecl, at least 1024 +// callbacks can always be created. Although this function is similiar to the darwin version it may act +// differently. 
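A rough usage sketch of the contract the comment above describes (the callback signature and the way the resulting pointer is consumed are illustrative assumptions, not taken from this patch): a Go function with one uintptr-sized result is turned into a C-callable pointer and then handed to foreign code.

	package main

	import "github.com/ebitengine/purego"

	func main() {
		// Register a Go function as a foreign-callable pointer; per the doc
		// comment above it must have exactly one uintptr-sized result and no
		// arguments larger than a uintptr.
		cb := purego.NewCallback(func(value uintptr) uintptr {
			return value // echo the argument back to the foreign caller
		})
		_ = cb // cb would be passed to foreign code, e.g. via purego.SyscallN
	}
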
+func NewCallback(fn any) uintptr { + isCDecl := false + ty := reflect.TypeOf(fn) + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if i != 0 { + panic("purego: CDecl must be the first argument") + } + isCDecl = true + } + if isCDecl { + return syscall.NewCallbackCDecl(fn) + } + return syscall.NewCallback(fn) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return syscall.GetProcAddress(syscall.Handle(handle), name) +} diff --git a/vendor/github.com/ebitengine/purego/zcallback_amd64.s b/vendor/github.com/ebitengine/purego/zcallback_amd64.s new file mode 100644 index 00000000..42b54c48 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/zcallback_amd64.s @@ -0,0 +1,2014 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +//go:build darwin || freebsd || linux || netbsd + +// runtime·callbackasm is called by external code to +// execute Go implemented callback function. It is not +// called from the start, instead runtime·compilecallback +// always returns address into runtime·callbackasm offset +// appropriately so different callbacks start with different +// CALL instruction in runtime·callbackasm. This determines +// which Go callback function is executed later on. +#include "textflag.h" + +TEXT callbackasm(SB),NOSPLIT|NOFRAME,$0 + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) 
+ CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) diff --git a/vendor/github.com/ebitengine/purego/zcallback_arm64.s b/vendor/github.com/ebitengine/purego/zcallback_arm64.s new file mode 100644 index 00000000..087c2d4f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/zcallback_arm64.s @@ -0,0 +1,4014 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. 
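The generated tables in these files pair with callbackasmAddr above: every trampoline entry has a fixed size, so a callback index maps directly to a byte offset into callbackasm. A small sketch of that arithmetic, assuming only the entry sizes stated in callbackasmAddr (5 bytes per CALL on 386/amd64, 8 bytes per MOVD/B pair on arm/arm64/loong64; the constant names here are illustrative):

	package main

	import "fmt"

	// Entry sizes taken from callbackasmAddr: one 5-byte CALL instruction on
	// 386/amd64, one 8-byte MOVD+B pair on arm/arm64/loong64.
	const (
		entrySizeAMD64 = 5
		entrySizeARM64 = 8
	)

	func main() {
		// The trampoline for callback index i begins i*entrySize bytes into
		// the callbackasm table, which is how compileCallback's returned
		// address selects one of the generated CALL or MOVD/B entries.
		for _, i := range []int{0, 1, 3} {
			fmt.Printf("index %d: amd64 offset %d, arm64 offset %d\n",
				i, i*entrySizeAMD64, i*entrySizeARM64)
		}
	}
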
+
+//go:build darwin || freebsd || linux || netbsd
+
+// External code calls into callbackasm at an offset corresponding
+// to the callback index. Callbackasm is a table of MOV and B instructions.
+// The MOV instruction loads R12 with the callback index, and the
+// B instruction branches to callbackasm1.
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h"
+
+TEXT callbackasm(SB),NOSPLIT|NOFRAME,$0
+	MOVD	$0, R12
+	B	callbackasm1(SB)
+	MOVD	$1, R12
+	B	callbackasm1(SB)
+	MOVD	$2, R12
+	B	callbackasm1(SB)
+	MOVD	$3, R12
+	B	callbackasm1(SB)
+	MOVD	$4, R12
+	B	callbackasm1(SB)
+	MOVD	$5, R12
+	B	callbackasm1(SB)
+	MOVD	$6, R12
+	B	callbackasm1(SB)
+	MOVD	$7, R12
+	B	callbackasm1(SB)
+	// Entries for callback indexes 8 through 1925 follow in the same
+	// two-instruction pattern: MOVD $<index>, R12; B callbackasm1(SB).
$1926, R12 + B callbackasm1(SB) + MOVD $1927, R12 + B callbackasm1(SB) + MOVD $1928, R12 + B callbackasm1(SB) + MOVD $1929, R12 + B callbackasm1(SB) + MOVD $1930, R12 + B callbackasm1(SB) + MOVD $1931, R12 + B callbackasm1(SB) + MOVD $1932, R12 + B callbackasm1(SB) + MOVD $1933, R12 + B callbackasm1(SB) + MOVD $1934, R12 + B callbackasm1(SB) + MOVD $1935, R12 + B callbackasm1(SB) + MOVD $1936, R12 + B callbackasm1(SB) + MOVD $1937, R12 + B callbackasm1(SB) + MOVD $1938, R12 + B callbackasm1(SB) + MOVD $1939, R12 + B callbackasm1(SB) + MOVD $1940, R12 + B callbackasm1(SB) + MOVD $1941, R12 + B callbackasm1(SB) + MOVD $1942, R12 + B callbackasm1(SB) + MOVD $1943, R12 + B callbackasm1(SB) + MOVD $1944, R12 + B callbackasm1(SB) + MOVD $1945, R12 + B callbackasm1(SB) + MOVD $1946, R12 + B callbackasm1(SB) + MOVD $1947, R12 + B callbackasm1(SB) + MOVD $1948, R12 + B callbackasm1(SB) + MOVD $1949, R12 + B callbackasm1(SB) + MOVD $1950, R12 + B callbackasm1(SB) + MOVD $1951, R12 + B callbackasm1(SB) + MOVD $1952, R12 + B callbackasm1(SB) + MOVD $1953, R12 + B callbackasm1(SB) + MOVD $1954, R12 + B callbackasm1(SB) + MOVD $1955, R12 + B callbackasm1(SB) + MOVD $1956, R12 + B callbackasm1(SB) + MOVD $1957, R12 + B callbackasm1(SB) + MOVD $1958, R12 + B callbackasm1(SB) + MOVD $1959, R12 + B callbackasm1(SB) + MOVD $1960, R12 + B callbackasm1(SB) + MOVD $1961, R12 + B callbackasm1(SB) + MOVD $1962, R12 + B callbackasm1(SB) + MOVD $1963, R12 + B callbackasm1(SB) + MOVD $1964, R12 + B callbackasm1(SB) + MOVD $1965, R12 + B callbackasm1(SB) + MOVD $1966, R12 + B callbackasm1(SB) + MOVD $1967, R12 + B callbackasm1(SB) + MOVD $1968, R12 + B callbackasm1(SB) + MOVD $1969, R12 + B callbackasm1(SB) + MOVD $1970, R12 + B callbackasm1(SB) + MOVD $1971, R12 + B callbackasm1(SB) + MOVD $1972, R12 + B callbackasm1(SB) + MOVD $1973, R12 + B callbackasm1(SB) + MOVD $1974, R12 + B callbackasm1(SB) + MOVD $1975, R12 + B callbackasm1(SB) + MOVD $1976, R12 + B callbackasm1(SB) + MOVD $1977, R12 + B callbackasm1(SB) + MOVD $1978, R12 + B callbackasm1(SB) + MOVD $1979, R12 + B callbackasm1(SB) + MOVD $1980, R12 + B callbackasm1(SB) + MOVD $1981, R12 + B callbackasm1(SB) + MOVD $1982, R12 + B callbackasm1(SB) + MOVD $1983, R12 + B callbackasm1(SB) + MOVD $1984, R12 + B callbackasm1(SB) + MOVD $1985, R12 + B callbackasm1(SB) + MOVD $1986, R12 + B callbackasm1(SB) + MOVD $1987, R12 + B callbackasm1(SB) + MOVD $1988, R12 + B callbackasm1(SB) + MOVD $1989, R12 + B callbackasm1(SB) + MOVD $1990, R12 + B callbackasm1(SB) + MOVD $1991, R12 + B callbackasm1(SB) + MOVD $1992, R12 + B callbackasm1(SB) + MOVD $1993, R12 + B callbackasm1(SB) + MOVD $1994, R12 + B callbackasm1(SB) + MOVD $1995, R12 + B callbackasm1(SB) + MOVD $1996, R12 + B callbackasm1(SB) + MOVD $1997, R12 + B callbackasm1(SB) + MOVD $1998, R12 + B callbackasm1(SB) + MOVD $1999, R12 + B callbackasm1(SB) diff --git a/vendor/github.com/ebitengine/purego/zcallback_loong64.s b/vendor/github.com/ebitengine/purego/zcallback_loong64.s new file mode 100644 index 00000000..e20c598a --- /dev/null +++ b/vendor/github.com/ebitengine/purego/zcallback_loong64.s @@ -0,0 +1,4014 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +//go:build darwin || freebsd || linux || netbsd + +// External code calls into callbackasm at an offset corresponding +// to the callback index. Callbackasm is a table of MOVV and JMP instructions. +// The MOVV instruction loads R12 with the callback index, and the +// JMP instruction branches to callbackasm1. 
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h"
+
+TEXT callbackasm(SB),NOSPLIT|NOFRAME,$0
+	MOVV	$0, R12
+	JMP	callbackasm1(SB)
+	MOVV	$1, R12
+	JMP	callbackasm1(SB)
[... generated loong64 trampoline table continues in the same MOVV/JMP callbackasm1 pattern; entries 2 through 1404 elided ...]
+	MOVV	$1405, R12
+	JMP
callbackasm1(SB) + MOVV $1406, R12 + JMP callbackasm1(SB) + MOVV $1407, R12 + JMP callbackasm1(SB) + MOVV $1408, R12 + JMP callbackasm1(SB) + MOVV $1409, R12 + JMP callbackasm1(SB) + MOVV $1410, R12 + JMP callbackasm1(SB) + MOVV $1411, R12 + JMP callbackasm1(SB) + MOVV $1412, R12 + JMP callbackasm1(SB) + MOVV $1413, R12 + JMP callbackasm1(SB) + MOVV $1414, R12 + JMP callbackasm1(SB) + MOVV $1415, R12 + JMP callbackasm1(SB) + MOVV $1416, R12 + JMP callbackasm1(SB) + MOVV $1417, R12 + JMP callbackasm1(SB) + MOVV $1418, R12 + JMP callbackasm1(SB) + MOVV $1419, R12 + JMP callbackasm1(SB) + MOVV $1420, R12 + JMP callbackasm1(SB) + MOVV $1421, R12 + JMP callbackasm1(SB) + MOVV $1422, R12 + JMP callbackasm1(SB) + MOVV $1423, R12 + JMP callbackasm1(SB) + MOVV $1424, R12 + JMP callbackasm1(SB) + MOVV $1425, R12 + JMP callbackasm1(SB) + MOVV $1426, R12 + JMP callbackasm1(SB) + MOVV $1427, R12 + JMP callbackasm1(SB) + MOVV $1428, R12 + JMP callbackasm1(SB) + MOVV $1429, R12 + JMP callbackasm1(SB) + MOVV $1430, R12 + JMP callbackasm1(SB) + MOVV $1431, R12 + JMP callbackasm1(SB) + MOVV $1432, R12 + JMP callbackasm1(SB) + MOVV $1433, R12 + JMP callbackasm1(SB) + MOVV $1434, R12 + JMP callbackasm1(SB) + MOVV $1435, R12 + JMP callbackasm1(SB) + MOVV $1436, R12 + JMP callbackasm1(SB) + MOVV $1437, R12 + JMP callbackasm1(SB) + MOVV $1438, R12 + JMP callbackasm1(SB) + MOVV $1439, R12 + JMP callbackasm1(SB) + MOVV $1440, R12 + JMP callbackasm1(SB) + MOVV $1441, R12 + JMP callbackasm1(SB) + MOVV $1442, R12 + JMP callbackasm1(SB) + MOVV $1443, R12 + JMP callbackasm1(SB) + MOVV $1444, R12 + JMP callbackasm1(SB) + MOVV $1445, R12 + JMP callbackasm1(SB) + MOVV $1446, R12 + JMP callbackasm1(SB) + MOVV $1447, R12 + JMP callbackasm1(SB) + MOVV $1448, R12 + JMP callbackasm1(SB) + MOVV $1449, R12 + JMP callbackasm1(SB) + MOVV $1450, R12 + JMP callbackasm1(SB) + MOVV $1451, R12 + JMP callbackasm1(SB) + MOVV $1452, R12 + JMP callbackasm1(SB) + MOVV $1453, R12 + JMP callbackasm1(SB) + MOVV $1454, R12 + JMP callbackasm1(SB) + MOVV $1455, R12 + JMP callbackasm1(SB) + MOVV $1456, R12 + JMP callbackasm1(SB) + MOVV $1457, R12 + JMP callbackasm1(SB) + MOVV $1458, R12 + JMP callbackasm1(SB) + MOVV $1459, R12 + JMP callbackasm1(SB) + MOVV $1460, R12 + JMP callbackasm1(SB) + MOVV $1461, R12 + JMP callbackasm1(SB) + MOVV $1462, R12 + JMP callbackasm1(SB) + MOVV $1463, R12 + JMP callbackasm1(SB) + MOVV $1464, R12 + JMP callbackasm1(SB) + MOVV $1465, R12 + JMP callbackasm1(SB) + MOVV $1466, R12 + JMP callbackasm1(SB) + MOVV $1467, R12 + JMP callbackasm1(SB) + MOVV $1468, R12 + JMP callbackasm1(SB) + MOVV $1469, R12 + JMP callbackasm1(SB) + MOVV $1470, R12 + JMP callbackasm1(SB) + MOVV $1471, R12 + JMP callbackasm1(SB) + MOVV $1472, R12 + JMP callbackasm1(SB) + MOVV $1473, R12 + JMP callbackasm1(SB) + MOVV $1474, R12 + JMP callbackasm1(SB) + MOVV $1475, R12 + JMP callbackasm1(SB) + MOVV $1476, R12 + JMP callbackasm1(SB) + MOVV $1477, R12 + JMP callbackasm1(SB) + MOVV $1478, R12 + JMP callbackasm1(SB) + MOVV $1479, R12 + JMP callbackasm1(SB) + MOVV $1480, R12 + JMP callbackasm1(SB) + MOVV $1481, R12 + JMP callbackasm1(SB) + MOVV $1482, R12 + JMP callbackasm1(SB) + MOVV $1483, R12 + JMP callbackasm1(SB) + MOVV $1484, R12 + JMP callbackasm1(SB) + MOVV $1485, R12 + JMP callbackasm1(SB) + MOVV $1486, R12 + JMP callbackasm1(SB) + MOVV $1487, R12 + JMP callbackasm1(SB) + MOVV $1488, R12 + JMP callbackasm1(SB) + MOVV $1489, R12 + JMP callbackasm1(SB) + MOVV $1490, R12 + JMP callbackasm1(SB) + MOVV $1491, R12 + JMP callbackasm1(SB) + MOVV 
$1492, R12 + JMP callbackasm1(SB) + MOVV $1493, R12 + JMP callbackasm1(SB) + MOVV $1494, R12 + JMP callbackasm1(SB) + MOVV $1495, R12 + JMP callbackasm1(SB) + MOVV $1496, R12 + JMP callbackasm1(SB) + MOVV $1497, R12 + JMP callbackasm1(SB) + MOVV $1498, R12 + JMP callbackasm1(SB) + MOVV $1499, R12 + JMP callbackasm1(SB) + MOVV $1500, R12 + JMP callbackasm1(SB) + MOVV $1501, R12 + JMP callbackasm1(SB) + MOVV $1502, R12 + JMP callbackasm1(SB) + MOVV $1503, R12 + JMP callbackasm1(SB) + MOVV $1504, R12 + JMP callbackasm1(SB) + MOVV $1505, R12 + JMP callbackasm1(SB) + MOVV $1506, R12 + JMP callbackasm1(SB) + MOVV $1507, R12 + JMP callbackasm1(SB) + MOVV $1508, R12 + JMP callbackasm1(SB) + MOVV $1509, R12 + JMP callbackasm1(SB) + MOVV $1510, R12 + JMP callbackasm1(SB) + MOVV $1511, R12 + JMP callbackasm1(SB) + MOVV $1512, R12 + JMP callbackasm1(SB) + MOVV $1513, R12 + JMP callbackasm1(SB) + MOVV $1514, R12 + JMP callbackasm1(SB) + MOVV $1515, R12 + JMP callbackasm1(SB) + MOVV $1516, R12 + JMP callbackasm1(SB) + MOVV $1517, R12 + JMP callbackasm1(SB) + MOVV $1518, R12 + JMP callbackasm1(SB) + MOVV $1519, R12 + JMP callbackasm1(SB) + MOVV $1520, R12 + JMP callbackasm1(SB) + MOVV $1521, R12 + JMP callbackasm1(SB) + MOVV $1522, R12 + JMP callbackasm1(SB) + MOVV $1523, R12 + JMP callbackasm1(SB) + MOVV $1524, R12 + JMP callbackasm1(SB) + MOVV $1525, R12 + JMP callbackasm1(SB) + MOVV $1526, R12 + JMP callbackasm1(SB) + MOVV $1527, R12 + JMP callbackasm1(SB) + MOVV $1528, R12 + JMP callbackasm1(SB) + MOVV $1529, R12 + JMP callbackasm1(SB) + MOVV $1530, R12 + JMP callbackasm1(SB) + MOVV $1531, R12 + JMP callbackasm1(SB) + MOVV $1532, R12 + JMP callbackasm1(SB) + MOVV $1533, R12 + JMP callbackasm1(SB) + MOVV $1534, R12 + JMP callbackasm1(SB) + MOVV $1535, R12 + JMP callbackasm1(SB) + MOVV $1536, R12 + JMP callbackasm1(SB) + MOVV $1537, R12 + JMP callbackasm1(SB) + MOVV $1538, R12 + JMP callbackasm1(SB) + MOVV $1539, R12 + JMP callbackasm1(SB) + MOVV $1540, R12 + JMP callbackasm1(SB) + MOVV $1541, R12 + JMP callbackasm1(SB) + MOVV $1542, R12 + JMP callbackasm1(SB) + MOVV $1543, R12 + JMP callbackasm1(SB) + MOVV $1544, R12 + JMP callbackasm1(SB) + MOVV $1545, R12 + JMP callbackasm1(SB) + MOVV $1546, R12 + JMP callbackasm1(SB) + MOVV $1547, R12 + JMP callbackasm1(SB) + MOVV $1548, R12 + JMP callbackasm1(SB) + MOVV $1549, R12 + JMP callbackasm1(SB) + MOVV $1550, R12 + JMP callbackasm1(SB) + MOVV $1551, R12 + JMP callbackasm1(SB) + MOVV $1552, R12 + JMP callbackasm1(SB) + MOVV $1553, R12 + JMP callbackasm1(SB) + MOVV $1554, R12 + JMP callbackasm1(SB) + MOVV $1555, R12 + JMP callbackasm1(SB) + MOVV $1556, R12 + JMP callbackasm1(SB) + MOVV $1557, R12 + JMP callbackasm1(SB) + MOVV $1558, R12 + JMP callbackasm1(SB) + MOVV $1559, R12 + JMP callbackasm1(SB) + MOVV $1560, R12 + JMP callbackasm1(SB) + MOVV $1561, R12 + JMP callbackasm1(SB) + MOVV $1562, R12 + JMP callbackasm1(SB) + MOVV $1563, R12 + JMP callbackasm1(SB) + MOVV $1564, R12 + JMP callbackasm1(SB) + MOVV $1565, R12 + JMP callbackasm1(SB) + MOVV $1566, R12 + JMP callbackasm1(SB) + MOVV $1567, R12 + JMP callbackasm1(SB) + MOVV $1568, R12 + JMP callbackasm1(SB) + MOVV $1569, R12 + JMP callbackasm1(SB) + MOVV $1570, R12 + JMP callbackasm1(SB) + MOVV $1571, R12 + JMP callbackasm1(SB) + MOVV $1572, R12 + JMP callbackasm1(SB) + MOVV $1573, R12 + JMP callbackasm1(SB) + MOVV $1574, R12 + JMP callbackasm1(SB) + MOVV $1575, R12 + JMP callbackasm1(SB) + MOVV $1576, R12 + JMP callbackasm1(SB) + MOVV $1577, R12 + JMP callbackasm1(SB) + MOVV $1578, R12 + JMP 
callbackasm1(SB) + MOVV $1579, R12 + JMP callbackasm1(SB) + MOVV $1580, R12 + JMP callbackasm1(SB) + MOVV $1581, R12 + JMP callbackasm1(SB) + MOVV $1582, R12 + JMP callbackasm1(SB) + MOVV $1583, R12 + JMP callbackasm1(SB) + MOVV $1584, R12 + JMP callbackasm1(SB) + MOVV $1585, R12 + JMP callbackasm1(SB) + MOVV $1586, R12 + JMP callbackasm1(SB) + MOVV $1587, R12 + JMP callbackasm1(SB) + MOVV $1588, R12 + JMP callbackasm1(SB) + MOVV $1589, R12 + JMP callbackasm1(SB) + MOVV $1590, R12 + JMP callbackasm1(SB) + MOVV $1591, R12 + JMP callbackasm1(SB) + MOVV $1592, R12 + JMP callbackasm1(SB) + MOVV $1593, R12 + JMP callbackasm1(SB) + MOVV $1594, R12 + JMP callbackasm1(SB) + MOVV $1595, R12 + JMP callbackasm1(SB) + MOVV $1596, R12 + JMP callbackasm1(SB) + MOVV $1597, R12 + JMP callbackasm1(SB) + MOVV $1598, R12 + JMP callbackasm1(SB) + MOVV $1599, R12 + JMP callbackasm1(SB) + MOVV $1600, R12 + JMP callbackasm1(SB) + MOVV $1601, R12 + JMP callbackasm1(SB) + MOVV $1602, R12 + JMP callbackasm1(SB) + MOVV $1603, R12 + JMP callbackasm1(SB) + MOVV $1604, R12 + JMP callbackasm1(SB) + MOVV $1605, R12 + JMP callbackasm1(SB) + MOVV $1606, R12 + JMP callbackasm1(SB) + MOVV $1607, R12 + JMP callbackasm1(SB) + MOVV $1608, R12 + JMP callbackasm1(SB) + MOVV $1609, R12 + JMP callbackasm1(SB) + MOVV $1610, R12 + JMP callbackasm1(SB) + MOVV $1611, R12 + JMP callbackasm1(SB) + MOVV $1612, R12 + JMP callbackasm1(SB) + MOVV $1613, R12 + JMP callbackasm1(SB) + MOVV $1614, R12 + JMP callbackasm1(SB) + MOVV $1615, R12 + JMP callbackasm1(SB) + MOVV $1616, R12 + JMP callbackasm1(SB) + MOVV $1617, R12 + JMP callbackasm1(SB) + MOVV $1618, R12 + JMP callbackasm1(SB) + MOVV $1619, R12 + JMP callbackasm1(SB) + MOVV $1620, R12 + JMP callbackasm1(SB) + MOVV $1621, R12 + JMP callbackasm1(SB) + MOVV $1622, R12 + JMP callbackasm1(SB) + MOVV $1623, R12 + JMP callbackasm1(SB) + MOVV $1624, R12 + JMP callbackasm1(SB) + MOVV $1625, R12 + JMP callbackasm1(SB) + MOVV $1626, R12 + JMP callbackasm1(SB) + MOVV $1627, R12 + JMP callbackasm1(SB) + MOVV $1628, R12 + JMP callbackasm1(SB) + MOVV $1629, R12 + JMP callbackasm1(SB) + MOVV $1630, R12 + JMP callbackasm1(SB) + MOVV $1631, R12 + JMP callbackasm1(SB) + MOVV $1632, R12 + JMP callbackasm1(SB) + MOVV $1633, R12 + JMP callbackasm1(SB) + MOVV $1634, R12 + JMP callbackasm1(SB) + MOVV $1635, R12 + JMP callbackasm1(SB) + MOVV $1636, R12 + JMP callbackasm1(SB) + MOVV $1637, R12 + JMP callbackasm1(SB) + MOVV $1638, R12 + JMP callbackasm1(SB) + MOVV $1639, R12 + JMP callbackasm1(SB) + MOVV $1640, R12 + JMP callbackasm1(SB) + MOVV $1641, R12 + JMP callbackasm1(SB) + MOVV $1642, R12 + JMP callbackasm1(SB) + MOVV $1643, R12 + JMP callbackasm1(SB) + MOVV $1644, R12 + JMP callbackasm1(SB) + MOVV $1645, R12 + JMP callbackasm1(SB) + MOVV $1646, R12 + JMP callbackasm1(SB) + MOVV $1647, R12 + JMP callbackasm1(SB) + MOVV $1648, R12 + JMP callbackasm1(SB) + MOVV $1649, R12 + JMP callbackasm1(SB) + MOVV $1650, R12 + JMP callbackasm1(SB) + MOVV $1651, R12 + JMP callbackasm1(SB) + MOVV $1652, R12 + JMP callbackasm1(SB) + MOVV $1653, R12 + JMP callbackasm1(SB) + MOVV $1654, R12 + JMP callbackasm1(SB) + MOVV $1655, R12 + JMP callbackasm1(SB) + MOVV $1656, R12 + JMP callbackasm1(SB) + MOVV $1657, R12 + JMP callbackasm1(SB) + MOVV $1658, R12 + JMP callbackasm1(SB) + MOVV $1659, R12 + JMP callbackasm1(SB) + MOVV $1660, R12 + JMP callbackasm1(SB) + MOVV $1661, R12 + JMP callbackasm1(SB) + MOVV $1662, R12 + JMP callbackasm1(SB) + MOVV $1663, R12 + JMP callbackasm1(SB) + MOVV $1664, R12 + JMP callbackasm1(SB) + MOVV 
$1665, R12 + JMP callbackasm1(SB) + MOVV $1666, R12 + JMP callbackasm1(SB) + MOVV $1667, R12 + JMP callbackasm1(SB) + MOVV $1668, R12 + JMP callbackasm1(SB) + MOVV $1669, R12 + JMP callbackasm1(SB) + MOVV $1670, R12 + JMP callbackasm1(SB) + MOVV $1671, R12 + JMP callbackasm1(SB) + MOVV $1672, R12 + JMP callbackasm1(SB) + MOVV $1673, R12 + JMP callbackasm1(SB) + MOVV $1674, R12 + JMP callbackasm1(SB) + MOVV $1675, R12 + JMP callbackasm1(SB) + MOVV $1676, R12 + JMP callbackasm1(SB) + MOVV $1677, R12 + JMP callbackasm1(SB) + MOVV $1678, R12 + JMP callbackasm1(SB) + MOVV $1679, R12 + JMP callbackasm1(SB) + MOVV $1680, R12 + JMP callbackasm1(SB) + MOVV $1681, R12 + JMP callbackasm1(SB) + MOVV $1682, R12 + JMP callbackasm1(SB) + MOVV $1683, R12 + JMP callbackasm1(SB) + MOVV $1684, R12 + JMP callbackasm1(SB) + MOVV $1685, R12 + JMP callbackasm1(SB) + MOVV $1686, R12 + JMP callbackasm1(SB) + MOVV $1687, R12 + JMP callbackasm1(SB) + MOVV $1688, R12 + JMP callbackasm1(SB) + MOVV $1689, R12 + JMP callbackasm1(SB) + MOVV $1690, R12 + JMP callbackasm1(SB) + MOVV $1691, R12 + JMP callbackasm1(SB) + MOVV $1692, R12 + JMP callbackasm1(SB) + MOVV $1693, R12 + JMP callbackasm1(SB) + MOVV $1694, R12 + JMP callbackasm1(SB) + MOVV $1695, R12 + JMP callbackasm1(SB) + MOVV $1696, R12 + JMP callbackasm1(SB) + MOVV $1697, R12 + JMP callbackasm1(SB) + MOVV $1698, R12 + JMP callbackasm1(SB) + MOVV $1699, R12 + JMP callbackasm1(SB) + MOVV $1700, R12 + JMP callbackasm1(SB) + MOVV $1701, R12 + JMP callbackasm1(SB) + MOVV $1702, R12 + JMP callbackasm1(SB) + MOVV $1703, R12 + JMP callbackasm1(SB) + MOVV $1704, R12 + JMP callbackasm1(SB) + MOVV $1705, R12 + JMP callbackasm1(SB) + MOVV $1706, R12 + JMP callbackasm1(SB) + MOVV $1707, R12 + JMP callbackasm1(SB) + MOVV $1708, R12 + JMP callbackasm1(SB) + MOVV $1709, R12 + JMP callbackasm1(SB) + MOVV $1710, R12 + JMP callbackasm1(SB) + MOVV $1711, R12 + JMP callbackasm1(SB) + MOVV $1712, R12 + JMP callbackasm1(SB) + MOVV $1713, R12 + JMP callbackasm1(SB) + MOVV $1714, R12 + JMP callbackasm1(SB) + MOVV $1715, R12 + JMP callbackasm1(SB) + MOVV $1716, R12 + JMP callbackasm1(SB) + MOVV $1717, R12 + JMP callbackasm1(SB) + MOVV $1718, R12 + JMP callbackasm1(SB) + MOVV $1719, R12 + JMP callbackasm1(SB) + MOVV $1720, R12 + JMP callbackasm1(SB) + MOVV $1721, R12 + JMP callbackasm1(SB) + MOVV $1722, R12 + JMP callbackasm1(SB) + MOVV $1723, R12 + JMP callbackasm1(SB) + MOVV $1724, R12 + JMP callbackasm1(SB) + MOVV $1725, R12 + JMP callbackasm1(SB) + MOVV $1726, R12 + JMP callbackasm1(SB) + MOVV $1727, R12 + JMP callbackasm1(SB) + MOVV $1728, R12 + JMP callbackasm1(SB) + MOVV $1729, R12 + JMP callbackasm1(SB) + MOVV $1730, R12 + JMP callbackasm1(SB) + MOVV $1731, R12 + JMP callbackasm1(SB) + MOVV $1732, R12 + JMP callbackasm1(SB) + MOVV $1733, R12 + JMP callbackasm1(SB) + MOVV $1734, R12 + JMP callbackasm1(SB) + MOVV $1735, R12 + JMP callbackasm1(SB) + MOVV $1736, R12 + JMP callbackasm1(SB) + MOVV $1737, R12 + JMP callbackasm1(SB) + MOVV $1738, R12 + JMP callbackasm1(SB) + MOVV $1739, R12 + JMP callbackasm1(SB) + MOVV $1740, R12 + JMP callbackasm1(SB) + MOVV $1741, R12 + JMP callbackasm1(SB) + MOVV $1742, R12 + JMP callbackasm1(SB) + MOVV $1743, R12 + JMP callbackasm1(SB) + MOVV $1744, R12 + JMP callbackasm1(SB) + MOVV $1745, R12 + JMP callbackasm1(SB) + MOVV $1746, R12 + JMP callbackasm1(SB) + MOVV $1747, R12 + JMP callbackasm1(SB) + MOVV $1748, R12 + JMP callbackasm1(SB) + MOVV $1749, R12 + JMP callbackasm1(SB) + MOVV $1750, R12 + JMP callbackasm1(SB) + MOVV $1751, R12 + JMP 
callbackasm1(SB) + MOVV $1752, R12 + JMP callbackasm1(SB) + MOVV $1753, R12 + JMP callbackasm1(SB) + MOVV $1754, R12 + JMP callbackasm1(SB) + MOVV $1755, R12 + JMP callbackasm1(SB) + MOVV $1756, R12 + JMP callbackasm1(SB) + MOVV $1757, R12 + JMP callbackasm1(SB) + MOVV $1758, R12 + JMP callbackasm1(SB) + MOVV $1759, R12 + JMP callbackasm1(SB) + MOVV $1760, R12 + JMP callbackasm1(SB) + MOVV $1761, R12 + JMP callbackasm1(SB) + MOVV $1762, R12 + JMP callbackasm1(SB) + MOVV $1763, R12 + JMP callbackasm1(SB) + MOVV $1764, R12 + JMP callbackasm1(SB) + MOVV $1765, R12 + JMP callbackasm1(SB) + MOVV $1766, R12 + JMP callbackasm1(SB) + MOVV $1767, R12 + JMP callbackasm1(SB) + MOVV $1768, R12 + JMP callbackasm1(SB) + MOVV $1769, R12 + JMP callbackasm1(SB) + MOVV $1770, R12 + JMP callbackasm1(SB) + MOVV $1771, R12 + JMP callbackasm1(SB) + MOVV $1772, R12 + JMP callbackasm1(SB) + MOVV $1773, R12 + JMP callbackasm1(SB) + MOVV $1774, R12 + JMP callbackasm1(SB) + MOVV $1775, R12 + JMP callbackasm1(SB) + MOVV $1776, R12 + JMP callbackasm1(SB) + MOVV $1777, R12 + JMP callbackasm1(SB) + MOVV $1778, R12 + JMP callbackasm1(SB) + MOVV $1779, R12 + JMP callbackasm1(SB) + MOVV $1780, R12 + JMP callbackasm1(SB) + MOVV $1781, R12 + JMP callbackasm1(SB) + MOVV $1782, R12 + JMP callbackasm1(SB) + MOVV $1783, R12 + JMP callbackasm1(SB) + MOVV $1784, R12 + JMP callbackasm1(SB) + MOVV $1785, R12 + JMP callbackasm1(SB) + MOVV $1786, R12 + JMP callbackasm1(SB) + MOVV $1787, R12 + JMP callbackasm1(SB) + MOVV $1788, R12 + JMP callbackasm1(SB) + MOVV $1789, R12 + JMP callbackasm1(SB) + MOVV $1790, R12 + JMP callbackasm1(SB) + MOVV $1791, R12 + JMP callbackasm1(SB) + MOVV $1792, R12 + JMP callbackasm1(SB) + MOVV $1793, R12 + JMP callbackasm1(SB) + MOVV $1794, R12 + JMP callbackasm1(SB) + MOVV $1795, R12 + JMP callbackasm1(SB) + MOVV $1796, R12 + JMP callbackasm1(SB) + MOVV $1797, R12 + JMP callbackasm1(SB) + MOVV $1798, R12 + JMP callbackasm1(SB) + MOVV $1799, R12 + JMP callbackasm1(SB) + MOVV $1800, R12 + JMP callbackasm1(SB) + MOVV $1801, R12 + JMP callbackasm1(SB) + MOVV $1802, R12 + JMP callbackasm1(SB) + MOVV $1803, R12 + JMP callbackasm1(SB) + MOVV $1804, R12 + JMP callbackasm1(SB) + MOVV $1805, R12 + JMP callbackasm1(SB) + MOVV $1806, R12 + JMP callbackasm1(SB) + MOVV $1807, R12 + JMP callbackasm1(SB) + MOVV $1808, R12 + JMP callbackasm1(SB) + MOVV $1809, R12 + JMP callbackasm1(SB) + MOVV $1810, R12 + JMP callbackasm1(SB) + MOVV $1811, R12 + JMP callbackasm1(SB) + MOVV $1812, R12 + JMP callbackasm1(SB) + MOVV $1813, R12 + JMP callbackasm1(SB) + MOVV $1814, R12 + JMP callbackasm1(SB) + MOVV $1815, R12 + JMP callbackasm1(SB) + MOVV $1816, R12 + JMP callbackasm1(SB) + MOVV $1817, R12 + JMP callbackasm1(SB) + MOVV $1818, R12 + JMP callbackasm1(SB) + MOVV $1819, R12 + JMP callbackasm1(SB) + MOVV $1820, R12 + JMP callbackasm1(SB) + MOVV $1821, R12 + JMP callbackasm1(SB) + MOVV $1822, R12 + JMP callbackasm1(SB) + MOVV $1823, R12 + JMP callbackasm1(SB) + MOVV $1824, R12 + JMP callbackasm1(SB) + MOVV $1825, R12 + JMP callbackasm1(SB) + MOVV $1826, R12 + JMP callbackasm1(SB) + MOVV $1827, R12 + JMP callbackasm1(SB) + MOVV $1828, R12 + JMP callbackasm1(SB) + MOVV $1829, R12 + JMP callbackasm1(SB) + MOVV $1830, R12 + JMP callbackasm1(SB) + MOVV $1831, R12 + JMP callbackasm1(SB) + MOVV $1832, R12 + JMP callbackasm1(SB) + MOVV $1833, R12 + JMP callbackasm1(SB) + MOVV $1834, R12 + JMP callbackasm1(SB) + MOVV $1835, R12 + JMP callbackasm1(SB) + MOVV $1836, R12 + JMP callbackasm1(SB) + MOVV $1837, R12 + JMP callbackasm1(SB) + MOVV 
$1838, R12 + JMP callbackasm1(SB) + MOVV $1839, R12 + JMP callbackasm1(SB) + MOVV $1840, R12 + JMP callbackasm1(SB) + MOVV $1841, R12 + JMP callbackasm1(SB) + MOVV $1842, R12 + JMP callbackasm1(SB) + MOVV $1843, R12 + JMP callbackasm1(SB) + MOVV $1844, R12 + JMP callbackasm1(SB) + MOVV $1845, R12 + JMP callbackasm1(SB) + MOVV $1846, R12 + JMP callbackasm1(SB) + MOVV $1847, R12 + JMP callbackasm1(SB) + MOVV $1848, R12 + JMP callbackasm1(SB) + MOVV $1849, R12 + JMP callbackasm1(SB) + MOVV $1850, R12 + JMP callbackasm1(SB) + MOVV $1851, R12 + JMP callbackasm1(SB) + MOVV $1852, R12 + JMP callbackasm1(SB) + MOVV $1853, R12 + JMP callbackasm1(SB) + MOVV $1854, R12 + JMP callbackasm1(SB) + MOVV $1855, R12 + JMP callbackasm1(SB) + MOVV $1856, R12 + JMP callbackasm1(SB) + MOVV $1857, R12 + JMP callbackasm1(SB) + MOVV $1858, R12 + JMP callbackasm1(SB) + MOVV $1859, R12 + JMP callbackasm1(SB) + MOVV $1860, R12 + JMP callbackasm1(SB) + MOVV $1861, R12 + JMP callbackasm1(SB) + MOVV $1862, R12 + JMP callbackasm1(SB) + MOVV $1863, R12 + JMP callbackasm1(SB) + MOVV $1864, R12 + JMP callbackasm1(SB) + MOVV $1865, R12 + JMP callbackasm1(SB) + MOVV $1866, R12 + JMP callbackasm1(SB) + MOVV $1867, R12 + JMP callbackasm1(SB) + MOVV $1868, R12 + JMP callbackasm1(SB) + MOVV $1869, R12 + JMP callbackasm1(SB) + MOVV $1870, R12 + JMP callbackasm1(SB) + MOVV $1871, R12 + JMP callbackasm1(SB) + MOVV $1872, R12 + JMP callbackasm1(SB) + MOVV $1873, R12 + JMP callbackasm1(SB) + MOVV $1874, R12 + JMP callbackasm1(SB) + MOVV $1875, R12 + JMP callbackasm1(SB) + MOVV $1876, R12 + JMP callbackasm1(SB) + MOVV $1877, R12 + JMP callbackasm1(SB) + MOVV $1878, R12 + JMP callbackasm1(SB) + MOVV $1879, R12 + JMP callbackasm1(SB) + MOVV $1880, R12 + JMP callbackasm1(SB) + MOVV $1881, R12 + JMP callbackasm1(SB) + MOVV $1882, R12 + JMP callbackasm1(SB) + MOVV $1883, R12 + JMP callbackasm1(SB) + MOVV $1884, R12 + JMP callbackasm1(SB) + MOVV $1885, R12 + JMP callbackasm1(SB) + MOVV $1886, R12 + JMP callbackasm1(SB) + MOVV $1887, R12 + JMP callbackasm1(SB) + MOVV $1888, R12 + JMP callbackasm1(SB) + MOVV $1889, R12 + JMP callbackasm1(SB) + MOVV $1890, R12 + JMP callbackasm1(SB) + MOVV $1891, R12 + JMP callbackasm1(SB) + MOVV $1892, R12 + JMP callbackasm1(SB) + MOVV $1893, R12 + JMP callbackasm1(SB) + MOVV $1894, R12 + JMP callbackasm1(SB) + MOVV $1895, R12 + JMP callbackasm1(SB) + MOVV $1896, R12 + JMP callbackasm1(SB) + MOVV $1897, R12 + JMP callbackasm1(SB) + MOVV $1898, R12 + JMP callbackasm1(SB) + MOVV $1899, R12 + JMP callbackasm1(SB) + MOVV $1900, R12 + JMP callbackasm1(SB) + MOVV $1901, R12 + JMP callbackasm1(SB) + MOVV $1902, R12 + JMP callbackasm1(SB) + MOVV $1903, R12 + JMP callbackasm1(SB) + MOVV $1904, R12 + JMP callbackasm1(SB) + MOVV $1905, R12 + JMP callbackasm1(SB) + MOVV $1906, R12 + JMP callbackasm1(SB) + MOVV $1907, R12 + JMP callbackasm1(SB) + MOVV $1908, R12 + JMP callbackasm1(SB) + MOVV $1909, R12 + JMP callbackasm1(SB) + MOVV $1910, R12 + JMP callbackasm1(SB) + MOVV $1911, R12 + JMP callbackasm1(SB) + MOVV $1912, R12 + JMP callbackasm1(SB) + MOVV $1913, R12 + JMP callbackasm1(SB) + MOVV $1914, R12 + JMP callbackasm1(SB) + MOVV $1915, R12 + JMP callbackasm1(SB) + MOVV $1916, R12 + JMP callbackasm1(SB) + MOVV $1917, R12 + JMP callbackasm1(SB) + MOVV $1918, R12 + JMP callbackasm1(SB) + MOVV $1919, R12 + JMP callbackasm1(SB) + MOVV $1920, R12 + JMP callbackasm1(SB) + MOVV $1921, R12 + JMP callbackasm1(SB) + MOVV $1922, R12 + JMP callbackasm1(SB) + MOVV $1923, R12 + JMP callbackasm1(SB) + MOVV $1924, R12 + JMP 
callbackasm1(SB) + MOVV $1925, R12 + JMP callbackasm1(SB) + MOVV $1926, R12 + JMP callbackasm1(SB) + MOVV $1927, R12 + JMP callbackasm1(SB) + MOVV $1928, R12 + JMP callbackasm1(SB) + MOVV $1929, R12 + JMP callbackasm1(SB) + MOVV $1930, R12 + JMP callbackasm1(SB) + MOVV $1931, R12 + JMP callbackasm1(SB) + MOVV $1932, R12 + JMP callbackasm1(SB) + MOVV $1933, R12 + JMP callbackasm1(SB) + MOVV $1934, R12 + JMP callbackasm1(SB) + MOVV $1935, R12 + JMP callbackasm1(SB) + MOVV $1936, R12 + JMP callbackasm1(SB) + MOVV $1937, R12 + JMP callbackasm1(SB) + MOVV $1938, R12 + JMP callbackasm1(SB) + MOVV $1939, R12 + JMP callbackasm1(SB) + MOVV $1940, R12 + JMP callbackasm1(SB) + MOVV $1941, R12 + JMP callbackasm1(SB) + MOVV $1942, R12 + JMP callbackasm1(SB) + MOVV $1943, R12 + JMP callbackasm1(SB) + MOVV $1944, R12 + JMP callbackasm1(SB) + MOVV $1945, R12 + JMP callbackasm1(SB) + MOVV $1946, R12 + JMP callbackasm1(SB) + MOVV $1947, R12 + JMP callbackasm1(SB) + MOVV $1948, R12 + JMP callbackasm1(SB) + MOVV $1949, R12 + JMP callbackasm1(SB) + MOVV $1950, R12 + JMP callbackasm1(SB) + MOVV $1951, R12 + JMP callbackasm1(SB) + MOVV $1952, R12 + JMP callbackasm1(SB) + MOVV $1953, R12 + JMP callbackasm1(SB) + MOVV $1954, R12 + JMP callbackasm1(SB) + MOVV $1955, R12 + JMP callbackasm1(SB) + MOVV $1956, R12 + JMP callbackasm1(SB) + MOVV $1957, R12 + JMP callbackasm1(SB) + MOVV $1958, R12 + JMP callbackasm1(SB) + MOVV $1959, R12 + JMP callbackasm1(SB) + MOVV $1960, R12 + JMP callbackasm1(SB) + MOVV $1961, R12 + JMP callbackasm1(SB) + MOVV $1962, R12 + JMP callbackasm1(SB) + MOVV $1963, R12 + JMP callbackasm1(SB) + MOVV $1964, R12 + JMP callbackasm1(SB) + MOVV $1965, R12 + JMP callbackasm1(SB) + MOVV $1966, R12 + JMP callbackasm1(SB) + MOVV $1967, R12 + JMP callbackasm1(SB) + MOVV $1968, R12 + JMP callbackasm1(SB) + MOVV $1969, R12 + JMP callbackasm1(SB) + MOVV $1970, R12 + JMP callbackasm1(SB) + MOVV $1971, R12 + JMP callbackasm1(SB) + MOVV $1972, R12 + JMP callbackasm1(SB) + MOVV $1973, R12 + JMP callbackasm1(SB) + MOVV $1974, R12 + JMP callbackasm1(SB) + MOVV $1975, R12 + JMP callbackasm1(SB) + MOVV $1976, R12 + JMP callbackasm1(SB) + MOVV $1977, R12 + JMP callbackasm1(SB) + MOVV $1978, R12 + JMP callbackasm1(SB) + MOVV $1979, R12 + JMP callbackasm1(SB) + MOVV $1980, R12 + JMP callbackasm1(SB) + MOVV $1981, R12 + JMP callbackasm1(SB) + MOVV $1982, R12 + JMP callbackasm1(SB) + MOVV $1983, R12 + JMP callbackasm1(SB) + MOVV $1984, R12 + JMP callbackasm1(SB) + MOVV $1985, R12 + JMP callbackasm1(SB) + MOVV $1986, R12 + JMP callbackasm1(SB) + MOVV $1987, R12 + JMP callbackasm1(SB) + MOVV $1988, R12 + JMP callbackasm1(SB) + MOVV $1989, R12 + JMP callbackasm1(SB) + MOVV $1990, R12 + JMP callbackasm1(SB) + MOVV $1991, R12 + JMP callbackasm1(SB) + MOVV $1992, R12 + JMP callbackasm1(SB) + MOVV $1993, R12 + JMP callbackasm1(SB) + MOVV $1994, R12 + JMP callbackasm1(SB) + MOVV $1995, R12 + JMP callbackasm1(SB) + MOVV $1996, R12 + JMP callbackasm1(SB) + MOVV $1997, R12 + JMP callbackasm1(SB) + MOVV $1998, R12 + JMP callbackasm1(SB) + MOVV $1999, R12 + JMP callbackasm1(SB) diff --git a/vendor/github.com/gammazero/deque/.gitignore b/vendor/github.com/gammazero/deque/.gitignore deleted file mode 100644 index b33406fb..00000000 --- a/vendor/github.com/gammazero/deque/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -*~ - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c 
-_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/gammazero/deque/README.md b/vendor/github.com/gammazero/deque/README.md deleted file mode 100644 index ee2dbb88..00000000 --- a/vendor/github.com/gammazero/deque/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# deque - -[![GoDoc](https://pkg.go.dev/badge/github.com/gammazero/deque)](https://pkg.go.dev/github.com/gammazero/deque) -[![Build Status](https://github.com/gammazero/deque/actions/workflows/go.yml/badge.svg)](https://github.com/gammazero/deque/actions/workflows/go.yml) -[![Go Report Card](https://goreportcard.com/badge/github.com/gammazero/deque)](https://goreportcard.com/report/github.com/gammazero/deque) -[![codecov](https://codecov.io/gh/gammazero/deque/branch/master/graph/badge.svg)](https://codecov.io/gh/gammazero/deque) -[![License](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) - -Fast ring-buffer deque ([double-ended queue](https://en.wikipedia.org/wiki/Double-ended_queue)) implementation. - -For a pictorial description, see the [Deque diagram](https://github.com/gammazero/deque/wiki) - -## Installation - -``` -$ go get github.com/gammazero/deque -``` - -## Deque data structure - -Deque generalizes a queue and a stack, to efficiently add and remove items at either end with O(1) performance. [Queue](https://en.wikipedia.org/wiki/Queue_(abstract_data_type)) (FIFO) operations are supported using `PushBack` and `PopFront`. [Stack](https://en.wikipedia.org/wiki/Stack_(abstract_data_type)) (LIFO) operations are supported using `PushBack` and `PopBack`. - -## Ring-buffer Performance - -This deque implementation is optimized for CPU and GC performance. The circular buffer automatically re-sizes by powers of two, growing when additional capacity is needed and shrinking when only a quarter of the capacity is used, and uses bitwise arithmetic for all calculations. Since growth is by powers of two, adding elements will only cause O(log n) allocations. A base capacity can be set, with `SetBaseCap`, so that there is no resizing at or below that specified amount. The Deque can also be grown, using `Grow`, to ensure sufficient storage for n additional items, to prevent resizing when adding a number of itmes. - -The ring-buffer implementation improves memory and time performance with fewer GC pauses, compared to implementations based on slices or linked lists. By wrapping around the buffer, previously used space is reused, making allocation unnecessary until all buffer capacity is used. The ring buffer implementation performs best when resizes are infrequest, as is the case when items moving in and out of the Deque are balanced or when the base capacity is large enough to rarely require a resize. - -For maximum speed, this deque implementation leaves concurrency safety up to the application to provide, however the application chooses, if needed at all. - -## Reading Empty Deque - -Since it is OK for the deque to contain a `nil` value, it is necessary to either panic or return a second boolean value to indicate the deque is empty, when reading or removing an element. This deque panics when reading from an empty deque. This is a run-time check to help catch programming errors, which may be missed if a second return value is ignored. Simply check `Deque.Len()` before reading from the deque. - -## Generics - -Deque uses generics to create a Deque that contains items of the type specified. 
To create a Deque that holds a specific type, provide a type argument with the `Deque` variable declaration. For example: -```go - stringDeque := new(deque.Deque[string]) - var intDeque deque.Deque[int] -``` - -## Example - -```go -package main - -import ( - "fmt" - "github.com/gammazero/deque" -) - -func main() { - var q deque.Deque[string] - q.PushBack("foo") - q.PushBack("bar") - q.PushBack("baz") - - fmt.Println(q.Len()) // Prints: 3 - fmt.Println(q.Front()) // Prints: foo - fmt.Println(q.Back()) // Prints: baz - - q.PopFront() // remove "foo" - q.PopBack() // remove "baz" - - q.PushFront("hello") - q.PushBack("world") - - // Consume deque and print elements. - for q.Len() != 0 { - fmt.Println(q.PopFront()) - } -} -``` - -## Uses - -Deque can be used as both a: -- [Queue](https://en.wikipedia.org/wiki/Queue_(abstract_data_type)) using `PushBack` and `PopFront` -- [Stack](https://en.wikipedia.org/wiki/Stack_(abstract_data_type)) using `PushBack` and `PopBack` diff --git a/vendor/github.com/gammazero/deque/deque.go b/vendor/github.com/gammazero/deque/deque.go deleted file mode 100644 index ff109962..00000000 --- a/vendor/github.com/gammazero/deque/deque.go +++ /dev/null @@ -1,434 +0,0 @@ -package deque - -import "fmt" - -// minCapacity is the smallest capacity that deque may have. Must be power of 2 -// for bitwise modulus: x % n == x & (n - 1). -const minCapacity = 16 - -// Deque represents a single instance of the deque data structure. A Deque -// instance contains items of the type specified by the type argument. -// -// For example, to create a Deque that contains strings do one of the -// following: -// -// var stringDeque deque.Deque[string] -// stringDeque := new(deque.Deque[string]) -// stringDeque := &deque.Deque[string]{} -// -// To create a Deque that will never resize to have space for less than 64 -// items, specify a base capacity: -// -// var d deque.Deque[int] -// d.SetBaseCap(64) -// -// To ensure the Deque can store 1000 items without needing to resize while -// items are added: -// -// d.Grow(1000) -// -// Any values supplied to SetBaseCap and Grow are rounded up to the nearest -// power of 2, since the Deque grows by powers of 2. -type Deque[T any] struct { - buf []T - head int - tail int - count int - minCap int -} - -// Cap returns the current capacity of the Deque. If q is nil, q.Cap() is zero. -func (q *Deque[T]) Cap() int { - if q == nil { - return 0 - } - return len(q.buf) -} - -// Len returns the number of elements currently stored in the queue. If q is -// nil, q.Len() returns zero. -func (q *Deque[T]) Len() int { - if q == nil { - return 0 - } - return q.count -} - -// PushBack appends an element to the back of the queue. Implements FIFO when -// elements are removed with PopFront, and LIFO when elements are removed with -// PopBack. -func (q *Deque[T]) PushBack(elem T) { - q.growIfFull() - - q.buf[q.tail] = elem - // Calculate new tail position. - q.tail = q.next(q.tail) - q.count++ -} - -// PushFront prepends an element to the front of the queue. -func (q *Deque[T]) PushFront(elem T) { - q.growIfFull() - - // Calculate new head position. - q.head = q.prev(q.head) - q.buf[q.head] = elem - q.count++ -} - -// PopFront removes and returns the element from the front of the queue. -// Implements FIFO when used with PushBack. If the queue is empty, the call -// panics. 
-func (q *Deque[T]) PopFront() T { - if q.count <= 0 { - panic("deque: PopFront() called on empty queue") - } - ret := q.buf[q.head] - var zero T - q.buf[q.head] = zero - // Calculate new head position. - q.head = q.next(q.head) - q.count-- - - q.shrinkIfExcess() - return ret -} - -// PopBack removes and returns the element from the back of the queue. -// Implements LIFO when used with PushBack. If the queue is empty, the call -// panics. -func (q *Deque[T]) PopBack() T { - if q.count <= 0 { - panic("deque: PopBack() called on empty queue") - } - - // Calculate new tail position - q.tail = q.prev(q.tail) - - // Remove value at tail. - ret := q.buf[q.tail] - var zero T - q.buf[q.tail] = zero - q.count-- - - q.shrinkIfExcess() - return ret -} - -// Front returns the element at the front of the queue. This is the element -// that would be returned by PopFront. This call panics if the queue is empty. -func (q *Deque[T]) Front() T { - if q.count <= 0 { - panic("deque: Front() called when empty") - } - return q.buf[q.head] -} - -// Back returns the element at the back of the queue. This is the element that -// would be returned by PopBack. This call panics if the queue is empty. -func (q *Deque[T]) Back() T { - if q.count <= 0 { - panic("deque: Back() called when empty") - } - return q.buf[q.prev(q.tail)] -} - -// At returns the element at index i in the queue without removing the element -// from the queue. This method accepts only non-negative index values. At(0) -// refers to the first element and is the same as Front(). At(Len()-1) refers -// to the last element and is the same as Back(). If the index is invalid, the -// call panics. -// -// The purpose of At is to allow Deque to serve as a more general purpose -// circular buffer, where items are only added to and removed from the ends of -// the deque, but may be read from any place within the deque. Consider the -// case of a fixed-size circular log buffer: A new entry is pushed onto one end -// and when full the oldest is popped from the other end. All the log entries -// in the buffer must be readable without altering the buffer contents. -func (q *Deque[T]) At(i int) T { - q.checkRange(i) - // bitwise modulus - return q.buf[(q.head+i)&(len(q.buf)-1)] -} - -// Set assigns the item to index i in the queue. Set indexes the deque the same -// as At but perform the opposite operation. If the index is invalid, the call -// panics. -func (q *Deque[T]) Set(i int, item T) { - q.checkRange(i) - // bitwise modulus - q.buf[(q.head+i)&(len(q.buf)-1)] = item -} - -// Clear removes all elements from the queue, but retains the current capacity. -// This is useful when repeatedly reusing the queue at high frequency to avoid -// GC during reuse. The queue will not be resized smaller as long as items are -// only added. Only when items are removed is the queue subject to getting -// resized smaller. -func (q *Deque[T]) Clear() { - var zero T - modBits := len(q.buf) - 1 - h := q.head - for i := 0; i < q.Len(); i++ { - q.buf[(h+i)&modBits] = zero - } - q.head = 0 - q.tail = 0 - q.count = 0 -} - -// Grow grows deque's capacity, if necessary, to guarantee space for another n -// items. After Grow(n), at least n items can be written to the deque without -// another allocation. If n is negative, Grow panics. -func (q *Deque[T]) Grow(n int) { - if n < 0 { - panic("deque.Grow: negative count") - } - c := q.Cap() - l := q.Len() - // If already big enough. 
- if n <= c-l { - return - } - - if c == 0 { - c = minCapacity - } - - newLen := l + n - for c < newLen { - c <<= 1 - } - if l == 0 { - q.buf = make([]T, c) - q.head = 0 - q.tail = 0 - } else { - q.resize(c) - } -} - -// Rotate rotates the deque n steps front-to-back. If n is negative, rotates -// back-to-front. Having Deque provide Rotate avoids resizing that could happen -// if implementing rotation using only Pop and Push methods. If q.Len() is one -// or less, or q is nil, then Rotate does nothing. -func (q *Deque[T]) Rotate(n int) { - if q.Len() <= 1 { - return - } - // Rotating a multiple of q.count is same as no rotation. - n %= q.count - if n == 0 { - return - } - - modBits := len(q.buf) - 1 - // If no empty space in buffer, only move head and tail indexes. - if q.head == q.tail { - // Calculate new head and tail using bitwise modulus. - q.head = (q.head + n) & modBits - q.tail = q.head - return - } - - var zero T - - if n < 0 { - // Rotate back to front. - for ; n < 0; n++ { - // Calculate new head and tail using bitwise modulus. - q.head = (q.head - 1) & modBits - q.tail = (q.tail - 1) & modBits - // Put tail value at head and remove value at tail. - q.buf[q.head] = q.buf[q.tail] - q.buf[q.tail] = zero - } - return - } - - // Rotate front to back. - for ; n > 0; n-- { - // Put head value at tail and remove value at head. - q.buf[q.tail] = q.buf[q.head] - q.buf[q.head] = zero - // Calculate new head and tail using bitwise modulus. - q.head = (q.head + 1) & modBits - q.tail = (q.tail + 1) & modBits - } -} - -// Index returns the index into the Deque of the first item satisfying f(item), -// or -1 if none do. If q is nil, then -1 is always returned. Search is linear -// starting with index 0. -func (q *Deque[T]) Index(f func(T) bool) int { - if q.Len() > 0 { - modBits := len(q.buf) - 1 - for i := 0; i < q.count; i++ { - if f(q.buf[(q.head+i)&modBits]) { - return i - } - } - } - return -1 -} - -// RIndex is the same as Index, but searches from Back to Front. The index -// returned is from Front to Back, where index 0 is the index of the item -// returned by Front(). -func (q *Deque[T]) RIndex(f func(T) bool) int { - if q.Len() > 0 { - modBits := len(q.buf) - 1 - for i := q.count - 1; i >= 0; i-- { - if f(q.buf[(q.head+i)&modBits]) { - return i - } - } - } - return -1 -} - -// Insert is used to insert an element into the middle of the queue, before the -// element at the specified index. Insert(0,e) is the same as PushFront(e) and -// Insert(Len(),e) is the same as PushBack(e). Out of range indexes result in -// pushing the item onto the front of back of the deque. -// -// Important: Deque is optimized for O(1) operations at the ends of the queue, -// not for operations in the the middle. Complexity of this function is -// constant plus linear in the lesser of the distances between the index and -// either of the ends of the queue. -func (q *Deque[T]) Insert(at int, item T) { - if at <= 0 { - q.PushFront(item) - return - } - if at >= q.Len() { - q.PushBack(item) - return - } - if at*2 < q.count { - q.PushFront(item) - front := q.head - for i := 0; i < at; i++ { - next := q.next(front) - q.buf[front], q.buf[next] = q.buf[next], q.buf[front] - front = next - } - return - } - swaps := q.count - at - q.PushBack(item) - back := q.prev(q.tail) - for i := 0; i < swaps; i++ { - prev := q.prev(back) - q.buf[back], q.buf[prev] = q.buf[prev], q.buf[back] - back = prev - } -} - -// Remove removes and returns an element from the middle of the queue, at the -// specified index. 
Remove(0) is the same as PopFront() and Remove(Len()-1) is -// the same as PopBack(). Accepts only non-negative index values, and panics if -// index is out of range. -// -// Important: Deque is optimized for O(1) operations at the ends of the queue, -// not for operations in the the middle. Complexity of this function is -// constant plus linear in the lesser of the distances between the index and -// either of the ends of the queue. -func (q *Deque[T]) Remove(at int) T { - q.checkRange(at) - rm := (q.head + at) & (len(q.buf) - 1) - if at*2 < q.count { - for i := 0; i < at; i++ { - prev := q.prev(rm) - q.buf[prev], q.buf[rm] = q.buf[rm], q.buf[prev] - rm = prev - } - return q.PopFront() - } - swaps := q.count - at - 1 - for i := 0; i < swaps; i++ { - next := q.next(rm) - q.buf[rm], q.buf[next] = q.buf[next], q.buf[rm] - rm = next - } - return q.PopBack() -} - -// SetBaseCap sets a base capacity so that at least the specified number of -// items can always be stored without resizing. -func (q *Deque[T]) SetBaseCap(baseCap int) { - minCap := minCapacity - for minCap < baseCap { - minCap <<= 1 - } - q.minCap = minCap -} - -// Swap exchanges the two values at idxA and idxB. It panics if either index is -// out of range. -func (q *Deque[T]) Swap(idxA, idxB int) { - q.checkRange(idxA) - q.checkRange(idxB) - if idxA == idxB { - return - } - - realA := (q.head + idxA) & (len(q.buf) - 1) - realB := (q.head + idxB) & (len(q.buf) - 1) - q.buf[realA], q.buf[realB] = q.buf[realB], q.buf[realA] -} - -func (q *Deque[T]) checkRange(i int) { - if i < 0 || i >= q.count { - panic(fmt.Sprintf("deque: index out of range %d with length %d", i, q.Len())) - } -} - -// prev returns the previous buffer position wrapping around buffer. -func (q *Deque[T]) prev(i int) int { - return (i - 1) & (len(q.buf) - 1) // bitwise modulus -} - -// next returns the next buffer position wrapping around buffer. -func (q *Deque[T]) next(i int) int { - return (i + 1) & (len(q.buf) - 1) // bitwise modulus -} - -// growIfFull resizes up if the buffer is full. -func (q *Deque[T]) growIfFull() { - if q.count != len(q.buf) { - return - } - if len(q.buf) == 0 { - if q.minCap == 0 { - q.minCap = minCapacity - } - q.buf = make([]T, q.minCap) - return - } - q.resize(q.count << 1) -} - -// shrinkIfExcess resize down if the buffer 1/4 full. -func (q *Deque[T]) shrinkIfExcess() { - if len(q.buf) > q.minCap && (q.count<<2) == len(q.buf) { - q.resize(q.count << 1) - } -} - -// resize resizes the deque to fit exactly twice its current contents. This is -// used to grow the queue when it is full, and also to shrink it when it is -// only a quarter full. -func (q *Deque[T]) resize(newSize int) { - newBuf := make([]T, newSize) - if q.tail > q.head { - copy(newBuf, q.buf[q.head:q.tail]) - } else { - n := copy(newBuf, q.buf[q.head:]) - copy(newBuf[n:], q.buf[:q.tail]) - } - - q.head = 0 - q.tail = q.count - q.buf = newBuf -} diff --git a/vendor/github.com/gammazero/deque/doc.go b/vendor/github.com/gammazero/deque/doc.go deleted file mode 100644 index dfff00ad..00000000 --- a/vendor/github.com/gammazero/deque/doc.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Package deque provides a fast ring-buffer deque (double-ended queue) -implementation. - -Deque generalizes a queue and a stack, to efficiently add and remove items at -either end with O(1) performance. Queue (FIFO) operations are supported using -PushBack and PopFront. Stack (LIFO) operations are supported using PushBack and -PopBack. 
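For readers skimming this vendor removal, here is a minimal, self-contained sketch of the FIFO/LIFO usage the package comment above describes. It is written against the v1 `gammazero/deque` API shown earlier in this diff (PushBack, PopFront, PopBack, Len); it is illustrative only and not part of the patch itself.

```go
package main

import (
	"fmt"

	"github.com/gammazero/deque"
)

func main() {
	// FIFO: PushBack + PopFront behaves as a queue.
	var q deque.Deque[int]
	q.PushBack(1)
	q.PushBack(2)
	fmt.Println(q.PopFront()) // prints 1

	// LIFO: PushBack + PopBack behaves as a stack.
	var s deque.Deque[string]
	s.PushBack("a")
	s.PushBack("b")
	fmt.Println(s.PopBack()) // prints "b"

	// Front/Back/Pop* panic on an empty deque, so check Len first.
	if q.Len() > 0 {
		fmt.Println(q.PopFront()) // prints 2
	}
}
```

Checking Len before popping avoids the panic-on-empty behavior documented in the "Reading Empty Deque" section of this package.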
- -# Ring-buffer Performance - -The ring-buffer automatically resizes by powers of two, growing when additional -capacity is needed and shrinking when only a quarter of the capacity is used, -and uses bitwise arithmetic for all calculations. - -The ring-buffer implementation significantly improves memory and time -performance with fewer GC pauses, compared to implementations based on slices -and linked lists. - -For maximum speed, this deque implementation leaves concurrency safety up to -the application to provide, however the application chooses, if needed at all. - -# Reading Empty Deque - -Since it is OK for the deque to contain the zero-value of an item, it is -necessary to either panic or return a second boolean value to indicate the -deque is empty, when reading or removing an element. This deque panics when -reading from an empty deque. This is a run-time check to help catch programming -errors, which may be missed if a second return value is ignored. Simply check -Deque.Len() before reading from the deque. - -# Generics - -Deque uses generics to create a Deque that contains items of the type -specified. To create a Deque that holds a specific type, provide a type -argument with the Deque variable declaration. -*/ -package deque diff --git a/vendor/github.com/maypok86/otter/.golangci.yml b/vendor/github.com/maypok86/otter/.golangci.yml deleted file mode 100644 index a4235930..00000000 --- a/vendor/github.com/maypok86/otter/.golangci.yml +++ /dev/null @@ -1,103 +0,0 @@ -run: - concurrency: 8 - timeout: 5m - build-tags: - - integration - modules-download-mode: readonly - go: '1.22' -output: - formats: - - format: tab - path: lint.txt - print-issued-lines: false - uniq-by-line: false - sort-results: true -linters: - enable: - - asasalint - - asciicheck - - bidichk - - bodyclose - - contextcheck - - durationcheck - - errcheck - - errname - - errorlint - - gocheckcompilerdirectives - - gocritic - - godot - - gofumpt - - gci - - gomoddirectives - - gosec - - gosimple - - govet - - ineffassign - - misspell - - nakedret - - nilerr - - nilnil - - noctx - - nolintlint - - prealloc - - predeclared - - promlinter - - reassign - - revive - - rowserrcheck - - sqlclosecheck - - staticcheck - - stylecheck - - tagliatelle - - tenv - - testableexamples - - thelper - - tparallel - - unconvert - - unparam - - usestdlibvars - - wastedassign - disable: - - unused -issues: - max-issues-per-linter: 0 - max-same-issues: 0 - exclude-rules: - - path: _test\.go - linters: - - gosec -linters-settings: - gci: - sections: - - standard # Standard lib - - default # External dependencies - - prefix(github.com/maypok86/otter) # Internal packages - gocritic: - enabled-tags: - - diagnostic - - experimental - - opinionated - - performance - - style - disabled-checks: - - hugeParam - - rangeExprCopy - - rangeValCopy - errcheck: - check-type-assertions: true - check-blank: true - exclude-functions: - - io/ioutil.ReadFile - - io.Copy(*bytes.Buffer) - - io.Copy(os.Stdout) - nakedret: - max-func-lines: 1 - revive: - rules: - - name: empty-block - disabled: true - tagliatelle: - case: - rules: - json: snake - yaml: snake diff --git a/vendor/github.com/maypok86/otter/CHANGELOG.md b/vendor/github.com/maypok86/otter/CHANGELOG.md deleted file mode 100644 index 01e2d724..00000000 --- a/vendor/github.com/maypok86/otter/CHANGELOG.md +++ /dev/null @@ -1,98 +0,0 @@ -## 1.2.4 - 2024-11-23 - -### 🐞 Bug Fixes - -- Fixed a bug due to changing [gammazero/deque](https://github.com/gammazero/deque/pull/33) contracts without v2 release. 
([#112](https://github.com/maypok86/otter/issues/112)) - -## 1.2.3 - 2024-09-30 - -### 🐞 Bug Fixes - -- Added collection of eviction statistics for expired entries. ([#108](https://github.com/maypok86/otter/issues/108)) - -## 1.2.2 - 2024-08-14 - -### ✨️Features - -- Implemented `fmt.Stringer` interface for `DeletionReason` type ([#100](https://github.com/maypok86/otter/issues/100)) - -### 🐞 Bug Fixes - -- Fixed processing of an expired entry in the `Get` method ([#98](https://github.com/maypok86/otter/issues/98)) -- Fixed inconsistent deletion listener behavior ([#98](https://github.com/maypok86/otter/issues/98)) -- Fixed the behavior of `checkedAdd` when over/underflow ([#91](https://github.com/maypok86/otter/issues/91)) - -## 1.2.1 - 2024-04-15 - -### 🐞 Bug Fixes - -- Fixed uint32 capacity overflow. - -## 1.2.0 - 2024-03-12 - -The main innovation of this release is the addition of an `Extension`, which makes it easy to add a huge number of features to otter. - -Usage example: - -```go -key := 1 -... -entry, ok := cache.Extension().GetEntry(key) -... -key := entry.Key() -value := entry.Value() -cost := entry.Cost() -expiration := entry.Expiration() -ttl := entry.TTL() -hasExpired := entry.HasExpired() -``` - -### ✨️Features - -- Added `DeletionListener` to the builder ([#63](https://github.com/maypok86/otter/issues/63)) -- Added `Extension` ([#56](https://github.com/maypok86/otter/issues/56)) - -### 🚀 Improvements - -- Added support for Go 1.22 -- Memory consumption with small cache sizes is reduced to the level of other libraries ([#66](https://github.com/maypok86/otter/issues/66)) - -## 1.1.1 - 2024-03-06 - -### 🐞 Bug Fixes - -- Fixed alignment issues on 32-bit archs - -## 1.1.0 - 2024-03-04 - -The main innovation of this release is node code generation. Thanks to it, the cache will no longer consume more memory due to features that it does not use. For example, if you do not need an expiration policy, then otter will not store the expiration time of each entry. It also allows otter to use more effective expiration policies. - -Another expected improvement is the correction of minor synchronization problems due to the state machine. Now otter, unlike other contention-free caches in Go, should not have them at all. 
- -### ✨️Features - -- Added `DeleteByFunc` function to cache ([#44](https://github.com/maypok86/otter/issues/44)) -- Added `InitialCapacity` function to builder ([#47](https://github.com/maypok86/otter/issues/47)) -- Added collection of additional statistics ([#57](https://github.com/maypok86/otter/issues/57)) - -### 🚀 Improvements - -- Added proactive queue-based and timer wheel-based expiration policies with O(1) time complexity ([#55](https://github.com/maypok86/otter/issues/55)) -- Added node code generation ([#55](https://github.com/maypok86/otter/issues/55)) -- Fixed the race condition when changing the order of events ([#59](https://github.com/maypok86/otter/issues/59)) -- Reduced memory consumption on small caches - -## 1.0.0 - 2024-01-26 - -### ✨️Features - -- Builder pattern support -- Cleaner API compared to other caches ([#40](https://github.com/maypok86/otter/issues/40)) -- Added `SetIfAbsent` and `Range` functions ([#27](https://github.com/maypok86/otter/issues/27)) -- Statistics collection ([#4](https://github.com/maypok86/otter/issues/4)) -- Cost based eviction -- Support for generics and any comparable types as keys -- Support ttl ([#14](https://github.com/maypok86/otter/issues/14)) -- Excellent speed ([benchmark results](https://github.com/maypok86/otter?tab=readme-ov-file#-performance-)) -- O(1) worst case time complexity for S3-FIFO instead of O(n) -- Improved hit ratio of S3-FIFO on many traces ([simulator results](https://github.com/maypok86/otter?tab=readme-ov-file#-hit-ratio-)) diff --git a/vendor/github.com/maypok86/otter/README.md b/vendor/github.com/maypok86/otter/README.md deleted file mode 100644 index 27233930..00000000 --- a/vendor/github.com/maypok86/otter/README.md +++ /dev/null @@ -1,191 +0,0 @@ -

-High performance in-memory cache
-
-[badges: Go Reference, Mentioned in Awesome Go]
-
- -Otter is one of the most powerful caching libraries for Go based on researches in caching and concurrent data structures. Otter also uses the experience of designing caching libraries in other languages (for example, [caffeine](https://github.com/ben-manes/caffeine)). - -## 📖 Contents - -- [Features](#features) -- [Related works](#related-works) -- [Usage](#usage) - - [Requirements](#requirements) - - [Installation](#installation) - - [Examples](#examples) -- [Performance](#performance) - - [Throughput](#throughput) - - [Hit ratio](#hit-ratio) - - [Memory consumption](#memory-consumption) -- [Contribute](#contribute) -- [License](#license) - -## ✨ Features - -- **Simple API**: Just set the parameters you want in the builder and enjoy -- **Autoconfiguration**: Otter is automatically configured based on the parallelism of your application -- **Generics**: You can safely use any comparable types as keys and any types as values -- **TTL**: Expired values will be automatically deleted from the cache -- **Cost-based eviction**: Otter supports eviction based on the cost of each entry -- **Deletion listener**: You can pass a callback function in the builder that will be called when an entry is deleted from the cache -- **Stats**: You can collect various usage statistics -- **Excellent throughput**: Otter can handle a [huge number of requests](#throughput) -- **Great hit ratio**: New S3-FIFO algorithm is used, which shows excellent [results](#hit-ratio) - -## 🗃 Related works - -Otter is based on the following papers: - -- [BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free](https://www.researchgate.net/publication/220966845_BP-Wrapper_A_System_Framework_Making_Any_Replacement_Algorithms_Almost_Lock_Contention_Free) -- [FIFO queues are all you need for cache eviction](https://dl.acm.org/doi/10.1145/3600006.3613147) -- [A large scale analysis of hundreds of in-memory cache clusters at Twitter](https://www.usenix.org/system/files/osdi20-yang.pdf) - -## 📚 Usage - -### 📋 Requirements - -- Go 1.19+ - -### 🛠️ Installation - -```shell -go get -u github.com/maypok86/otter -``` - -### ✏️ Examples - -Otter uses a builder pattern that allows you to conveniently create a cache instance with different parameters. - -**Cache with const TTL** -```go -package main - -import ( - "fmt" - "time" - - "github.com/maypok86/otter" -) - -func main() { - // create a cache with capacity equal to 10000 elements - cache, err := otter.MustBuilder[string, string](10_000). - CollectStats(). - Cost(func(key string, value string) uint32 { - return 1 - }). - WithTTL(time.Hour). - Build() - if err != nil { - panic(err) - } - - // set item with ttl (1 hour) - cache.Set("key", "value") - - // get value from cache - value, ok := cache.Get("key") - if !ok { - panic("not found key") - } - fmt.Println(value) - - // delete item from cache - cache.Delete("key") - - // delete data and stop goroutines - cache.Close() -} -``` - -**Cache with variable TTL** -```go -package main - -import ( - "fmt" - "time" - - "github.com/maypok86/otter" -) - -func main() { - // create a cache with capacity equal to 10000 elements - cache, err := otter.MustBuilder[string, string](10_000). - CollectStats(). - Cost(func(key string, value string) uint32 { - return 1 - }). - WithVariableTTL(). 
- Build() - if err != nil { - panic(err) - } - - // set item with ttl (1 hour) - cache.Set("key1", "value1", time.Hour) - // set item with ttl (1 minute) - cache.Set("key2", "value2", time.Minute) - - // get value from cache - value, ok := cache.Get("key1") - if !ok { - panic("not found key") - } - fmt.Println(value) - - // delete item from cache - cache.Delete("key1") - - // delete data and stop goroutines - cache.Close() -} -``` - -## 📊 Performance - -The benchmark code can be found [here](https://github.com/maypok86/benchmarks). - -### 🚀 Throughput - -Throughput benchmarks are a Go port of the caffeine [benchmarks](https://github.com/ben-manes/caffeine/blob/master/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/GetPutBenchmark.java). This microbenchmark compares the throughput of caches on a zipf distribution, which allows to show various inefficient places in implementations. - -You can find results [here](https://maypok86.github.io/otter/performance/throughput/). - -### 🎯 Hit ratio - -The hit ratio simulator tests caches on various traces: -1. Synthetic (zipf distribution) -2. Traditional (widely known and used in various projects and papers) -3. Modern (recently collected from the production of the largest companies in the world) - -You can find results [here](https://maypok86.github.io/otter/performance/hit-ratio/). - -### 💾 Memory consumption - -The memory overhead benchmark shows how much additional memory the cache will require at different capacities. - -You can find results [here](https://maypok86.github.io/otter/performance/memory-consumption/). - -## 👏 Contribute - -Contributions are welcome as always, before submitting a new PR please make sure to open a new issue so community members can discuss it. -For more information please see [contribution guidelines](./CONTRIBUTING.md). - -Additionally, you might find existing open issues which can help with improvements. - -This project follows a standard [code of conduct](./CODE_OF_CONDUCT.md) so that you can understand what actions will and will not be tolerated. - -## 📄 License - -This project is Apache 2.0 licensed, as found in the [LICENSE](./LICENSE). diff --git a/vendor/github.com/maypok86/otter/builder.go b/vendor/github.com/maypok86/otter/builder.go deleted file mode 100644 index 2498c903..00000000 --- a/vendor/github.com/maypok86/otter/builder.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otter - -import ( - "errors" - "time" - - "github.com/maypok86/otter/internal/core" -) - -const ( - unsetCapacity = -1 -) - -var ( - // ErrIllegalCapacity means that a non-positive capacity has been passed to the NewBuilder. - ErrIllegalCapacity = errors.New("capacity should be positive") - // ErrIllegalInitialCapacity means that a non-positive capacity has been passed to the Builder.InitialCapacity. 
- ErrIllegalInitialCapacity = errors.New("initial capacity should be positive") - // ErrNilCostFunc means that a nil cost func has been passed to the Builder.Cost. - ErrNilCostFunc = errors.New("setCostFunc func should not be nil") - // ErrIllegalTTL means that a non-positive ttl has been passed to the Builder.WithTTL. - ErrIllegalTTL = errors.New("ttl should be positive") -) - -type baseOptions[K comparable, V any] struct { - capacity int - initialCapacity int - statsEnabled bool - withCost bool - costFunc func(key K, value V) uint32 - deletionListener func(key K, value V, cause DeletionCause) -} - -func (o *baseOptions[K, V]) collectStats() { - o.statsEnabled = true -} - -func (o *baseOptions[K, V]) setCostFunc(costFunc func(key K, value V) uint32) { - o.costFunc = costFunc - o.withCost = true -} - -func (o *baseOptions[K, V]) setInitialCapacity(initialCapacity int) { - o.initialCapacity = initialCapacity -} - -func (o *baseOptions[K, V]) setDeletionListener(deletionListener func(key K, value V, cause DeletionCause)) { - o.deletionListener = deletionListener -} - -func (o *baseOptions[K, V]) validate() error { - if o.initialCapacity <= 0 && o.initialCapacity != unsetCapacity { - return ErrIllegalInitialCapacity - } - if o.costFunc == nil { - return ErrNilCostFunc - } - return nil -} - -func (o *baseOptions[K, V]) toConfig() core.Config[K, V] { - var initialCapacity *int - if o.initialCapacity != unsetCapacity { - initialCapacity = &o.initialCapacity - } - return core.Config[K, V]{ - Capacity: o.capacity, - InitialCapacity: initialCapacity, - StatsEnabled: o.statsEnabled, - CostFunc: o.costFunc, - WithCost: o.withCost, - DeletionListener: o.deletionListener, - } -} - -type constTTLOptions[K comparable, V any] struct { - baseOptions[K, V] - ttl time.Duration -} - -func (o *constTTLOptions[K, V]) validate() error { - if o.ttl <= 0 { - return ErrIllegalTTL - } - return o.baseOptions.validate() -} - -func (o *constTTLOptions[K, V]) toConfig() core.Config[K, V] { - c := o.baseOptions.toConfig() - c.TTL = &o.ttl - return c -} - -type variableTTLOptions[K comparable, V any] struct { - baseOptions[K, V] -} - -func (o *variableTTLOptions[K, V]) toConfig() core.Config[K, V] { - c := o.baseOptions.toConfig() - c.WithVariableTTL = true - return c -} - -// Builder is a one-shot builder for creating a cache instance. -type Builder[K comparable, V any] struct { - baseOptions[K, V] -} - -// MustBuilder creates a builder and sets the future cache capacity. -// -// Panics if capacity <= 0. -func MustBuilder[K comparable, V any](capacity int) *Builder[K, V] { - b, err := NewBuilder[K, V](capacity) - if err != nil { - panic(err) - } - return b -} - -// NewBuilder creates a builder and sets the future cache capacity. -// -// Returns an error if capacity <= 0. -func NewBuilder[K comparable, V any](capacity int) (*Builder[K, V], error) { - if capacity <= 0 { - return nil, ErrIllegalCapacity - } - - return &Builder[K, V]{ - baseOptions: baseOptions[K, V]{ - capacity: capacity, - initialCapacity: unsetCapacity, - statsEnabled: false, - costFunc: func(key K, value V) uint32 { - return 1 - }, - }, - }, nil -} - -// CollectStats determines whether statistics should be calculated when the cache is running. -// -// By default, statistics calculating is disabled. -func (b *Builder[K, V]) CollectStats() *Builder[K, V] { - b.collectStats() - return b -} - -// InitialCapacity sets the minimum total size for the internal data structures. 
Providing a large enough estimate -// at construction time avoids the need for expensive resizing operations later, but setting this -// value unnecessarily high wastes memory. -func (b *Builder[K, V]) InitialCapacity(initialCapacity int) *Builder[K, V] { - b.setInitialCapacity(initialCapacity) - return b -} - -// Cost sets a function to dynamically calculate the cost of an item. -// -// By default, this function always returns 1. -func (b *Builder[K, V]) Cost(costFunc func(key K, value V) uint32) *Builder[K, V] { - b.setCostFunc(costFunc) - return b -} - -// DeletionListener specifies a listener instance that caches should notify each time an entry is deleted for any -// DeletionCause cause. The cache will invoke this listener in the background goroutine -// after the entry's deletion operation has completed. -func (b *Builder[K, V]) DeletionListener(deletionListener func(key K, value V, cause DeletionCause)) *Builder[K, V] { - b.setDeletionListener(deletionListener) - return b -} - -// WithTTL specifies that each item should be automatically removed from the cache once a fixed duration -// has elapsed after the item's creation. -func (b *Builder[K, V]) WithTTL(ttl time.Duration) *ConstTTLBuilder[K, V] { - return &ConstTTLBuilder[K, V]{ - constTTLOptions[K, V]{ - baseOptions: b.baseOptions, - ttl: ttl, - }, - } -} - -// WithVariableTTL specifies that each item should be automatically removed from the cache once a duration has -// elapsed after the item's creation. Items are expired based on the custom ttl specified for each item separately. -// -// You should prefer WithTTL to this option whenever possible. -func (b *Builder[K, V]) WithVariableTTL() *VariableTTLBuilder[K, V] { - return &VariableTTLBuilder[K, V]{ - variableTTLOptions[K, V]{ - baseOptions: b.baseOptions, - }, - } -} - -// Build creates a configured cache or -// returns an error if invalid parameters were passed to the builder. -func (b *Builder[K, V]) Build() (Cache[K, V], error) { - if err := b.validate(); err != nil { - return Cache[K, V]{}, err - } - - return newCache(b.toConfig()), nil -} - -// ConstTTLBuilder is a one-shot builder for creating a cache instance. -type ConstTTLBuilder[K comparable, V any] struct { - constTTLOptions[K, V] -} - -// CollectStats determines whether statistics should be calculated when the cache is running. -// -// By default, statistics calculating is disabled. -func (b *ConstTTLBuilder[K, V]) CollectStats() *ConstTTLBuilder[K, V] { - b.collectStats() - return b -} - -// InitialCapacity sets the minimum total size for the internal data structures. Providing a large enough estimate -// at construction time avoids the need for expensive resizing operations later, but setting this -// value unnecessarily high wastes memory. -func (b *ConstTTLBuilder[K, V]) InitialCapacity(initialCapacity int) *ConstTTLBuilder[K, V] { - b.setInitialCapacity(initialCapacity) - return b -} - -// Cost sets a function to dynamically calculate the cost of an item. -// -// By default, this function always returns 1. -func (b *ConstTTLBuilder[K, V]) Cost(costFunc func(key K, value V) uint32) *ConstTTLBuilder[K, V] { - b.setCostFunc(costFunc) - return b -} - -// DeletionListener specifies a listener instance that caches should notify each time an entry is deleted for any -// DeletionCause cause. The cache will invoke this listener in the background goroutine -// after the entry's deletion operation has completed. 
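For illustration, a minimal sketch — assuming the public `otter` v1 builder API documented above — of combining `InitialCapacity`, a `DeletionListener`, and a fixed TTL; the key/value types and sizes are arbitrary:

```go
package main

import (
	"fmt"
	"time"

	"github.com/maypok86/otter"
)

func main() {
	// Build a cache with a fixed TTL and a deletion listener. The listener
	// is invoked on a background goroutine after the entry has already
	// been removed from the cache.
	cache, err := otter.MustBuilder[string, int](1_000).
		InitialCapacity(256).
		DeletionListener(func(key string, value int, cause otter.DeletionCause) {
			fmt.Printf("deleted %s=%d cause=%s\n", key, value, cause)
		}).
		WithTTL(time.Minute).
		Build()
	if err != nil {
		panic(err)
	}
	defer cache.Close()

	cache.Set("sessions", 42)
	cache.Delete("sessions")           // reported to the listener with cause Explicit
	time.Sleep(100 * time.Millisecond) // give the background goroutine time to report
}
```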
-func (b *ConstTTLBuilder[K, V]) DeletionListener(deletionListener func(key K, value V, cause DeletionCause)) *ConstTTLBuilder[K, V] { - b.setDeletionListener(deletionListener) - return b -} - -// Build creates a configured cache or -// returns an error if invalid parameters were passed to the builder. -func (b *ConstTTLBuilder[K, V]) Build() (Cache[K, V], error) { - if err := b.validate(); err != nil { - return Cache[K, V]{}, err - } - - return newCache(b.toConfig()), nil -} - -// VariableTTLBuilder is a one-shot builder for creating a cache instance. -type VariableTTLBuilder[K comparable, V any] struct { - variableTTLOptions[K, V] -} - -// CollectStats determines whether statistics should be calculated when the cache is running. -// -// By default, statistics calculating is disabled. -func (b *VariableTTLBuilder[K, V]) CollectStats() *VariableTTLBuilder[K, V] { - b.collectStats() - return b -} - -// InitialCapacity sets the minimum total size for the internal data structures. Providing a large enough estimate -// at construction time avoids the need for expensive resizing operations later, but setting this -// value unnecessarily high wastes memory. -func (b *VariableTTLBuilder[K, V]) InitialCapacity(initialCapacity int) *VariableTTLBuilder[K, V] { - b.setInitialCapacity(initialCapacity) - return b -} - -// Cost sets a function to dynamically calculate the cost of an item. -// -// By default, this function always returns 1. -func (b *VariableTTLBuilder[K, V]) Cost(costFunc func(key K, value V) uint32) *VariableTTLBuilder[K, V] { - b.setCostFunc(costFunc) - return b -} - -// DeletionListener specifies a listener instance that caches should notify each time an entry is deleted for any -// DeletionCause cause. The cache will invoke this listener in the background goroutine -// after the entry's deletion operation has completed. -func (b *VariableTTLBuilder[K, V]) DeletionListener(deletionListener func(key K, value V, cause DeletionCause)) *VariableTTLBuilder[K, V] { - b.setDeletionListener(deletionListener) - return b -} - -// Build creates a configured cache or -// returns an error if invalid parameters were passed to the builder. -func (b *VariableTTLBuilder[K, V]) Build() (CacheWithVariableTTL[K, V], error) { - if err := b.validate(); err != nil { - return CacheWithVariableTTL[K, V]{}, err - } - - return newCacheWithVariableTTL(b.toConfig()), nil -} diff --git a/vendor/github.com/maypok86/otter/cache.go b/vendor/github.com/maypok86/otter/cache.go deleted file mode 100644 index bedbb0e5..00000000 --- a/vendor/github.com/maypok86/otter/cache.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otter - -import ( - "time" - - "github.com/maypok86/otter/internal/core" -) - -// DeletionCause the cause why a cached entry was deleted. -type DeletionCause = core.DeletionCause - -const ( - // Explicit the entry was manually deleted by the user. 
- Explicit = core.Explicit - // Replaced the entry itself was not actually deleted, but its value was replaced by the user. - Replaced = core.Replaced - // Size the entry was evicted due to size constraints. - Size = core.Size - // Expired the entry's expiration timestamp has passed. - Expired = core.Expired -) - -type baseCache[K comparable, V any] struct { - cache *core.Cache[K, V] -} - -func newBaseCache[K comparable, V any](c core.Config[K, V]) baseCache[K, V] { - return baseCache[K, V]{ - cache: core.NewCache(c), - } -} - -// Has checks if there is an entry with the given key in the cache. -func (bs baseCache[K, V]) Has(key K) bool { - return bs.cache.Has(key) -} - -// Get returns the value associated with the key in this cache. -func (bs baseCache[K, V]) Get(key K) (V, bool) { - return bs.cache.Get(key) -} - -// Delete removes the association for this key from the cache. -func (bs baseCache[K, V]) Delete(key K) { - bs.cache.Delete(key) -} - -// DeleteByFunc removes the association for this key from the cache when the given function returns true. -func (bs baseCache[K, V]) DeleteByFunc(f func(key K, value V) bool) { - bs.cache.DeleteByFunc(f) -} - -// Range iterates over all entries in the cache. -// -// Iteration stops early when the given function returns false. -func (bs baseCache[K, V]) Range(f func(key K, value V) bool) { - bs.cache.Range(f) -} - -// Clear clears the hash table, all policies, buffers, etc. -// -// NOTE: this operation must be performed when no requests are made to the cache otherwise the behavior is undefined. -func (bs baseCache[K, V]) Clear() { - bs.cache.Clear() -} - -// Close clears the hash table, all policies, buffers, etc and stop all goroutines. -// -// NOTE: this operation must be performed when no requests are made to the cache otherwise the behavior is undefined. -func (bs baseCache[K, V]) Close() { - bs.cache.Close() -} - -// Size returns the current number of entries in the cache. -func (bs baseCache[K, V]) Size() int { - return bs.cache.Size() -} - -// Capacity returns the cache capacity. -func (bs baseCache[K, V]) Capacity() int { - return bs.cache.Capacity() -} - -// Stats returns a current snapshot of this cache's cumulative statistics. -func (bs baseCache[K, V]) Stats() Stats { - return newStats(bs.cache.Stats()) -} - -// Extension returns access to inspect and perform low-level operations on this cache based on its runtime -// characteristics. These operations are optional and dependent on how the cache was constructed -// and what abilities the implementation exposes. -func (bs baseCache[K, V]) Extension() Extension[K, V] { - return newExtension(bs.cache) -} - -// Cache is a structure performs a best-effort bounding of a hash table using eviction algorithm -// to determine which entries to evict when the capacity is exceeded. -type Cache[K comparable, V any] struct { - baseCache[K, V] -} - -func newCache[K comparable, V any](c core.Config[K, V]) Cache[K, V] { - return Cache[K, V]{ - baseCache: newBaseCache(c), - } -} - -// Set associates the value with the key in this cache. -// -// If it returns false, then the key-value pair had too much cost and the Set was dropped. -func (c Cache[K, V]) Set(key K, value V) bool { - return c.cache.Set(key, value) -} - -// SetIfAbsent if the specified key is not already associated with a value associates it with the given value. -// -// If the specified key is not already associated with a value, then it returns false. 
-// -// Also, it returns false if the key-value pair had too much cost and the SetIfAbsent was dropped. -func (c Cache[K, V]) SetIfAbsent(key K, value V) bool { - return c.cache.SetIfAbsent(key, value) -} - -// CacheWithVariableTTL is a structure performs a best-effort bounding of a hash table using eviction algorithm -// to determine which entries to evict when the capacity is exceeded. -type CacheWithVariableTTL[K comparable, V any] struct { - baseCache[K, V] -} - -func newCacheWithVariableTTL[K comparable, V any](c core.Config[K, V]) CacheWithVariableTTL[K, V] { - return CacheWithVariableTTL[K, V]{ - baseCache: newBaseCache(c), - } -} - -// Set associates the value with the key in this cache and sets the custom ttl for this key-value pair. -// -// If it returns false, then the key-value pair had too much cost and the Set was dropped. -func (c CacheWithVariableTTL[K, V]) Set(key K, value V, ttl time.Duration) bool { - return c.cache.SetWithTTL(key, value, ttl) -} - -// SetIfAbsent if the specified key is not already associated with a value associates it with the given value -// and sets the custom ttl for this key-value pair. -// -// If the specified key is not already associated with a value, then it returns false. -// -// Also, it returns false if the key-value pair had too much cost and the SetIfAbsent was dropped. -func (c CacheWithVariableTTL[K, V]) SetIfAbsent(key K, value V, ttl time.Duration) bool { - return c.cache.SetIfAbsentWithTTL(key, value, ttl) -} diff --git a/vendor/github.com/maypok86/otter/entry.go b/vendor/github.com/maypok86/otter/entry.go deleted file mode 100644 index 5dcfaf02..00000000 --- a/vendor/github.com/maypok86/otter/entry.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otter - -import "time" - -// Entry is a key-value pair that may include policy metadata for the cached entry. -// -// It is an immutable snapshot of the cached data at the time of this entry's creation, and it will not -// reflect changes afterward. -type Entry[K comparable, V any] struct { - key K - value V - expiration int64 - cost uint32 -} - -// Key returns the entry's key. -func (e Entry[K, V]) Key() K { - return e.key -} - -// Value returns the entry's value. -func (e Entry[K, V]) Value() V { - return e.value -} - -// Expiration returns the entry's expiration time as a unix time, -// the number of seconds elapsed since January 1, 1970 UTC. -// -// If the cache was not configured with an expiration policy then this value is always 0. -func (e Entry[K, V]) Expiration() int64 { - return e.expiration -} - -// TTL returns the entry's ttl. -// -// If the cache was not configured with an expiration policy then this value is always -1. -// -// If the entry is expired then this value is always 0. 
-func (e Entry[K, V]) TTL() time.Duration { - expiration := e.Expiration() - if expiration == 0 { - return -1 - } - - now := time.Now().Unix() - if expiration <= now { - return 0 - } - - return time.Duration(expiration-now) * time.Second -} - -// HasExpired returns true if the entry has expired. -func (e Entry[K, V]) HasExpired() bool { - expiration := e.Expiration() - if expiration == 0 { - return false - } - - return expiration <= time.Now().Unix() -} - -// Cost returns the entry's cost. -// -// If the cache was not configured with a cost then this value is always 1. -func (e Entry[K, V]) Cost() uint32 { - return e.cost -} diff --git a/vendor/github.com/maypok86/otter/extension.go b/vendor/github.com/maypok86/otter/extension.go deleted file mode 100644 index 83a511b5..00000000 --- a/vendor/github.com/maypok86/otter/extension.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otter - -import ( - "github.com/maypok86/otter/internal/core" - "github.com/maypok86/otter/internal/generated/node" - "github.com/maypok86/otter/internal/unixtime" -) - -func zeroValue[V any]() V { - var zero V - return zero -} - -// Extension is an access point for inspecting and performing low-level operations based on the cache's runtime -// characteristics. These operations are optional and dependent on how the cache was constructed -// and what abilities the implementation exposes. -type Extension[K comparable, V any] struct { - cache *core.Cache[K, V] -} - -func newExtension[K comparable, V any](cache *core.Cache[K, V]) Extension[K, V] { - return Extension[K, V]{ - cache: cache, - } -} - -func (e Extension[K, V]) createEntry(n node.Node[K, V]) Entry[K, V] { - var expiration int64 - if e.cache.WithExpiration() { - expiration = unixtime.StartTime() + int64(n.Expiration()) - } - - return Entry[K, V]{ - key: n.Key(), - value: n.Value(), - expiration: expiration, - cost: n.Cost(), - } -} - -// GetQuietly returns the value associated with the key in this cache. -// -// Unlike Get in the cache, this function does not produce any side effects -// such as updating statistics or the eviction policy. -func (e Extension[K, V]) GetQuietly(key K) (V, bool) { - n, ok := e.cache.GetNodeQuietly(key) - if !ok { - return zeroValue[V](), false - } - - return n.Value(), true -} - -// GetEntry returns the cache entry associated with the key in this cache. -func (e Extension[K, V]) GetEntry(key K) (Entry[K, V], bool) { - n, ok := e.cache.GetNode(key) - if !ok { - return Entry[K, V]{}, false - } - - return e.createEntry(n), true -} - -// GetEntryQuietly returns the cache entry associated with the key in this cache. -// -// Unlike GetEntry, this function does not produce any side effects -// such as updating statistics or the eviction policy. 
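A short sketch — again assuming the `otter` v1 API above — of reading an entry's policy metadata through `Extension` without disturbing statistics or the eviction policy:

```go
package main

import (
	"fmt"
	"time"

	"github.com/maypok86/otter"
)

func main() {
	cache, err := otter.MustBuilder[string, string](100).
		WithTTL(time.Hour).
		Build()
	if err != nil {
		panic(err)
	}
	defer cache.Close()

	cache.Set("key", "value")

	// GetEntryQuietly returns an immutable snapshot of the entry and, unlike
	// a normal Get, does not update hit statistics or the eviction policy.
	if entry, ok := cache.Extension().GetEntryQuietly("key"); ok {
		fmt.Println(entry.Value(), entry.TTL(), entry.Cost(), entry.HasExpired())
	}
}
```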
-func (e Extension[K, V]) GetEntryQuietly(key K) (Entry[K, V], bool) { - n, ok := e.cache.GetNodeQuietly(key) - if !ok { - return Entry[K, V]{}, false - } - - return e.createEntry(n), true -} diff --git a/vendor/github.com/maypok86/otter/internal/core/cache.go b/vendor/github.com/maypok86/otter/internal/core/cache.go deleted file mode 100644 index 761a3abb..00000000 --- a/vendor/github.com/maypok86/otter/internal/core/cache.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -import ( - "sync" - "time" - - "github.com/maypok86/otter/internal/expiry" - "github.com/maypok86/otter/internal/generated/node" - "github.com/maypok86/otter/internal/hashtable" - "github.com/maypok86/otter/internal/lossy" - "github.com/maypok86/otter/internal/queue" - "github.com/maypok86/otter/internal/s3fifo" - "github.com/maypok86/otter/internal/stats" - "github.com/maypok86/otter/internal/unixtime" - "github.com/maypok86/otter/internal/xmath" - "github.com/maypok86/otter/internal/xruntime" -) - -// DeletionCause the cause why a cached entry was deleted. -type DeletionCause uint8 - -const ( - // Explicit the entry was manually deleted by the user. - Explicit DeletionCause = iota - // Replaced the entry itself was not actually deleted, but its value was replaced by the user. - Replaced - // Size the entry was evicted due to size constraints. - Size - // Expired the entry's expiration timestamp has passed. - Expired -) - -func (dc DeletionCause) String() string { - switch dc { - case Explicit: - return "Explicit" - case Replaced: - return "Replaced" - case Size: - return "Size" - case Expired: - return "Expired" - default: - panic("unknown deletion cause") - } -} - -const ( - minWriteBufferSize uint32 = 4 -) - -var ( - maxWriteBufferSize uint32 - maxStripedBufferSize int -) - -func init() { - parallelism := xruntime.Parallelism() - roundedParallelism := int(xmath.RoundUpPowerOf2(parallelism)) - //nolint:gosec // there will never be an overflow - maxWriteBufferSize = uint32(128 * roundedParallelism) - maxStripedBufferSize = 4 * roundedParallelism -} - -func zeroValue[V any]() V { - var zero V - return zero -} - -func getTTL(ttl time.Duration) uint32 { - //nolint:gosec // there will never be an overflow - return uint32((ttl + time.Second - 1) / time.Second) -} - -func getExpiration(ttl time.Duration) uint32 { - return unixtime.Now() + getTTL(ttl) -} - -// Config is a set of cache settings. 
-type Config[K comparable, V any] struct { - Capacity int - InitialCapacity *int - StatsEnabled bool - TTL *time.Duration - WithVariableTTL bool - CostFunc func(key K, value V) uint32 - WithCost bool - DeletionListener func(key K, value V, cause DeletionCause) -} - -type expiryPolicy[K comparable, V any] interface { - Add(n node.Node[K, V]) - Delete(n node.Node[K, V]) - DeleteExpired() - Clear() -} - -// Cache is a structure performs a best-effort bounding of a hash table using eviction algorithm -// to determine which entries to evict when the capacity is exceeded. -type Cache[K comparable, V any] struct { - nodeManager *node.Manager[K, V] - hashmap *hashtable.Map[K, V] - policy *s3fifo.Policy[K, V] - expiryPolicy expiryPolicy[K, V] - stats *stats.Stats - stripedBuffer []*lossy.Buffer[K, V] - writeBuffer *queue.Growable[task[K, V]] - evictionMutex sync.Mutex - closeOnce sync.Once - doneClear chan struct{} - costFunc func(key K, value V) uint32 - deletionListener func(key K, value V, cause DeletionCause) - capacity int - mask uint32 - ttl uint32 - withExpiration bool - isClosed bool -} - -// NewCache returns a new cache instance based on the settings from Config. -func NewCache[K comparable, V any](c Config[K, V]) *Cache[K, V] { - nodeManager := node.NewManager[K, V](node.Config{ - WithExpiration: c.TTL != nil || c.WithVariableTTL, - WithCost: c.WithCost, - }) - - stripedBuffer := make([]*lossy.Buffer[K, V], 0, maxStripedBufferSize) - for i := 0; i < maxStripedBufferSize; i++ { - stripedBuffer = append(stripedBuffer, lossy.New[K, V](nodeManager)) - } - - var hashmap *hashtable.Map[K, V] - if c.InitialCapacity == nil { - hashmap = hashtable.New[K, V](nodeManager) - } else { - hashmap = hashtable.NewWithSize[K, V](nodeManager, *c.InitialCapacity) - } - - cache := &Cache[K, V]{ - nodeManager: nodeManager, - hashmap: hashmap, - stripedBuffer: stripedBuffer, - writeBuffer: queue.NewGrowable[task[K, V]](minWriteBufferSize, maxWriteBufferSize), - doneClear: make(chan struct{}), - //nolint:gosec // there will never be an overflow - mask: uint32(maxStripedBufferSize - 1), - costFunc: c.CostFunc, - deletionListener: c.DeletionListener, - capacity: c.Capacity, - } - - cache.policy = s3fifo.NewPolicy(c.Capacity, cache.evictNode) - - switch { - case c.TTL != nil: - cache.expiryPolicy = expiry.NewFixed[K, V](cache.deleteExpiredNode) - case c.WithVariableTTL: - cache.expiryPolicy = expiry.NewVariable[K, V](nodeManager, cache.deleteExpiredNode) - default: - cache.expiryPolicy = expiry.NewDisabled[K, V]() - } - - if c.StatsEnabled { - cache.stats = stats.New() - } - if c.TTL != nil { - cache.ttl = getTTL(*c.TTL) - } - - cache.withExpiration = c.TTL != nil || c.WithVariableTTL - - if cache.withExpiration { - unixtime.Start() - go cache.cleanup() - } - - go cache.process() - - return cache -} - -func (c *Cache[K, V]) getReadBufferIdx() int { - return int(xruntime.Fastrand() & c.mask) -} - -// Has checks if there is an item with the given key in the cache. -func (c *Cache[K, V]) Has(key K) bool { - _, ok := c.Get(key) - return ok -} - -// Get returns the value associated with the key in this cache. -func (c *Cache[K, V]) Get(key K) (V, bool) { - n, ok := c.GetNode(key) - if !ok { - return zeroValue[V](), false - } - - return n.Value(), true -} - -// GetNode returns the node associated with the key in this cache. 
-func (c *Cache[K, V]) GetNode(key K) (node.Node[K, V], bool) { - n, ok := c.hashmap.Get(key) - if !ok || !n.IsAlive() { - c.stats.IncMisses() - return nil, false - } - - if n.HasExpired() { - // avoid duplicate push - deleted := c.hashmap.DeleteNode(n) - if deleted != nil { - n.Die() - c.writeBuffer.Push(newExpiredTask(n)) - } - c.stats.IncMisses() - return nil, false - } - - c.afterGet(n) - c.stats.IncHits() - - return n, true -} - -// GetNodeQuietly returns the node associated with the key in this cache. -// -// Unlike GetNode, this function does not produce any side effects -// such as updating statistics or the eviction policy. -func (c *Cache[K, V]) GetNodeQuietly(key K) (node.Node[K, V], bool) { - n, ok := c.hashmap.Get(key) - if !ok || !n.IsAlive() || n.HasExpired() { - return nil, false - } - - return n, true -} - -func (c *Cache[K, V]) afterGet(got node.Node[K, V]) { - idx := c.getReadBufferIdx() - pb := c.stripedBuffer[idx].Add(got) - if pb != nil { - c.evictionMutex.Lock() - c.policy.Read(pb.Returned) - c.evictionMutex.Unlock() - - c.stripedBuffer[idx].Free() - } -} - -// Set associates the value with the key in this cache. -// -// If it returns false, then the key-value item had too much cost and the Set was dropped. -func (c *Cache[K, V]) Set(key K, value V) bool { - return c.set(key, value, c.defaultExpiration(), false) -} - -func (c *Cache[K, V]) defaultExpiration() uint32 { - if c.ttl == 0 { - return 0 - } - - return unixtime.Now() + c.ttl -} - -// SetWithTTL associates the value with the key in this cache and sets the custom ttl for this key-value item. -// -// If it returns false, then the key-value item had too much cost and the SetWithTTL was dropped. -func (c *Cache[K, V]) SetWithTTL(key K, value V, ttl time.Duration) bool { - return c.set(key, value, getExpiration(ttl), false) -} - -// SetIfAbsent if the specified key is not already associated with a value associates it with the given value. -// -// If the specified key is not already associated with a value, then it returns false. -// -// Also, it returns false if the key-value item had too much cost and the SetIfAbsent was dropped. -func (c *Cache[K, V]) SetIfAbsent(key K, value V) bool { - return c.set(key, value, c.defaultExpiration(), true) -} - -// SetIfAbsentWithTTL if the specified key is not already associated with a value associates it with the given value -// and sets the custom ttl for this key-value item. -// -// If the specified key is not already associated with a value, then it returns false. -// -// Also, it returns false if the key-value item had too much cost and the SetIfAbsent was dropped. -func (c *Cache[K, V]) SetIfAbsentWithTTL(key K, value V, ttl time.Duration) bool { - return c.set(key, value, getExpiration(ttl), true) -} - -func (c *Cache[K, V]) set(key K, value V, expiration uint32, onlyIfAbsent bool) bool { - cost := c.costFunc(key, value) - if int(cost) > c.policy.MaxAvailableCost() { - c.stats.IncRejectedSets() - return false - } - - n := c.nodeManager.Create(key, value, expiration, cost) - if onlyIfAbsent { - res := c.hashmap.SetIfAbsent(n) - if res == nil { - // insert - c.writeBuffer.Push(newAddTask(n)) - return true - } - c.stats.IncRejectedSets() - return false - } - - evicted := c.hashmap.Set(n) - if evicted != nil { - // update - evicted.Die() - c.writeBuffer.Push(newUpdateTask(n, evicted)) - } else { - // insert - c.writeBuffer.Push(newAddTask(n)) - } - - return true -} - -// Delete deletes the association for this key from the cache. 
-func (c *Cache[K, V]) Delete(key K) { - c.afterDelete(c.hashmap.Delete(key)) -} - -func (c *Cache[K, V]) deleteNode(n node.Node[K, V]) { - c.afterDelete(c.hashmap.DeleteNode(n)) -} - -func (c *Cache[K, V]) afterDelete(deleted node.Node[K, V]) { - if deleted != nil { - deleted.Die() - c.writeBuffer.Push(newDeleteTask(deleted)) - } -} - -// DeleteByFunc deletes the association for this key from the cache when the given function returns true. -func (c *Cache[K, V]) DeleteByFunc(f func(key K, value V) bool) { - c.hashmap.Range(func(n node.Node[K, V]) bool { - if !n.IsAlive() || n.HasExpired() { - return true - } - - if f(n.Key(), n.Value()) { - c.deleteNode(n) - } - - return true - }) -} - -func (c *Cache[K, V]) notifyDeletion(key K, value V, cause DeletionCause) { - if c.deletionListener == nil { - return - } - - c.deletionListener(key, value, cause) -} - -func (c *Cache[K, V]) deleteExpiredNode(n node.Node[K, V]) { - c.policy.Delete(n) - deleted := c.hashmap.DeleteNode(n) - if deleted != nil { - n.Die() - c.notifyDeletion(n.Key(), n.Value(), Expired) - c.stats.IncEvictedCount() - c.stats.AddEvictedCost(n.Cost()) - } -} - -func (c *Cache[K, V]) cleanup() { - for { - time.Sleep(time.Second) - - c.evictionMutex.Lock() - if c.isClosed { - c.evictionMutex.Unlock() - return - } - - c.expiryPolicy.DeleteExpired() - - c.evictionMutex.Unlock() - } -} - -func (c *Cache[K, V]) evictNode(n node.Node[K, V]) { - c.expiryPolicy.Delete(n) - deleted := c.hashmap.DeleteNode(n) - if deleted != nil { - n.Die() - c.notifyDeletion(n.Key(), n.Value(), Size) - c.stats.IncEvictedCount() - c.stats.AddEvictedCost(n.Cost()) - } -} - -func (c *Cache[K, V]) onWrite(t task[K, V]) { - if t.isClear() || t.isClose() { - c.writeBuffer.Clear() - - c.policy.Clear() - c.expiryPolicy.Clear() - if t.isClose() { - c.isClosed = true - } - - c.doneClear <- struct{}{} - return - } - - n := t.node() - switch { - case t.isAdd(): - if n.IsAlive() { - c.expiryPolicy.Add(n) - c.policy.Add(n) - } - case t.isUpdate(): - oldNode := t.oldNode() - c.expiryPolicy.Delete(oldNode) - c.policy.Delete(oldNode) - if n.IsAlive() { - c.expiryPolicy.Add(n) - c.policy.Add(n) - } - c.notifyDeletion(oldNode.Key(), oldNode.Value(), Replaced) - case t.isDelete(): - c.expiryPolicy.Delete(n) - c.policy.Delete(n) - c.notifyDeletion(n.Key(), n.Value(), Explicit) - case t.isExpired(): - c.expiryPolicy.Delete(n) - c.policy.Delete(n) - c.notifyDeletion(n.Key(), n.Value(), Expired) - } -} - -func (c *Cache[K, V]) process() { - for { - t := c.writeBuffer.Pop() - - c.evictionMutex.Lock() - c.onWrite(t) - c.evictionMutex.Unlock() - - if t.isClose() { - break - } - } -} - -// Range iterates over all items in the cache. -// -// Iteration stops early when the given function returns false. -func (c *Cache[K, V]) Range(f func(key K, value V) bool) { - c.hashmap.Range(func(n node.Node[K, V]) bool { - if !n.IsAlive() || n.HasExpired() { - return true - } - - return f(n.Key(), n.Value()) - }) -} - -// Clear clears the hash table, all policies, buffers, etc. -// -// NOTE: this operation must be performed when no requests are made to the cache otherwise the behavior is undefined. -func (c *Cache[K, V]) Clear() { - c.clear(newClearTask[K, V]()) -} - -func (c *Cache[K, V]) clear(t task[K, V]) { - c.hashmap.Clear() - for i := 0; i < len(c.stripedBuffer); i++ { - c.stripedBuffer[i].Clear() - } - - c.writeBuffer.Push(t) - <-c.doneClear - - c.stats.Clear() -} - -// Close clears the hash table, all policies, buffers, etc and stop all goroutines. 
-// -// NOTE: this operation must be performed when no requests are made to the cache otherwise the behavior is undefined. -func (c *Cache[K, V]) Close() { - c.closeOnce.Do(func() { - c.clear(newCloseTask[K, V]()) - if c.withExpiration { - unixtime.Stop() - } - }) -} - -// Size returns the current number of items in the cache. -func (c *Cache[K, V]) Size() int { - return c.hashmap.Size() -} - -// Capacity returns the cache capacity. -func (c *Cache[K, V]) Capacity() int { - return c.capacity -} - -// Stats returns a current snapshot of this cache's cumulative statistics. -func (c *Cache[K, V]) Stats() *stats.Stats { - return c.stats -} - -// WithExpiration returns true if the cache was configured with the expiration policy enabled. -func (c *Cache[K, V]) WithExpiration() bool { - return c.withExpiration -} diff --git a/vendor/github.com/maypok86/otter/internal/core/task.go b/vendor/github.com/maypok86/otter/internal/core/task.go deleted file mode 100644 index 2455708b..00000000 --- a/vendor/github.com/maypok86/otter/internal/core/task.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -import ( - "github.com/maypok86/otter/internal/generated/node" -) - -// reason represents the reason for writing the item to the cache. -type reason uint8 - -const ( - addReason reason = iota + 1 - deleteReason - updateReason - clearReason - closeReason - expiredReason -) - -// task is a set of information to update the cache: -// node, reason for write, difference after node cost change, etc. -type task[K comparable, V any] struct { - n node.Node[K, V] - old node.Node[K, V] - writeReason reason -} - -// newAddTask creates a task to add a node to policies. -func newAddTask[K comparable, V any](n node.Node[K, V]) task[K, V] { - return task[K, V]{ - n: n, - writeReason: addReason, - } -} - -// newDeleteTask creates a task to delete a node from policies. -func newDeleteTask[K comparable, V any](n node.Node[K, V]) task[K, V] { - return task[K, V]{ - n: n, - writeReason: deleteReason, - } -} - -// newExpireTask creates a task to delete a expired node from policies. -func newExpiredTask[K comparable, V any](n node.Node[K, V]) task[K, V] { - return task[K, V]{ - n: n, - writeReason: expiredReason, - } -} - -// newUpdateTask creates a task to update the node in the policies. -func newUpdateTask[K comparable, V any](n, oldNode node.Node[K, V]) task[K, V] { - return task[K, V]{ - n: n, - old: oldNode, - writeReason: updateReason, - } -} - -// newClearTask creates a task to clear policies. -func newClearTask[K comparable, V any]() task[K, V] { - return task[K, V]{ - writeReason: clearReason, - } -} - -// newCloseTask creates a task to clear policies and stop all goroutines. -func newCloseTask[K comparable, V any]() task[K, V] { - return task[K, V]{ - writeReason: closeReason, - } -} - -// node returns the node contained in the task. If node was not specified, it returns nil. 
-func (t *task[K, V]) node() node.Node[K, V] { - return t.n -} - -// oldNode returns the old node contained in the task. If old node was not specified, it returns nil. -func (t *task[K, V]) oldNode() node.Node[K, V] { - return t.old -} - -// isAdd returns true if this is an add task. -func (t *task[K, V]) isAdd() bool { - return t.writeReason == addReason -} - -// isDelete returns true if this is a delete task. -func (t *task[K, V]) isDelete() bool { - return t.writeReason == deleteReason -} - -// isExpired returns true if this is an expired task. -func (t *task[K, V]) isExpired() bool { - return t.writeReason == expiredReason -} - -// isUpdate returns true if this is an update task. -func (t *task[K, V]) isUpdate() bool { - return t.writeReason == updateReason -} - -// isClear returns true if this is a clear task. -func (t *task[K, V]) isClear() bool { - return t.writeReason == clearReason -} - -// isClose returns true if this is a close task. -func (t *task[K, V]) isClose() bool { - return t.writeReason == closeReason -} diff --git a/vendor/github.com/maypok86/otter/internal/expiry/fixed.go b/vendor/github.com/maypok86/otter/internal/expiry/fixed.go deleted file mode 100644 index 35792aa0..00000000 --- a/vendor/github.com/maypok86/otter/internal/expiry/fixed.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expiry - -import "github.com/maypok86/otter/internal/generated/node" - -type Fixed[K comparable, V any] struct { - q *queue[K, V] - deleteNode func(node.Node[K, V]) -} - -func NewFixed[K comparable, V any](deleteNode func(node.Node[K, V])) *Fixed[K, V] { - return &Fixed[K, V]{ - q: newQueue[K, V](), - deleteNode: deleteNode, - } -} - -func (f *Fixed[K, V]) Add(n node.Node[K, V]) { - f.q.push(n) -} - -func (f *Fixed[K, V]) Delete(n node.Node[K, V]) { - f.q.delete(n) -} - -func (f *Fixed[K, V]) DeleteExpired() { - for !f.q.isEmpty() && f.q.head.HasExpired() { - f.deleteNode(f.q.pop()) - } -} - -func (f *Fixed[K, V]) Clear() { - f.q.clear() -} diff --git a/vendor/github.com/maypok86/otter/internal/expiry/queue.go b/vendor/github.com/maypok86/otter/internal/expiry/queue.go deleted file mode 100644 index ce4bdf13..00000000 --- a/vendor/github.com/maypok86/otter/internal/expiry/queue.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package expiry - -import "github.com/maypok86/otter/internal/generated/node" - -type queue[K comparable, V any] struct { - head node.Node[K, V] - tail node.Node[K, V] - len int -} - -func newQueue[K comparable, V any]() *queue[K, V] { - return &queue[K, V]{} -} - -func (q *queue[K, V]) length() int { - return q.len -} - -func (q *queue[K, V]) isEmpty() bool { - return q.length() == 0 -} - -func (q *queue[K, V]) push(n node.Node[K, V]) { - if q.isEmpty() { - q.head = n - q.tail = n - } else { - n.SetPrevExp(q.tail) - q.tail.SetNextExp(n) - q.tail = n - } - - q.len++ -} - -func (q *queue[K, V]) pop() node.Node[K, V] { - if q.isEmpty() { - return nil - } - - result := q.head - q.delete(result) - return result -} - -func (q *queue[K, V]) delete(n node.Node[K, V]) { - next := n.NextExp() - prev := n.PrevExp() - - if node.Equals(prev, nil) { - if node.Equals(next, nil) && !node.Equals(q.head, n) { - return - } - - q.head = next - } else { - prev.SetNextExp(next) - n.SetPrevExp(nil) - } - - if node.Equals(next, nil) { - q.tail = prev - } else { - next.SetPrevExp(prev) - n.SetNextExp(nil) - } - - q.len-- -} - -func (q *queue[K, V]) clear() { - for !q.isEmpty() { - q.pop() - } -} diff --git a/vendor/github.com/maypok86/otter/internal/generated/node/b.go b/vendor/github.com/maypok86/otter/internal/generated/node/b.go deleted file mode 100644 index a10e484f..00000000 --- a/vendor/github.com/maypok86/otter/internal/generated/node/b.go +++ /dev/null @@ -1,144 +0,0 @@ -// Code generated by NodeGenerator. DO NOT EDIT. - -// Package node is a generated generator package. -package node - -import ( - "sync/atomic" - "unsafe" -) - -// B is a cache entry that provide the following features: -// -// 1. Base -type B[K comparable, V any] struct { - key K - value V - prev *B[K, V] - next *B[K, V] - state uint32 - frequency uint8 - queueType uint8 -} - -// NewB creates a new B. -func NewB[K comparable, V any](key K, value V, expiration, cost uint32) Node[K, V] { - return &B[K, V]{ - key: key, - value: value, - state: aliveState, - } -} - -// CastPointerToB casts a pointer to B. 
-func CastPointerToB[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { - return (*B[K, V])(ptr) -} - -func (n *B[K, V]) Key() K { - return n.key -} - -func (n *B[K, V]) Value() V { - return n.value -} - -func (n *B[K, V]) AsPointer() unsafe.Pointer { - return unsafe.Pointer(n) -} - -func (n *B[K, V]) Prev() Node[K, V] { - return n.prev -} - -func (n *B[K, V]) SetPrev(v Node[K, V]) { - if v == nil { - n.prev = nil - return - } - n.prev = (*B[K, V])(v.AsPointer()) -} - -func (n *B[K, V]) Next() Node[K, V] { - return n.next -} - -func (n *B[K, V]) SetNext(v Node[K, V]) { - if v == nil { - n.next = nil - return - } - n.next = (*B[K, V])(v.AsPointer()) -} - -func (n *B[K, V]) PrevExp() Node[K, V] { - panic("not implemented") -} - -func (n *B[K, V]) SetPrevExp(v Node[K, V]) { - panic("not implemented") -} - -func (n *B[K, V]) NextExp() Node[K, V] { - panic("not implemented") -} - -func (n *B[K, V]) SetNextExp(v Node[K, V]) { - panic("not implemented") -} - -func (n *B[K, V]) HasExpired() bool { - return false -} - -func (n *B[K, V]) Expiration() uint32 { - panic("not implemented") -} - -func (n *B[K, V]) Cost() uint32 { - return 1 -} - -func (n *B[K, V]) IsAlive() bool { - return atomic.LoadUint32(&n.state) == aliveState -} - -func (n *B[K, V]) Die() { - atomic.StoreUint32(&n.state, deadState) -} - -func (n *B[K, V]) Frequency() uint8 { - return n.frequency -} - -func (n *B[K, V]) IncrementFrequency() { - n.frequency = minUint8(n.frequency+1, maxFrequency) -} - -func (n *B[K, V]) DecrementFrequency() { - n.frequency-- -} - -func (n *B[K, V]) ResetFrequency() { - n.frequency = 0 -} - -func (n *B[K, V]) MarkSmall() { - n.queueType = smallQueueType -} - -func (n *B[K, V]) IsSmall() bool { - return n.queueType == smallQueueType -} - -func (n *B[K, V]) MarkMain() { - n.queueType = mainQueueType -} - -func (n *B[K, V]) IsMain() bool { - return n.queueType == mainQueueType -} - -func (n *B[K, V]) Unmark() { - n.queueType = unknownQueueType -} diff --git a/vendor/github.com/maypok86/otter/internal/generated/node/bc.go b/vendor/github.com/maypok86/otter/internal/generated/node/bc.go deleted file mode 100644 index 962ecde0..00000000 --- a/vendor/github.com/maypok86/otter/internal/generated/node/bc.go +++ /dev/null @@ -1,148 +0,0 @@ -// Code generated by NodeGenerator. DO NOT EDIT. - -// Package node is a generated generator package. -package node - -import ( - "sync/atomic" - "unsafe" -) - -// BC is a cache entry that provide the following features: -// -// 1. Base -// -// 2. Cost -type BC[K comparable, V any] struct { - key K - value V - prev *BC[K, V] - next *BC[K, V] - cost uint32 - state uint32 - frequency uint8 - queueType uint8 -} - -// NewBC creates a new BC. -func NewBC[K comparable, V any](key K, value V, expiration, cost uint32) Node[K, V] { - return &BC[K, V]{ - key: key, - value: value, - cost: cost, - state: aliveState, - } -} - -// CastPointerToBC casts a pointer to BC. 
-func CastPointerToBC[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { - return (*BC[K, V])(ptr) -} - -func (n *BC[K, V]) Key() K { - return n.key -} - -func (n *BC[K, V]) Value() V { - return n.value -} - -func (n *BC[K, V]) AsPointer() unsafe.Pointer { - return unsafe.Pointer(n) -} - -func (n *BC[K, V]) Prev() Node[K, V] { - return n.prev -} - -func (n *BC[K, V]) SetPrev(v Node[K, V]) { - if v == nil { - n.prev = nil - return - } - n.prev = (*BC[K, V])(v.AsPointer()) -} - -func (n *BC[K, V]) Next() Node[K, V] { - return n.next -} - -func (n *BC[K, V]) SetNext(v Node[K, V]) { - if v == nil { - n.next = nil - return - } - n.next = (*BC[K, V])(v.AsPointer()) -} - -func (n *BC[K, V]) PrevExp() Node[K, V] { - panic("not implemented") -} - -func (n *BC[K, V]) SetPrevExp(v Node[K, V]) { - panic("not implemented") -} - -func (n *BC[K, V]) NextExp() Node[K, V] { - panic("not implemented") -} - -func (n *BC[K, V]) SetNextExp(v Node[K, V]) { - panic("not implemented") -} - -func (n *BC[K, V]) HasExpired() bool { - return false -} - -func (n *BC[K, V]) Expiration() uint32 { - panic("not implemented") -} - -func (n *BC[K, V]) Cost() uint32 { - return n.cost -} - -func (n *BC[K, V]) IsAlive() bool { - return atomic.LoadUint32(&n.state) == aliveState -} - -func (n *BC[K, V]) Die() { - atomic.StoreUint32(&n.state, deadState) -} - -func (n *BC[K, V]) Frequency() uint8 { - return n.frequency -} - -func (n *BC[K, V]) IncrementFrequency() { - n.frequency = minUint8(n.frequency+1, maxFrequency) -} - -func (n *BC[K, V]) DecrementFrequency() { - n.frequency-- -} - -func (n *BC[K, V]) ResetFrequency() { - n.frequency = 0 -} - -func (n *BC[K, V]) MarkSmall() { - n.queueType = smallQueueType -} - -func (n *BC[K, V]) IsSmall() bool { - return n.queueType == smallQueueType -} - -func (n *BC[K, V]) MarkMain() { - n.queueType = mainQueueType -} - -func (n *BC[K, V]) IsMain() bool { - return n.queueType == mainQueueType -} - -func (n *BC[K, V]) Unmark() { - n.queueType = unknownQueueType -} diff --git a/vendor/github.com/maypok86/otter/internal/generated/node/be.go b/vendor/github.com/maypok86/otter/internal/generated/node/be.go deleted file mode 100644 index ef66eef9..00000000 --- a/vendor/github.com/maypok86/otter/internal/generated/node/be.go +++ /dev/null @@ -1,160 +0,0 @@ -// Code generated by NodeGenerator. DO NOT EDIT. - -// Package node is a generated generator package. -package node - -import ( - "sync/atomic" - "unsafe" - - "github.com/maypok86/otter/internal/unixtime" -) - -// BE is a cache entry that provide the following features: -// -// 1. Base -// -// 2. Expiration -type BE[K comparable, V any] struct { - key K - value V - prev *BE[K, V] - next *BE[K, V] - prevExp *BE[K, V] - nextExp *BE[K, V] - expiration uint32 - state uint32 - frequency uint8 - queueType uint8 -} - -// NewBE creates a new BE. -func NewBE[K comparable, V any](key K, value V, expiration, cost uint32) Node[K, V] { - return &BE[K, V]{ - key: key, - value: value, - expiration: expiration, - state: aliveState, - } -} - -// CastPointerToBE casts a pointer to BE. 
-func CastPointerToBE[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { - return (*BE[K, V])(ptr) -} - -func (n *BE[K, V]) Key() K { - return n.key -} - -func (n *BE[K, V]) Value() V { - return n.value -} - -func (n *BE[K, V]) AsPointer() unsafe.Pointer { - return unsafe.Pointer(n) -} - -func (n *BE[K, V]) Prev() Node[K, V] { - return n.prev -} - -func (n *BE[K, V]) SetPrev(v Node[K, V]) { - if v == nil { - n.prev = nil - return - } - n.prev = (*BE[K, V])(v.AsPointer()) -} - -func (n *BE[K, V]) Next() Node[K, V] { - return n.next -} - -func (n *BE[K, V]) SetNext(v Node[K, V]) { - if v == nil { - n.next = nil - return - } - n.next = (*BE[K, V])(v.AsPointer()) -} - -func (n *BE[K, V]) PrevExp() Node[K, V] { - return n.prevExp -} - -func (n *BE[K, V]) SetPrevExp(v Node[K, V]) { - if v == nil { - n.prevExp = nil - return - } - n.prevExp = (*BE[K, V])(v.AsPointer()) -} - -func (n *BE[K, V]) NextExp() Node[K, V] { - return n.nextExp -} - -func (n *BE[K, V]) SetNextExp(v Node[K, V]) { - if v == nil { - n.nextExp = nil - return - } - n.nextExp = (*BE[K, V])(v.AsPointer()) -} - -func (n *BE[K, V]) HasExpired() bool { - return n.expiration <= unixtime.Now() -} - -func (n *BE[K, V]) Expiration() uint32 { - return n.expiration -} - -func (n *BE[K, V]) Cost() uint32 { - return 1 -} - -func (n *BE[K, V]) IsAlive() bool { - return atomic.LoadUint32(&n.state) == aliveState -} - -func (n *BE[K, V]) Die() { - atomic.StoreUint32(&n.state, deadState) -} - -func (n *BE[K, V]) Frequency() uint8 { - return n.frequency -} - -func (n *BE[K, V]) IncrementFrequency() { - n.frequency = minUint8(n.frequency+1, maxFrequency) -} - -func (n *BE[K, V]) DecrementFrequency() { - n.frequency-- -} - -func (n *BE[K, V]) ResetFrequency() { - n.frequency = 0 -} - -func (n *BE[K, V]) MarkSmall() { - n.queueType = smallQueueType -} - -func (n *BE[K, V]) IsSmall() bool { - return n.queueType == smallQueueType -} - -func (n *BE[K, V]) MarkMain() { - n.queueType = mainQueueType -} - -func (n *BE[K, V]) IsMain() bool { - return n.queueType == mainQueueType -} - -func (n *BE[K, V]) Unmark() { - n.queueType = unknownQueueType -} diff --git a/vendor/github.com/maypok86/otter/internal/generated/node/bec.go b/vendor/github.com/maypok86/otter/internal/generated/node/bec.go deleted file mode 100644 index f4813ca9..00000000 --- a/vendor/github.com/maypok86/otter/internal/generated/node/bec.go +++ /dev/null @@ -1,164 +0,0 @@ -// Code generated by NodeGenerator. DO NOT EDIT. - -// Package node is a generated generator package. -package node - -import ( - "sync/atomic" - "unsafe" - - "github.com/maypok86/otter/internal/unixtime" -) - -// BEC is a cache entry that provide the following features: -// -// 1. Base -// -// 2. Expiration -// -// 3. Cost -type BEC[K comparable, V any] struct { - key K - value V - prev *BEC[K, V] - next *BEC[K, V] - prevExp *BEC[K, V] - nextExp *BEC[K, V] - expiration uint32 - cost uint32 - state uint32 - frequency uint8 - queueType uint8 -} - -// NewBEC creates a new BEC. -func NewBEC[K comparable, V any](key K, value V, expiration, cost uint32) Node[K, V] { - return &BEC[K, V]{ - key: key, - value: value, - expiration: expiration, - cost: cost, - state: aliveState, - } -} - -// CastPointerToBEC casts a pointer to BEC. 
-func CastPointerToBEC[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { - return (*BEC[K, V])(ptr) -} - -func (n *BEC[K, V]) Key() K { - return n.key -} - -func (n *BEC[K, V]) Value() V { - return n.value -} - -func (n *BEC[K, V]) AsPointer() unsafe.Pointer { - return unsafe.Pointer(n) -} - -func (n *BEC[K, V]) Prev() Node[K, V] { - return n.prev -} - -func (n *BEC[K, V]) SetPrev(v Node[K, V]) { - if v == nil { - n.prev = nil - return - } - n.prev = (*BEC[K, V])(v.AsPointer()) -} - -func (n *BEC[K, V]) Next() Node[K, V] { - return n.next -} - -func (n *BEC[K, V]) SetNext(v Node[K, V]) { - if v == nil { - n.next = nil - return - } - n.next = (*BEC[K, V])(v.AsPointer()) -} - -func (n *BEC[K, V]) PrevExp() Node[K, V] { - return n.prevExp -} - -func (n *BEC[K, V]) SetPrevExp(v Node[K, V]) { - if v == nil { - n.prevExp = nil - return - } - n.prevExp = (*BEC[K, V])(v.AsPointer()) -} - -func (n *BEC[K, V]) NextExp() Node[K, V] { - return n.nextExp -} - -func (n *BEC[K, V]) SetNextExp(v Node[K, V]) { - if v == nil { - n.nextExp = nil - return - } - n.nextExp = (*BEC[K, V])(v.AsPointer()) -} - -func (n *BEC[K, V]) HasExpired() bool { - return n.expiration <= unixtime.Now() -} - -func (n *BEC[K, V]) Expiration() uint32 { - return n.expiration -} - -func (n *BEC[K, V]) Cost() uint32 { - return n.cost -} - -func (n *BEC[K, V]) IsAlive() bool { - return atomic.LoadUint32(&n.state) == aliveState -} - -func (n *BEC[K, V]) Die() { - atomic.StoreUint32(&n.state, deadState) -} - -func (n *BEC[K, V]) Frequency() uint8 { - return n.frequency -} - -func (n *BEC[K, V]) IncrementFrequency() { - n.frequency = minUint8(n.frequency+1, maxFrequency) -} - -func (n *BEC[K, V]) DecrementFrequency() { - n.frequency-- -} - -func (n *BEC[K, V]) ResetFrequency() { - n.frequency = 0 -} - -func (n *BEC[K, V]) MarkSmall() { - n.queueType = smallQueueType -} - -func (n *BEC[K, V]) IsSmall() bool { - return n.queueType == smallQueueType -} - -func (n *BEC[K, V]) MarkMain() { - n.queueType = mainQueueType -} - -func (n *BEC[K, V]) IsMain() bool { - return n.queueType == mainQueueType -} - -func (n *BEC[K, V]) Unmark() { - n.queueType = unknownQueueType -} diff --git a/vendor/github.com/maypok86/otter/internal/generated/node/manager.go b/vendor/github.com/maypok86/otter/internal/generated/node/manager.go deleted file mode 100644 index e48b9008..00000000 --- a/vendor/github.com/maypok86/otter/internal/generated/node/manager.go +++ /dev/null @@ -1,143 +0,0 @@ -// Code generated by NodeGenerator. DO NOT EDIT. - -// Package node is a generated generator package. -package node - -import ( - "strings" - "unsafe" -) - -const ( - unknownQueueType uint8 = iota - smallQueueType - mainQueueType - - maxFrequency uint8 = 3 -) - -const ( - aliveState uint32 = iota - deadState -) - -// Node is a cache entry. -type Node[K comparable, V any] interface { - // Key returns the key. - Key() K - // Value returns the value. - Value() V - // AsPointer returns the node as a pointer. - AsPointer() unsafe.Pointer - // Prev returns the previous node in the eviction policy. - Prev() Node[K, V] - // SetPrev sets the previous node in the eviction policy. - SetPrev(v Node[K, V]) - // Next returns the next node in the eviction policy. - Next() Node[K, V] - // SetNext sets the next node in the eviction policy. - SetNext(v Node[K, V]) - // PrevExp returns the previous node in the expiration policy. - PrevExp() Node[K, V] - // SetPrevExp sets the previous node in the expiration policy. 
- SetPrevExp(v Node[K, V]) - // NextExp returns the next node in the expiration policy. - NextExp() Node[K, V] - // SetNextExp sets the next node in the expiration policy. - SetNextExp(v Node[K, V]) - // HasExpired returns true if node has expired. - HasExpired() bool - // Expiration returns the expiration time. - Expiration() uint32 - // Cost returns the cost of the node. - Cost() uint32 - // IsAlive returns true if the entry is available in the hash-table. - IsAlive() bool - // Die sets the node to the dead state. - Die() - // Frequency returns the frequency of the node. - Frequency() uint8 - // IncrementFrequency increments the frequency of the node. - IncrementFrequency() - // DecrementFrequency decrements the frequency of the node. - DecrementFrequency() - // ResetFrequency resets the frequency. - ResetFrequency() - // MarkSmall sets the status to the small queue. - MarkSmall() - // IsSmall returns true if node is in the small queue. - IsSmall() bool - // MarkMain sets the status to the main queue. - MarkMain() - // IsMain returns true if node is in the main queue. - IsMain() bool - // Unmark sets the status to unknown. - Unmark() -} - -func Equals[K comparable, V any](a, b Node[K, V]) bool { - if a == nil { - return b == nil || b.AsPointer() == nil - } - if b == nil { - return a.AsPointer() == nil - } - return a.AsPointer() == b.AsPointer() -} - -type Config struct { - WithExpiration bool - WithCost bool -} - -type Manager[K comparable, V any] struct { - create func(key K, value V, expiration, cost uint32) Node[K, V] - fromPointer func(ptr unsafe.Pointer) Node[K, V] -} - -func NewManager[K comparable, V any](c Config) *Manager[K, V] { - var sb strings.Builder - sb.WriteString("b") - if c.WithExpiration { - sb.WriteString("e") - } - if c.WithCost { - sb.WriteString("c") - } - nodeType := sb.String() - m := &Manager[K, V]{} - - switch nodeType { - case "bec": - m.create = NewBEC[K, V] - m.fromPointer = CastPointerToBEC[K, V] - case "bc": - m.create = NewBC[K, V] - m.fromPointer = CastPointerToBC[K, V] - case "be": - m.create = NewBE[K, V] - m.fromPointer = CastPointerToBE[K, V] - case "b": - m.create = NewB[K, V] - m.fromPointer = CastPointerToB[K, V] - default: - panic("not valid nodeType") - } - return m -} - -func (m *Manager[K, V]) Create(key K, value V, expiration, cost uint32) Node[K, V] { - return m.create(key, value, expiration, cost) -} - -func (m *Manager[K, V]) FromPointer(ptr unsafe.Pointer) Node[K, V] { - return m.fromPointer(ptr) -} - -func minUint8(a, b uint8) uint8 { - if a < b { - return a - } - - return b -} diff --git a/vendor/github.com/maypok86/otter/internal/hashtable/bucket.go b/vendor/github.com/maypok86/otter/internal/hashtable/bucket.go deleted file mode 100644 index 2bec4656..00000000 --- a/vendor/github.com/maypok86/otter/internal/hashtable/bucket.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// Copyright (c) 2021 Andrey Pechkurov -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright notice. This code is a fork of xsync.MapOf from this file with some changes: -// https://github.com/puzpuzpuz/xsync/blob/main/mapof.go -// -// Use of this source code is governed by a MIT license that can be found -// at https://github.com/puzpuzpuz/xsync/blob/main/LICENSE - -package hashtable - -import ( - "sync" - "unsafe" - - "github.com/maypok86/otter/internal/xruntime" -) - -// paddedBucket is a CL-sized map bucket holding up to -// bucketSize nodes. -type paddedBucket struct { - // ensure each bucket takes two cache lines on both 32 and 64-bit archs - padding [xruntime.CacheLineSize - unsafe.Sizeof(bucket{})]byte - - bucket -} - -type bucket struct { - hashes [bucketSize]uint64 - nodes [bucketSize]unsafe.Pointer - next unsafe.Pointer - mutex sync.Mutex -} - -func (root *paddedBucket) isEmpty() bool { - b := root - for { - for i := 0; i < bucketSize; i++ { - if b.nodes[i] != nil { - return false - } - } - if b.next == nil { - return true - } - b = (*paddedBucket)(b.next) - } -} - -func (root *paddedBucket) add(h uint64, nodePtr unsafe.Pointer) { - b := root - for { - for i := 0; i < bucketSize; i++ { - if b.nodes[i] == nil { - b.hashes[i] = h - b.nodes[i] = nodePtr - return - } - } - if b.next == nil { - newBucket := &paddedBucket{} - newBucket.hashes[0] = h - newBucket.nodes[0] = nodePtr - b.next = unsafe.Pointer(newBucket) - return - } - b = (*paddedBucket)(b.next) - } -} diff --git a/vendor/github.com/maypok86/otter/internal/hashtable/map.go b/vendor/github.com/maypok86/otter/internal/hashtable/map.go deleted file mode 100644 index 01d261cd..00000000 --- a/vendor/github.com/maypok86/otter/internal/hashtable/map.go +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// Copyright (c) 2021 Andrey Pechkurov -// -// Copyright notice. This code is a fork of xsync.MapOf from this file with some changes: -// https://github.com/puzpuzpuz/xsync/blob/main/mapof.go -// -// Use of this source code is governed by a MIT license that can be found -// at https://github.com/puzpuzpuz/xsync/blob/main/LICENSE - -package hashtable - -import ( - "fmt" - "sync" - "sync/atomic" - "unsafe" - - "github.com/dolthub/maphash" - - "github.com/maypok86/otter/internal/generated/node" - "github.com/maypok86/otter/internal/xmath" - "github.com/maypok86/otter/internal/xruntime" -) - -type resizeHint int - -const ( - growHint resizeHint = 0 - shrinkHint resizeHint = 1 - clearHint resizeHint = 2 -) - -const ( - // number of entries per bucket - // 3 because we need to fit them into 1 cache line (64 bytes). - bucketSize = 3 - // percentage at which the map will be expanded. - loadFactor = 0.75 - // threshold fraction of table occupation to start a table shrinking - // when deleting the last entry in a bucket chain. - shrinkFraction = 128 - minBucketCount = 32 - minNodeCount = bucketSize * minBucketCount - minCounterLength = 8 - maxCounterLength = 32 -) - -// Map is like a Go map[K]V but is safe for concurrent -// use by multiple goroutines without additional locking or -// coordination. -// -// A Map must not be copied after first use. -// -// Map uses a modified version of Cache-Line Hash Table (CLHT) -// data structure: https://github.com/LPD-EPFL/CLHT -// -// CLHT is built around idea to organize the hash table in -// cache-line-sized buckets, so that on all modern CPUs update -// operations complete with at most one cache-line transfer. -// Also, Get operations involve no write to memory, as well as no -// mutexes or any other sort of locks. 
Due to this design, in all -// considered scenarios Map outperforms sync.Map. -type Map[K comparable, V any] struct { - table unsafe.Pointer - - nodeManager *node.Manager[K, V] - // only used along with resizeCond - resizeMutex sync.Mutex - // used to wake up resize waiters (concurrent modifications) - resizeCond sync.Cond - // resize in progress flag; updated atomically - resizing atomic.Int64 -} - -type table[K comparable] struct { - buckets []paddedBucket - // sharded counter for number of table entries; - // used to determine if a table shrinking is needed - // occupies min(buckets_memory/1024, 64KB) of memory - size []paddedCounter - mask uint64 - hasher maphash.Hasher[K] -} - -func (t *table[K]) addSize(bucketIdx uint64, delta int) { - //nolint:gosec // there will never be an overflow - counterIdx := uint64(len(t.size)-1) & bucketIdx - atomic.AddInt64(&t.size[counterIdx].c, int64(delta)) -} - -func (t *table[K]) addSizePlain(bucketIdx uint64, delta int) { - //nolint:gosec // there will never be an overflow - counterIdx := uint64(len(t.size)-1) & bucketIdx - t.size[counterIdx].c += int64(delta) -} - -func (t *table[K]) sumSize() int64 { - sum := int64(0) - for i := range t.size { - sum += atomic.LoadInt64(&t.size[i].c) - } - return sum -} - -func (t *table[K]) calcShiftHash(key K) uint64 { - // uint64(0) is a reserved value which stands for an empty slot. - h := t.hasher.Hash(key) - if h == uint64(0) { - return 1 - } - - return h -} - -type counter struct { - c int64 -} - -type paddedCounter struct { - // padding prevents false sharing. - padding [xruntime.CacheLineSize - unsafe.Sizeof(counter{})]byte - - counter -} - -// NewWithSize creates a new Map instance with capacity enough -// to hold size nodes. If size is zero or negative, the value -// is ignored. -func NewWithSize[K comparable, V any](nodeManager *node.Manager[K, V], size int) *Map[K, V] { - return newMap[K, V](nodeManager, size) -} - -// New creates a new Map instance. -func New[K comparable, V any](nodeManager *node.Manager[K, V]) *Map[K, V] { - return newMap[K, V](nodeManager, minNodeCount) -} - -func newMap[K comparable, V any](nodeManager *node.Manager[K, V], size int) *Map[K, V] { - m := &Map[K, V]{ - nodeManager: nodeManager, - } - m.resizeCond = *sync.NewCond(&m.resizeMutex) - var t *table[K] - if size <= minNodeCount { - t = newTable(minBucketCount, maphash.NewHasher[K]()) - } else { - //nolint:gosec // there will never be an overflow - bucketCount := xmath.RoundUpPowerOf2(uint32(size / bucketSize)) - t = newTable(int(bucketCount), maphash.NewHasher[K]()) - } - atomic.StorePointer(&m.table, unsafe.Pointer(t)) - return m -} - -func newTable[K comparable](bucketCount int, prevHasher maphash.Hasher[K]) *table[K] { - buckets := make([]paddedBucket, bucketCount) - counterLength := bucketCount >> 10 - if counterLength < minCounterLength { - counterLength = minCounterLength - } else if counterLength > maxCounterLength { - counterLength = maxCounterLength - } - counter := make([]paddedCounter, counterLength) - //nolint:gosec // there will never be an overflow - mask := uint64(len(buckets) - 1) - t := &table[K]{ - buckets: buckets, - size: counter, - mask: mask, - hasher: maphash.NewSeed[K](prevHasher), - } - return t -} - -// Get returns the node.Node stored in the map for a key, or nil if no node is present. -// -// The ok result indicates whether node was found in the map. 
-func (m *Map[K, V]) Get(key K) (got node.Node[K, V], ok bool) { - t := (*table[K])(atomic.LoadPointer(&m.table)) - hash := t.calcShiftHash(key) - bucketIdx := hash & t.mask - b := &t.buckets[bucketIdx] - for { - for i := 0; i < bucketSize; i++ { - // we treat the hash code only as a hint, so there is no - // need to get an atomic snapshot. - h := atomic.LoadUint64(&b.hashes[i]) - if h == uint64(0) || h != hash { - continue - } - // we found a matching hash code - nodePtr := atomic.LoadPointer(&b.nodes[i]) - if nodePtr == nil { - // concurrent write in this node - continue - } - n := m.nodeManager.FromPointer(nodePtr) - if key != n.Key() { - continue - } - - return n, true - } - bucketPtr := atomic.LoadPointer(&b.next) - if bucketPtr == nil { - return nil, false - } - b = (*paddedBucket)(bucketPtr) - } -} - -// Set sets the node.Node for the key. -// -// Returns the evicted node or nil if the node was inserted. -func (m *Map[K, V]) Set(n node.Node[K, V]) node.Node[K, V] { - return m.set(n, false) -} - -// SetIfAbsent sets the node.Node if the specified key is not already associated with a value (or is mapped to null) -// associates it with the given value and returns null, else returns the current node. -func (m *Map[K, V]) SetIfAbsent(n node.Node[K, V]) node.Node[K, V] { - return m.set(n, true) -} - -func (m *Map[K, V]) set(n node.Node[K, V], onlyIfAbsent bool) node.Node[K, V] { - for { - RETRY: - var ( - emptyBucket *paddedBucket - emptyIdx int - ) - t := (*table[K])(atomic.LoadPointer(&m.table)) - tableLen := len(t.buckets) - hash := t.calcShiftHash(n.Key()) - bucketIdx := hash & t.mask - rootBucket := &t.buckets[bucketIdx] - rootBucket.mutex.Lock() - // the following two checks must go in reverse to what's - // in the resize method. - if m.resizeInProgress() { - // resize is in progress. wait, then go for another attempt. - rootBucket.mutex.Unlock() - m.waitForResize() - goto RETRY - } - if m.newerTableExists(t) { - // someone resized the table, go for another attempt. - rootBucket.mutex.Unlock() - goto RETRY - } - b := rootBucket - for { - for i := 0; i < bucketSize; i++ { - h := b.hashes[i] - if h == uint64(0) { - if emptyBucket == nil { - emptyBucket = b - emptyIdx = i - } - continue - } - if h != hash { - continue - } - prev := m.nodeManager.FromPointer(b.nodes[i]) - if n.Key() != prev.Key() { - continue - } - if onlyIfAbsent { - // found node, drop set - rootBucket.mutex.Unlock() - return n - } - // in-place update. - // We get a copy of the value via an interface{} on each call, - // thus the live value pointers are unique. Otherwise atomic - // snapshot won't be correct in case of multiple Store calls - // using the same value. - atomic.StorePointer(&b.nodes[i], n.AsPointer()) - rootBucket.mutex.Unlock() - return prev - } - if b.next == nil { - if emptyBucket != nil { - // insertion into an existing bucket. - // first we update the hash, then the entry. - atomic.StoreUint64(&emptyBucket.hashes[emptyIdx], hash) - atomic.StorePointer(&emptyBucket.nodes[emptyIdx], n.AsPointer()) - rootBucket.mutex.Unlock() - t.addSize(bucketIdx, 1) - return nil - } - growThreshold := float64(tableLen) * bucketSize * loadFactor - if t.sumSize() > int64(growThreshold) { - // need to grow the table then go for another attempt. - rootBucket.mutex.Unlock() - m.resize(t, growHint) - goto RETRY - } - // insertion into a new bucket. - // create and append the bucket. 
- newBucket := &paddedBucket{} - newBucket.hashes[0] = hash - newBucket.nodes[0] = n.AsPointer() - atomic.StorePointer(&b.next, unsafe.Pointer(newBucket)) - rootBucket.mutex.Unlock() - t.addSize(bucketIdx, 1) - return nil - } - b = (*paddedBucket)(b.next) - } - } -} - -// Delete deletes the value for a key. -// -// Returns the deleted node or nil if the node wasn't deleted. -func (m *Map[K, V]) Delete(key K) node.Node[K, V] { - return m.delete(key, func(n node.Node[K, V]) bool { - return key == n.Key() - }) -} - -// DeleteNode evicts the node for a key. -// -// Returns the evicted node or nil if the node wasn't evicted. -func (m *Map[K, V]) DeleteNode(n node.Node[K, V]) node.Node[K, V] { - return m.delete(n.Key(), func(current node.Node[K, V]) bool { - return node.Equals(n, current) - }) -} - -func (m *Map[K, V]) delete(key K, cmp func(node.Node[K, V]) bool) node.Node[K, V] { - for { - RETRY: - hintNonEmpty := 0 - t := (*table[K])(atomic.LoadPointer(&m.table)) - hash := t.calcShiftHash(key) - bucketIdx := hash & t.mask - rootBucket := &t.buckets[bucketIdx] - rootBucket.mutex.Lock() - // the following two checks must go in reverse to what's - // in the resize method. - if m.resizeInProgress() { - // resize is in progress. Wait, then go for another attempt. - rootBucket.mutex.Unlock() - m.waitForResize() - goto RETRY - } - if m.newerTableExists(t) { - // someone resized the table. Go for another attempt. - rootBucket.mutex.Unlock() - goto RETRY - } - b := rootBucket - for { - for i := 0; i < bucketSize; i++ { - h := b.hashes[i] - if h == uint64(0) { - continue - } - if h != hash { - hintNonEmpty++ - continue - } - current := m.nodeManager.FromPointer(b.nodes[i]) - if !cmp(current) { - hintNonEmpty++ - continue - } - // Deletion. - // First we update the hash, then the node. - atomic.StoreUint64(&b.hashes[i], uint64(0)) - atomic.StorePointer(&b.nodes[i], nil) - leftEmpty := false - if hintNonEmpty == 0 { - leftEmpty = b.isEmpty() - } - rootBucket.mutex.Unlock() - t.addSize(bucketIdx, -1) - // Might need to shrink the table. - if leftEmpty { - m.resize(t, shrinkHint) - } - return current - } - if b.next == nil { - // not found - rootBucket.mutex.Unlock() - return nil - } - b = (*paddedBucket)(b.next) - } - } -} - -func (m *Map[K, V]) resize(known *table[K], hint resizeHint) { - knownTableLen := len(known.buckets) - // fast path for shrink attempts. - if hint == shrinkHint { - shrinkThreshold := int64((knownTableLen * bucketSize) / shrinkFraction) - if knownTableLen == minBucketCount || known.sumSize() > shrinkThreshold { - return - } - } - // slow path. - if !m.resizing.CompareAndSwap(0, 1) { - // someone else started resize. Wait for it to finish. - m.waitForResize() - return - } - var nt *table[K] - t := (*table[K])(atomic.LoadPointer(&m.table)) - tableLen := len(t.buckets) - switch hint { - case growHint: - // grow the table with factor of 2. - nt = newTable(tableLen<<1, t.hasher) - case shrinkHint: - shrinkThreshold := int64((tableLen * bucketSize) / shrinkFraction) - if tableLen > minBucketCount && t.sumSize() <= shrinkThreshold { - // shrink the table with factor of 2. - nt = newTable(tableLen>>1, t.hasher) - } else { - // no need to shrink, wake up all waiters and give up. - m.resizeMutex.Lock() - m.resizing.Store(0) - m.resizeCond.Broadcast() - m.resizeMutex.Unlock() - return - } - case clearHint: - nt = newTable(minBucketCount, t.hasher) - default: - panic(fmt.Sprintf("unexpected resize hint: %d", hint)) - } - // copy the data only if we're not clearing the hashtable. 
- if hint != clearHint { - for i := 0; i < tableLen; i++ { - copied := m.copyBuckets(&t.buckets[i], nt) - //nolint:gosec // there will never be an overflow - nt.addSizePlain(uint64(i), copied) - } - } - // publish the new table and wake up all waiters. - atomic.StorePointer(&m.table, unsafe.Pointer(nt)) - m.resizeMutex.Lock() - m.resizing.Store(0) - m.resizeCond.Broadcast() - m.resizeMutex.Unlock() -} - -func (m *Map[K, V]) copyBuckets(b *paddedBucket, dest *table[K]) (copied int) { - rootBucket := b - rootBucket.mutex.Lock() - for { - for i := 0; i < bucketSize; i++ { - if b.nodes[i] == nil { - continue - } - n := m.nodeManager.FromPointer(b.nodes[i]) - hash := dest.calcShiftHash(n.Key()) - bucketIdx := hash & dest.mask - dest.buckets[bucketIdx].add(hash, b.nodes[i]) - copied++ - } - if b.next == nil { - rootBucket.mutex.Unlock() - return copied - } - b = (*paddedBucket)(b.next) - } -} - -func (m *Map[K, V]) newerTableExists(table *table[K]) bool { - currentTable := atomic.LoadPointer(&m.table) - return uintptr(currentTable) != uintptr(unsafe.Pointer(table)) -} - -func (m *Map[K, V]) resizeInProgress() bool { - return m.resizing.Load() == 1 -} - -func (m *Map[K, V]) waitForResize() { - m.resizeMutex.Lock() - for m.resizeInProgress() { - m.resizeCond.Wait() - } - m.resizeMutex.Unlock() -} - -// Range calls f sequentially for each node present in the -// map. If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot -// of the Map's contents: no key will be visited more than once, but -// if the value for any key is stored or deleted concurrently, Range -// may reflect any mapping for that key from any point during the -// Range call. -// -// It is safe to modify the map while iterating it. However, the -// concurrent modification rule apply, i.e. the changes may be not -// reflected in the subsequently iterated nodes. -func (m *Map[K, V]) Range(f func(node.Node[K, V]) bool) { - var zeroPtr unsafe.Pointer - // Pre-allocate array big enough to fit nodes for most hash tables. - buffer := make([]unsafe.Pointer, 0, 16*bucketSize) - tp := atomic.LoadPointer(&m.table) - t := *(*table[K])(tp) - for i := range t.buckets { - rootBucket := &t.buckets[i] - b := rootBucket - // Prevent concurrent modifications and copy all nodes into - // the intermediate slice. - rootBucket.mutex.Lock() - for { - for i := 0; i < bucketSize; i++ { - if b.nodes[i] != nil { - buffer = append(buffer, b.nodes[i]) - } - } - if b.next == nil { - rootBucket.mutex.Unlock() - break - } - b = (*paddedBucket)(b.next) - } - // Call the function for all copied nodes. - for j := range buffer { - n := m.nodeManager.FromPointer(buffer[j]) - if !f(n) { - return - } - // Remove the reference to allow the copied nodes to be GCed before this method finishes. - buffer[j] = zeroPtr - } - buffer = buffer[:0] - } -} - -// Clear deletes all keys and values currently stored in the map. -func (m *Map[K, V]) Clear() { - table := (*table[K])(atomic.LoadPointer(&m.table)) - m.resize(table, clearHint) -} - -// Size returns current size of the map. -func (m *Map[K, V]) Size() int { - table := (*table[K])(atomic.LoadPointer(&m.table)) - return int(table.sumSize()) -} diff --git a/vendor/github.com/maypok86/otter/internal/lossy/buffer.go b/vendor/github.com/maypok86/otter/internal/lossy/buffer.go deleted file mode 100644 index a0a1d558..00000000 --- a/vendor/github.com/maypok86/otter/internal/lossy/buffer.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lossy - -import ( - "runtime" - "sync/atomic" - "unsafe" - - "github.com/maypok86/otter/internal/generated/node" - "github.com/maypok86/otter/internal/xruntime" -) - -const ( - // The maximum number of elements per buffer. - capacity = 16 - mask = uint64(capacity - 1) -) - -// PolicyBuffers is the set of buffers returned by the lossy buffer. -type PolicyBuffers[K comparable, V any] struct { - Returned []node.Node[K, V] -} - -// Buffer is a circular ring buffer stores the elements being transferred by the producers to the consumer. -// The monotonically increasing count of reads and writes allow indexing sequentially to the next -// element location based upon a power-of-two sizing. -// -// The producers race to read the counts, check if there is available capacity, and if so then try -// once to CAS to the next write count. If the increment is successful then the producer lazily -// publishes the element. The producer does not retry or block when unsuccessful due to a failed -// CAS or the buffer being full. -// -// The consumer reads the counts and takes the available elements. The clearing of the elements -// and the next read count are lazily set. -// -// This implementation is striped to further increase concurrency. -type Buffer[K comparable, V any] struct { - head atomic.Uint64 - headPadding [xruntime.CacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte - tail atomic.Uint64 - tailPadding [xruntime.CacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte - nodeManager *node.Manager[K, V] - returned unsafe.Pointer - returnedPadding [xruntime.CacheLineSize - 2*8]byte - policyBuffers unsafe.Pointer - returnedSlicePadding [xruntime.CacheLineSize - 8]byte - buffer [capacity]unsafe.Pointer -} - -// New creates a new lossy Buffer. -func New[K comparable, V any](nodeManager *node.Manager[K, V]) *Buffer[K, V] { - pb := &PolicyBuffers[K, V]{ - Returned: make([]node.Node[K, V], 0, capacity), - } - b := &Buffer[K, V]{ - nodeManager: nodeManager, - policyBuffers: unsafe.Pointer(pb), - } - b.returned = b.policyBuffers - return b -} - -// Add lazily publishes the item to the consumer. -// -// item may be lost due to contention. 
-func (b *Buffer[K, V]) Add(n node.Node[K, V]) *PolicyBuffers[K, V] { - head := b.head.Load() - tail := b.tail.Load() - size := tail - head - if size >= capacity { - // full buffer - return nil - } - if b.tail.CompareAndSwap(tail, tail+1) { - // success - //nolint:gosec // there will never be an overflow - index := int(tail & mask) - atomic.StorePointer(&b.buffer[index], n.AsPointer()) - if size == capacity-1 { - // try return new buffer - if !atomic.CompareAndSwapPointer(&b.returned, b.policyBuffers, nil) { - // somebody already get buffer - return nil - } - - pb := (*PolicyBuffers[K, V])(b.policyBuffers) - for i := 0; i < capacity; i++ { - //nolint:gosec // there will never be an overflow - index := int(head & mask) - v := atomic.LoadPointer(&b.buffer[index]) - if v != nil { - // published - pb.Returned = append(pb.Returned, b.nodeManager.FromPointer(v)) - // release - atomic.StorePointer(&b.buffer[index], nil) - } - head++ - } - - b.head.Store(head) - return pb - } - } - - // failed - return nil -} - -// Free returns the processed buffer back and also clears it. -func (b *Buffer[K, V]) Free() { - pb := (*PolicyBuffers[K, V])(b.policyBuffers) - for i := 0; i < len(pb.Returned); i++ { - pb.Returned[i] = nil - } - pb.Returned = pb.Returned[:0] - atomic.StorePointer(&b.returned, b.policyBuffers) -} - -// Clear clears the lossy Buffer and returns it to the default state. -func (b *Buffer[K, V]) Clear() { - for !atomic.CompareAndSwapPointer(&b.returned, b.policyBuffers, nil) { - runtime.Gosched() - } - for i := 0; i < capacity; i++ { - atomic.StorePointer(&b.buffer[i], nil) - } - b.Free() - b.tail.Store(0) - b.head.Store(0) -} diff --git a/vendor/github.com/maypok86/otter/internal/queue/growable.go b/vendor/github.com/maypok86/otter/internal/queue/growable.go deleted file mode 100644 index 80e80656..00000000 --- a/vendor/github.com/maypok86/otter/internal/queue/growable.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package queue - -import ( - "sync" - - "github.com/maypok86/otter/internal/xmath" -) - -type Growable[T any] struct { - mutex sync.Mutex - notEmpty sync.Cond - notFull sync.Cond - buf []T - head int - tail int - count int - minCap int - maxCap int -} - -func NewGrowable[T any](minCap, maxCap uint32) *Growable[T] { - minCap = xmath.RoundUpPowerOf2(minCap) - maxCap = xmath.RoundUpPowerOf2(maxCap) - - g := &Growable[T]{ - buf: make([]T, minCap), - minCap: int(minCap), - maxCap: int(maxCap), - } - - g.notEmpty = *sync.NewCond(&g.mutex) - g.notFull = *sync.NewCond(&g.mutex) - - return g -} - -func (g *Growable[T]) Push(item T) { - g.mutex.Lock() - for g.count == g.maxCap { - g.notFull.Wait() - } - g.push(item) - g.mutex.Unlock() -} - -func (g *Growable[T]) push(item T) { - g.grow() - g.buf[g.tail] = item - g.tail = g.next(g.tail) - g.count++ - g.notEmpty.Signal() -} - -func (g *Growable[T]) Pop() T { - g.mutex.Lock() - for g.count == 0 { - g.notEmpty.Wait() - } - item := g.pop() - g.mutex.Unlock() - return item -} - -func (g *Growable[T]) TryPop() (T, bool) { - var zero T - g.mutex.Lock() - if g.count == 0 { - g.mutex.Unlock() - return zero, false - } - item := g.pop() - g.mutex.Unlock() - return item, true -} - -func (g *Growable[T]) pop() T { - var zero T - - item := g.buf[g.head] - g.buf[g.head] = zero - - g.head = g.next(g.head) - g.count-- - - g.notFull.Signal() - - return item -} - -func (g *Growable[T]) Clear() { - g.mutex.Lock() - for g.count > 0 { - g.pop() - } - g.mutex.Unlock() -} - -func (g *Growable[T]) grow() { - if g.count != len(g.buf) { - return - } - g.resize() -} - -func (g *Growable[T]) resize() { - newBuf := make([]T, g.count<<1) - if g.tail > g.head { - copy(newBuf, g.buf[g.head:g.tail]) - } else { - n := copy(newBuf, g.buf[g.head:]) - copy(newBuf[n:], g.buf[:g.tail]) - } - - g.head = 0 - g.tail = g.count - g.buf = newBuf -} - -func (g *Growable[T]) next(i int) int { - return (i + 1) & (len(g.buf) - 1) -} diff --git a/vendor/github.com/maypok86/otter/internal/s3fifo/ghost.go b/vendor/github.com/maypok86/otter/internal/s3fifo/ghost.go deleted file mode 100644 index 0b263a08..00000000 --- a/vendor/github.com/maypok86/otter/internal/s3fifo/ghost.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package s3fifo - -import ( - "github.com/dolthub/maphash" - "github.com/gammazero/deque" - - "github.com/maypok86/otter/internal/generated/node" -) - -type ghost[K comparable, V any] struct { - q *deque.Deque[uint64] - m map[uint64]struct{} - main *main[K, V] - small *small[K, V] - hasher maphash.Hasher[K] - evictNode func(node.Node[K, V]) -} - -func newGhost[K comparable, V any](main *main[K, V], evictNode func(node.Node[K, V])) *ghost[K, V] { - return &ghost[K, V]{ - q: &deque.Deque[uint64]{}, - m: make(map[uint64]struct{}), - main: main, - hasher: maphash.NewHasher[K](), - evictNode: evictNode, - } -} - -func (g *ghost[K, V]) isGhost(n node.Node[K, V]) bool { - h := g.hasher.Hash(n.Key()) - _, ok := g.m[h] - return ok -} - -func (g *ghost[K, V]) insert(n node.Node[K, V]) { - g.evictNode(n) - - h := g.hasher.Hash(n.Key()) - - if _, ok := g.m[h]; ok { - return - } - - maxLength := g.small.length() + g.main.length() - if maxLength == 0 { - return - } - - for g.q.Len() >= maxLength { - v := g.q.PopFront() - delete(g.m, v) - } - - g.q.PushBack(h) - g.m[h] = struct{}{} -} - -func (g *ghost[K, V]) clear() { - g.q.Clear() - for k := range g.m { - delete(g.m, k) - } -} diff --git a/vendor/github.com/maypok86/otter/internal/s3fifo/main.go b/vendor/github.com/maypok86/otter/internal/s3fifo/main.go deleted file mode 100644 index e57120cc..00000000 --- a/vendor/github.com/maypok86/otter/internal/s3fifo/main.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s3fifo - -import ( - "github.com/maypok86/otter/internal/generated/node" -) - -const maxReinsertions = 20 - -type main[K comparable, V any] struct { - q *queue[K, V] - cost int - maxCost int - evictNode func(node.Node[K, V]) -} - -func newMain[K comparable, V any](maxCost int, evictNode func(node.Node[K, V])) *main[K, V] { - return &main[K, V]{ - q: newQueue[K, V](), - maxCost: maxCost, - evictNode: evictNode, - } -} - -func (m *main[K, V]) insert(n node.Node[K, V]) { - m.q.push(n) - n.MarkMain() - m.cost += int(n.Cost()) -} - -func (m *main[K, V]) evict() { - reinsertions := 0 - for m.cost > 0 { - n := m.q.pop() - - if !n.IsAlive() || n.HasExpired() || n.Frequency() == 0 { - n.Unmark() - m.cost -= int(n.Cost()) - m.evictNode(n) - return - } - - // to avoid the worst case O(n), we remove the 20th reinserted consecutive element. 
- reinsertions++ - if reinsertions >= maxReinsertions { - n.Unmark() - m.cost -= int(n.Cost()) - m.evictNode(n) - return - } - - m.q.push(n) - n.DecrementFrequency() - } -} - -func (m *main[K, V]) delete(n node.Node[K, V]) { - m.cost -= int(n.Cost()) - n.Unmark() - m.q.delete(n) -} - -func (m *main[K, V]) length() int { - return m.q.length() -} - -func (m *main[K, V]) clear() { - m.q.clear() - m.cost = 0 -} - -func (m *main[K, V]) isFull() bool { - return m.cost >= m.maxCost -} diff --git a/vendor/github.com/maypok86/otter/internal/s3fifo/policy.go b/vendor/github.com/maypok86/otter/internal/s3fifo/policy.go deleted file mode 100644 index dd698172..00000000 --- a/vendor/github.com/maypok86/otter/internal/s3fifo/policy.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s3fifo - -import ( - "github.com/maypok86/otter/internal/generated/node" -) - -// Policy is an eviction policy based on S3-FIFO eviction algorithm -// from the following paper: https://dl.acm.org/doi/10.1145/3600006.3613147. -type Policy[K comparable, V any] struct { - small *small[K, V] - main *main[K, V] - ghost *ghost[K, V] - maxCost int - maxAvailableNodeCost int -} - -// NewPolicy creates a new Policy. -func NewPolicy[K comparable, V any](maxCost int, evictNode func(node.Node[K, V])) *Policy[K, V] { - smallMaxCost := maxCost / 10 - mainMaxCost := maxCost - smallMaxCost - - main := newMain[K, V](mainMaxCost, evictNode) - ghost := newGhost(main, evictNode) - small := newSmall(smallMaxCost, main, ghost, evictNode) - ghost.small = small - - return &Policy[K, V]{ - small: small, - main: main, - ghost: ghost, - maxCost: maxCost, - maxAvailableNodeCost: smallMaxCost, - } -} - -// Read updates the eviction policy based on node accesses. -func (p *Policy[K, V]) Read(nodes []node.Node[K, V]) { - for _, n := range nodes { - n.IncrementFrequency() - } -} - -// Add adds node to the eviction policy. -func (p *Policy[K, V]) Add(n node.Node[K, V]) { - if p.ghost.isGhost(n) { - p.main.insert(n) - n.ResetFrequency() - } else { - p.small.insert(n) - } - - for p.isFull() { - p.evict() - } -} - -func (p *Policy[K, V]) evict() { - if p.small.cost >= p.maxCost/10 { - p.small.evict() - return - } - - p.main.evict() -} - -func (p *Policy[K, V]) isFull() bool { - return p.small.cost+p.main.cost > p.maxCost -} - -// Delete deletes node from the eviction policy. -func (p *Policy[K, V]) Delete(n node.Node[K, V]) { - if n.IsSmall() { - p.small.delete(n) - return - } - - if n.IsMain() { - p.main.delete(n) - } -} - -// MaxAvailableCost returns the maximum available cost of the node. -func (p *Policy[K, V]) MaxAvailableCost() int { - return p.maxAvailableNodeCost -} - -// Clear clears the eviction policy and returns it to the default state. 
-func (p *Policy[K, V]) Clear() { - p.ghost.clear() - p.main.clear() - p.small.clear() -} diff --git a/vendor/github.com/maypok86/otter/internal/s3fifo/queue.go b/vendor/github.com/maypok86/otter/internal/s3fifo/queue.go deleted file mode 100644 index 4f8e76de..00000000 --- a/vendor/github.com/maypok86/otter/internal/s3fifo/queue.go +++ /dev/null @@ -1,75 +0,0 @@ -package s3fifo - -import "github.com/maypok86/otter/internal/generated/node" - -type queue[K comparable, V any] struct { - head node.Node[K, V] - tail node.Node[K, V] - len int -} - -func newQueue[K comparable, V any]() *queue[K, V] { - return &queue[K, V]{} -} - -func (q *queue[K, V]) length() int { - return q.len -} - -func (q *queue[K, V]) isEmpty() bool { - return q.length() == 0 -} - -func (q *queue[K, V]) push(n node.Node[K, V]) { - if q.isEmpty() { - q.head = n - q.tail = n - } else { - n.SetPrev(q.tail) - q.tail.SetNext(n) - q.tail = n - } - - q.len++ -} - -func (q *queue[K, V]) pop() node.Node[K, V] { - if q.isEmpty() { - return nil - } - - result := q.head - q.delete(result) - return result -} - -func (q *queue[K, V]) delete(n node.Node[K, V]) { - next := n.Next() - prev := n.Prev() - - if node.Equals(prev, nil) { - if node.Equals(next, nil) && !node.Equals(q.head, n) { - return - } - - q.head = next - } else { - prev.SetNext(next) - n.SetPrev(nil) - } - - if node.Equals(next, nil) { - q.tail = prev - } else { - next.SetPrev(prev) - n.SetNext(nil) - } - - q.len-- -} - -func (q *queue[K, V]) clear() { - for !q.isEmpty() { - q.pop() - } -} diff --git a/vendor/github.com/maypok86/otter/internal/s3fifo/small.go b/vendor/github.com/maypok86/otter/internal/s3fifo/small.go deleted file mode 100644 index 75e39771..00000000 --- a/vendor/github.com/maypok86/otter/internal/s3fifo/small.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package s3fifo - -import ( - "github.com/maypok86/otter/internal/generated/node" -) - -type small[K comparable, V any] struct { - q *queue[K, V] - main *main[K, V] - ghost *ghost[K, V] - cost int - maxCost int - evictNode func(node.Node[K, V]) -} - -func newSmall[K comparable, V any]( - maxCost int, - main *main[K, V], - ghost *ghost[K, V], - evictNode func(node.Node[K, V]), -) *small[K, V] { - return &small[K, V]{ - q: newQueue[K, V](), - main: main, - ghost: ghost, - maxCost: maxCost, - evictNode: evictNode, - } -} - -func (s *small[K, V]) insert(n node.Node[K, V]) { - s.q.push(n) - n.MarkSmall() - s.cost += int(n.Cost()) -} - -func (s *small[K, V]) evict() { - if s.cost == 0 { - return - } - - n := s.q.pop() - s.cost -= int(n.Cost()) - n.Unmark() - if !n.IsAlive() || n.HasExpired() { - s.evictNode(n) - return - } - - if n.Frequency() > 1 { - s.main.insert(n) - for s.main.isFull() { - s.main.evict() - } - n.ResetFrequency() - return - } - - s.ghost.insert(n) -} - -func (s *small[K, V]) delete(n node.Node[K, V]) { - s.cost -= int(n.Cost()) - n.Unmark() - s.q.delete(n) -} - -func (s *small[K, V]) length() int { - return s.q.length() -} - -func (s *small[K, V]) clear() { - s.q.clear() - s.cost = 0 -} diff --git a/vendor/github.com/maypok86/otter/internal/stats/counter.go b/vendor/github.com/maypok86/otter/internal/stats/counter.go deleted file mode 100644 index 0958c6f2..00000000 --- a/vendor/github.com/maypok86/otter/internal/stats/counter.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// Copyright (c) 2021 Andrey Pechkurov -// -// Copyright notice. This code is a fork of xsync.Counter from this file with some changes: -// https://github.com/puzpuzpuz/xsync/blob/main/counter.go -// -// Use of this source code is governed by a MIT license that can be found -// at https://github.com/puzpuzpuz/xsync/blob/main/LICENSE - -package stats - -import ( - "sync" - "sync/atomic" - - "github.com/maypok86/otter/internal/xmath" - "github.com/maypok86/otter/internal/xruntime" -) - -// pool for P tokens. -var tokenPool sync.Pool - -// a P token is used to point at the current OS thread (P) -// on which the goroutine is run; exact identity of the thread, -// as well as P migration tolerance, is not important since -// it's used to as a best effort mechanism for assigning -// concurrent operations (goroutines) to different stripes of -// the counter. -type token struct { - idx uint32 - padding [xruntime.CacheLineSize - 4]byte -} - -// A counter is a striped int64 counter. -// -// Should be preferred over a single atomically updated int64 -// counter in high contention scenarios. -// -// A counter must not be copied after first use. -type counter struct { - shards []cshard - mask uint32 -} - -type cshard struct { - c int64 - padding [xruntime.CacheLineSize - 8]byte -} - -// newCounter creates a new counter instance. -func newCounter() *counter { - nshards := xmath.RoundUpPowerOf2(xruntime.Parallelism()) - return &counter{ - shards: make([]cshard, nshards), - mask: nshards - 1, - } -} - -// increment increments the counter by 1. -func (c *counter) increment() { - c.add(1) -} - -// decrement decrements the counter by 1. -func (c *counter) decrement() { - c.add(-1) -} - -// add adds the delta to the counter. 
-func (c *counter) add(delta int64) { - t, ok := tokenPool.Get().(*token) - if !ok { - t = &token{} - t.idx = xruntime.Fastrand() - } - for { - shard := &c.shards[t.idx&c.mask] - cnt := atomic.LoadInt64(&shard.c) - if atomic.CompareAndSwapInt64(&shard.c, cnt, cnt+delta) { - break - } - // Give a try with another randomly selected shard. - t.idx = xruntime.Fastrand() - } - tokenPool.Put(t) -} - -// value returns the current counter value. -// The returned value may not include all of the latest operations in -// presence of concurrent modifications of the counter. -func (c *counter) value() int64 { - v := int64(0) - for i := 0; i < len(c.shards); i++ { - shard := &c.shards[i] - v += atomic.LoadInt64(&shard.c) - } - return v -} - -// reset resets the counter to zero. -// This method should only be used when it is known that there are -// no concurrent modifications of the counter. -func (c *counter) reset() { - for i := 0; i < len(c.shards); i++ { - shard := &c.shards[i] - atomic.StoreInt64(&shard.c, 0) - } -} diff --git a/vendor/github.com/maypok86/otter/internal/stats/stats.go b/vendor/github.com/maypok86/otter/internal/stats/stats.go deleted file mode 100644 index e3824fa0..00000000 --- a/vendor/github.com/maypok86/otter/internal/stats/stats.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "sync/atomic" - "unsafe" - - "github.com/maypok86/otter/internal/xruntime" -) - -// Stats is a thread-safe statistics collector. -type Stats struct { - hits *counter - misses *counter - rejectedSets *counter - evictedCountersPadding [xruntime.CacheLineSize - 2*unsafe.Sizeof(atomic.Int64{})]byte - evictedCount atomic.Int64 - evictedCost atomic.Int64 -} - -// New creates a new Stats collector. -func New() *Stats { - return &Stats{ - hits: newCounter(), - misses: newCounter(), - rejectedSets: newCounter(), - } -} - -// IncHits increments the hits counter. -func (s *Stats) IncHits() { - if s == nil { - return - } - - s.hits.increment() -} - -// Hits returns the number of cache hits. -func (s *Stats) Hits() int64 { - if s == nil { - return 0 - } - - return s.hits.value() -} - -// IncMisses increments the misses counter. -func (s *Stats) IncMisses() { - if s == nil { - return - } - - s.misses.increment() -} - -// Misses returns the number of cache misses. -func (s *Stats) Misses() int64 { - if s == nil { - return 0 - } - - return s.misses.value() -} - -// IncRejectedSets increments the rejectedSets counter. -func (s *Stats) IncRejectedSets() { - if s == nil { - return - } - - s.rejectedSets.increment() -} - -// RejectedSets returns the number of rejected sets. -func (s *Stats) RejectedSets() int64 { - if s == nil { - return 0 - } - - return s.rejectedSets.value() -} - -// IncEvictedCount increments the evictedCount counter. -func (s *Stats) IncEvictedCount() { - if s == nil { - return - } - - s.evictedCount.Add(1) -} - -// EvictedCount returns the number of evicted entries. 
-func (s *Stats) EvictedCount() int64 { - if s == nil { - return 0 - } - - return s.evictedCount.Load() -} - -// AddEvictedCost adds cost to the evictedCost counter. -func (s *Stats) AddEvictedCost(cost uint32) { - if s == nil { - return - } - - s.evictedCost.Add(int64(cost)) -} - -// EvictedCost returns the sum of costs of evicted entries. -func (s *Stats) EvictedCost() int64 { - if s == nil { - return 0 - } - - return s.evictedCost.Load() -} - -func (s *Stats) Clear() { - if s == nil { - return - } - - s.hits.reset() - s.misses.reset() - s.rejectedSets.reset() - s.evictedCount.Store(0) - s.evictedCost.Store(0) -} diff --git a/vendor/github.com/maypok86/otter/internal/unixtime/unixtime.go b/vendor/github.com/maypok86/otter/internal/unixtime/unixtime.go deleted file mode 100644 index 38f7b19f..00000000 --- a/vendor/github.com/maypok86/otter/internal/unixtime/unixtime.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package unixtime - -import ( - "sync" - "sync/atomic" - "time" -) - -var ( - // We need this package because time.Now() is slower, allocates memory, - // and we don't need a more precise time for the expiry time (and most other operations). - now uint32 - startTime int64 - - mutex sync.Mutex - countInstance int - done chan struct{} -) - -func startTimer() { - done = make(chan struct{}) - atomic.StoreInt64(&startTime, time.Now().Unix()) - atomic.StoreUint32(&now, uint32(0)) - - go func() { - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - for { - select { - case t := <-ticker.C: - //nolint:gosec // there will never be an overflow - atomic.StoreUint32(&now, uint32(t.Unix()-StartTime())) - case <-done: - return - } - } - }() -} - -// Start should be called when the cache instance is created to initialize the timer. -func Start() { - mutex.Lock() - defer mutex.Unlock() - - if countInstance == 0 { - startTimer() - } - - countInstance++ -} - -// Stop should be called when closing and stopping the cache instance to stop the timer. -func Stop() { - mutex.Lock() - defer mutex.Unlock() - - countInstance-- - if countInstance == 0 { - done <- struct{}{} - close(done) - } -} - -// Now returns time as a Unix time, the number of seconds elapsed since program start. -func Now() uint32 { - return atomic.LoadUint32(&now) -} - -// SetNow sets the current time. -// -// NOTE: use only for testing and debugging. -func SetNow(t uint32) { - atomic.StoreUint32(&now, t) -} - -// StartTime returns the start time of the program. -func StartTime() int64 { - return atomic.LoadInt64(&startTime) -} diff --git a/vendor/github.com/maypok86/otter/stats.go b/vendor/github.com/maypok86/otter/stats.go deleted file mode 100644 index c80b849b..00000000 --- a/vendor/github.com/maypok86/otter/stats.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otter - -import ( - "math" - - "github.com/maypok86/otter/internal/stats" -) - -// Stats is a statistics snapshot. -type Stats struct { - hits int64 - misses int64 - rejectedSets int64 - evictedCount int64 - evictedCost int64 -} - -func newStats(s *stats.Stats) Stats { - return Stats{ - hits: negativeToMax(s.Hits()), - misses: negativeToMax(s.Misses()), - rejectedSets: negativeToMax(s.RejectedSets()), - evictedCount: negativeToMax(s.EvictedCount()), - evictedCost: negativeToMax(s.EvictedCost()), - } -} - -// Hits returns the number of cache hits. -func (s Stats) Hits() int64 { - return s.hits -} - -// Misses returns the number of cache misses. -func (s Stats) Misses() int64 { - return s.misses -} - -// Ratio returns the cache hit ratio. -func (s Stats) Ratio() float64 { - requests := checkedAdd(s.hits, s.misses) - if requests == 0 { - return 0.0 - } - return float64(s.hits) / float64(requests) -} - -// RejectedSets returns the number of rejected sets. -func (s Stats) RejectedSets() int64 { - return s.rejectedSets -} - -// EvictedCount returns the number of evicted entries. -func (s Stats) EvictedCount() int64 { - return s.evictedCount -} - -// EvictedCost returns the sum of costs of evicted entries. -func (s Stats) EvictedCost() int64 { - return s.evictedCost -} - -func checkedAdd(a, b int64) int64 { - naiveSum := a + b - if (a^b) < 0 || (a^naiveSum) >= 0 { - // If a and b have different signs or a has the same sign as the result then there was no overflow, return. - return naiveSum - } - // we did over/under flow, if the sign is negative we should return math.MaxInt64 otherwise math.MinInt64. 
- if naiveSum < 0 { - return math.MaxInt64 - } - return math.MinInt64 -} - -func negativeToMax(v int64) int64 { - if v < 0 { - return math.MaxInt64 - } - - return v -} diff --git a/vendor/github.com/maypok86/otter/.gitignore b/vendor/github.com/maypok86/otter/v2/.gitignore similarity index 91% rename from vendor/github.com/maypok86/otter/.gitignore rename to vendor/github.com/maypok86/otter/v2/.gitignore index e95bc5ed..22972882 100644 --- a/vendor/github.com/maypok86/otter/.gitignore +++ b/vendor/github.com/maypok86/otter/v2/.gitignore @@ -17,6 +17,8 @@ /.idea/ *.tmp *coverage.txt +*coverage.svg +*coverage.html *lint.txt **/bin/ .DS_Store diff --git a/vendor/github.com/maypok86/otter/v2/.golangci.yml b/vendor/github.com/maypok86/otter/v2/.golangci.yml new file mode 100644 index 00000000..99dc8d3d --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/.golangci.yml @@ -0,0 +1,110 @@ +version: "2" +run: + concurrency: 8 + timeout: 5m + build-tags: + - integration + modules-download-mode: readonly +output: + formats: + tab: + path: lint.txt + colors: false +formatters: + enable: + - gci + - gofumpt + settings: + gci: + sections: + - standard # Standard lib + - default # External dependencies + - prefix(github.com/maypok86/otter) # Internal packages +linters: + enable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - durationcheck + - errcheck + - errname + - errorlint + - gocheckcompilerdirectives + - gocritic + - godot + - gomoddirectives + - govet + - ineffassign + - misspell + - nakedret + - nilerr + - nilnil + - noctx + - nolintlint + - prealloc + - predeclared + - promlinter + - reassign + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - tagliatelle + - testableexamples + - tparallel + - unconvert + - unparam + - usestdlibvars + - wastedassign + disable: + - gosec + - unused + exclusions: + rules: + - path: _test\.go + linters: + - gosec + - errname + - errcheck + - errorlint + - path: cmd/generator + linters: + - errcheck + - gosec + - linters: + - staticcheck + text: "QF1001:" + settings: + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - hugeParam + - rangeExprCopy + - rangeValCopy + errcheck: + check-type-assertions: true + check-blank: true + exclude-functions: + - io/ioutil.ReadFile + - io.Copy(*bytes.Buffer) + - io.Copy(os.Stdout) + nakedret: + max-func-lines: 1 + revive: + rules: + - name: empty-block + disabled: true + tagliatelle: + case: + rules: + json: snake + yaml: snake +issues: + max-issues-per-linter: 0 + max-same-issues: 0 diff --git a/vendor/github.com/maypok86/otter/v2/CHANGELOG.md b/vendor/github.com/maypok86/otter/v2/CHANGELOG.md new file mode 100644 index 00000000..e6396acd --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/CHANGELOG.md @@ -0,0 +1,252 @@ +## 2.2.1 - 2025-07-22 + +### 🚀 Improvements + +- Added more detailed explanations of the mechanics related to returning `ErrNotFound` ([#136](https://github.com/maypok86/otter/issues/136)) + +### 🐞 Bug Fixes + +- Fix inconsistent singleflight results if the key is invalidated on the way ([#137](https://github.com/maypok86/otter/issues/137)) +- Fix panic during concurrent execution of `InvalidateAll` and `Get` under high contention ([#139](https://github.com/maypok86/otter/issues/139)) + +## 2.2.0 - 2025-07-07 + +This release focuses on improving the integration experience with pull-based metric collectors. 
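As a concrete illustration of the pull-based pattern this entry targets, the sketch below computes a per-scrape hit ratio from two cumulative snapshots. It is a minimal, library-agnostic sketch: the `snapshot` struct and the delta arithmetic are hypothetical stand-ins, not the otter API, and only the hits/(hits+misses) computation mirrors the `Ratio()` method in the deleted `stats.go` hunk earlier in this patch.

```go
package main

import "fmt"

// snapshot is a hypothetical stand-in for a cumulative statistics snapshot;
// it is not the otter stats type, only an illustration of the arithmetic.
type snapshot struct {
	hits, misses int64
}

// ratio mirrors the hits / (hits + misses) computation from the deleted stats.go.
func ratio(s snapshot) float64 {
	requests := s.hits + s.misses
	if requests == 0 {
		return 0.0
	}
	return float64(s.hits) / float64(requests)
}

func main() {
	prev := snapshot{hits: 100, misses: 50} // snapshot taken at the previous scrape
	cur := snapshot{hits: 180, misses: 70}  // snapshot taken at the current scrape

	// A pull-based collector usually reports the delta for the scrape interval,
	// which is the kind of bookkeeping the new Minus method on stats.Stats is meant to simplify.
	delta := snapshot{hits: cur.hits - prev.hits, misses: cur.misses - prev.misses}
	fmt.Printf("interval hit ratio: %.2f\n", ratio(delta)) // prints 0.80
}
```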
+ +### ✨Features + +- Added `IsWeighted`, `IsRecordingStats` and `Stats` methods for cache ([#131](https://github.com/maypok86/otter/issues/131)) +- Added `Minus` and `Plus` methods for `stats.Stats` +- Added `stats.Snapshoter` and `stats.SnapshotRecorder` interfaces + +### 🚀 Improvements + +- Reduced memory consumption of `stats.Counter` by 4 times + +## 2.1.1 - 2025-07-05 + +### 🚀 Improvements + +- `Get` now returns the value from the loader even when an error is returned. ([#132](https://github.com/maypok86/otter/issues/132)) + +## 2.1.0 - 2025-06-29 + +### ✨Features + +- Added `Compute`, `ComputeIfAbsent` and `ComputeIfPresent` methods +- Added `LoadCacheFrom`, `LoadCacheFromFile`, `SaveCacheTo` and `SaveCacheToFile` functions +- Added `Clock` interface and option for time mocking +- Added `Keys` and `Values` iterators +- Added `Hottest` and `Coldest` iterators + +### 🚀 Improvements + +- Slightly reduced memory consumption +- Cache became significantly faster in cases when it's lightly populated +- Reduced number of allocations during refresh + +### 🐞 Bug Fixes + +- Fixed a bug in timer wheel ([#64](https://github.com/Yiling-J/theine-go/issues/64)) +- Added usage of `context.WithoutCancel` during refresh execution ([#124](https://github.com/maypok86/otter/issues/124)) + +## 2.0.0 - 2025-06-18 + +### 📝 Description + +Otter v2 has been completely redesigned for better performance and usability. + +Key improvements: +- Completely rethought API for greater flexibility +- Added [loading](https://maypok86.github.io/otter/user-guide/v2/features/loading/) and [refreshing](https://maypok86.github.io/otter/user-guide/v2/features/refresh/) features ([#26](https://github.com/maypok86/otter/issues/26)) +- Added [entry pinning](https://maypok86.github.io/otter/user-guide/v2/features/eviction/#pinning-entries) +- Replaced eviction policy with adaptive W-TinyLFU, enabling Otter to achieve one of the highest hit rates across **all** workloads. +- Added HashDoS protection against potential attacks +- The task scheduling mechanism has been completely reworked, allowing users to manage it themselves when needed +- Added more efficient write buffer +- Added auto-configurable lossy read buffer +- Optimized hash table +- Test coverage increased to 97% + +### 🚨 Breaking Changes + +1. **Cache Creation** + - Removed `Builder` pattern in favor of canonical `Options` struct + - `MustBuilder` and `Builder` methods are replaced with `Must` and `New` functions + - `Cost` renamed to `Weight` + - Replaced unified `capacity` with explicit `MaximumSize` and `MaximumWeight` parameters + - Replaced `DeletionListener` with `OnDeletion` and `OnAtomicDeletion` handlers + - The ability to create a cache with any combination of features + +2. **Cache API Changes** + - `Get` method renamed to `GetIfPresent` + - `Set` method signature changed to return both value and bool + - `SetIfAbsent` method signature changed to return both value and bool + - `Delete` method renamed to `Invalidate` + - `Clear` method renamed to `InvalidateAll` + - `Size` method renamed to `EstimatedSize` + - `Capacity` method renamed to `GetMaximum` + - `Range` method removed in favor of `All` iterator + - `Has` method removed + - `DeleteByFunc` method removed + - `Stats` method removed in favor of `stats.Recorder` interface + - `Close` method removed + +3. **Expiration** + - Expiration API is now more flexible with `ExpiryCalculator` interface + - `ExpiryCreating`, `ExpiryWriting` and `ExpiryAccessing` functions introduced + +4. 
**Statistics** + - Moved statistics to a separate package `stats` + - `stats.Recorder` interface and `stats.Counter` struct introduced + +5. **Extension** + - `Extension` struct removed in favor of methods from `Cache` + +### ✨Features + +1. **Loading** + - Added `Get` method for obtaining values if necessary + - Added `Loader` interface for retrieving values from the data source + - Added `ErrNotFound` error for indicating missing entries + +2. **Refresh** + - Added flexible refresh API with `RefreshCalculator` interface + - `RefreshCreating`, `RefreshWriting` functions introduced + - Added `Refresh` method for refreshing values asynchronously + +3. **Bulk Operations** + - Added `BulkGet` for loading multiple values at once + - Added `BulkRefresh` for refreshing multiple values asynchronously + - Added `BulkLoader` interface for retrieving multiple values from the data source at once + +4. **Cache Methods** + - Added `All` method for iterating over all entries + - Added `CleanUp` method for performing pending maintenance operations + - Added `WeightedSize` method for weight-based caches + +5. **Enhanced Configuration** + - Added `Executor` option for customizing async operations + - Added `Logger` interface for custom logging + +6. **Entry Management** + - Added `SetExpiresAfter` and `SetRefreshableAfter` for per-entry time control + - Added `GetEntry` and `GetEntryQuietly` methods for accessing cache entries + - Most `Entry`'s methods replaced with public fields for direct access. + +7. **Deletion Notifications** + - Replaced `DeletionListener` with `OnDeletion` and `OnAtomicDeletion` handlers + - Deletion causes renamed for clarity and consistency. + - Added `IsEviction` method + - Added `DeletionEvent` struct + +8. **Performance Improvements** + - Replaced `S3-FIFO` with adaptive `W-TinyLFU` + - Added more efficient write buffer + - Added auto-configurable lossy read buffer + - The task scheduling mechanism has been completely reworked, allowing users to manage it themselves when needed. + +### 🚀 Improvements + +- Added [loading](https://maypok86.github.io/otter/user-guide/v2/features/loading/) and [refreshing](https://maypok86.github.io/otter/user-guide/v2/features/refresh/) features ([#26](https://github.com/maypok86/otter/issues/26)) +- You can now pass a custom implementation of the `stats.Recorder` interface ([#119](https://github.com/maypok86/otter/issues/119)) +- You can now use a TTL shorter than `time.Second` ([#115](https://github.com/maypok86/otter/issues/115)) + +## 1.2.4 - 2024-11-23 + +### 🐞 Bug Fixes + +- Fixed a bug due to changing [gammazero/deque](https://github.com/gammazero/deque/pull/33) contracts without v2 release. ([#112](https://github.com/maypok86/otter/issues/112)) + +## 1.2.3 - 2024-09-30 + +### 🐞 Bug Fixes + +- Added collection of eviction statistics for expired entries. ([#108](https://github.com/maypok86/otter/issues/108)) + +## 1.2.2 - 2024-08-14 + +### ✨️Features + +- Implemented `fmt.Stringer` interface for `DeletionReason` type ([#100](https://github.com/maypok86/otter/issues/100)) + +### 🐞 Bug Fixes + +- Fixed processing of an expired entry in the `Get` method ([#98](https://github.com/maypok86/otter/issues/98)) +- Fixed inconsistent deletion listener behavior ([#98](https://github.com/maypok86/otter/issues/98)) +- Fixed the behavior of `checkedAdd` when over/underflow ([#91](https://github.com/maypok86/otter/issues/91)) + +## 1.2.1 - 2024-04-15 + +### 🐞 Bug Fixes + +- Fixed uint32 capacity overflow. 
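To make the 2.0.0 renames listed under "Cache API Changes" above concrete, here is a minimal v2-only sketch; the v1 names are noted in comments, and exact v1 signatures are deliberately not reproduced here.

```go
package main

import (
	"fmt"

	"github.com/maypok86/otter/v2"
)

func main() {
	// v1 MustBuilder(...).Build() -> v2 Must(&Options{...}).
	cache := otter.Must(&otter.Options[string, int]{MaximumSize: 1_000})

	cache.Set("a", 1)

	// v1 Get -> v2 GetIfPresent.
	if v, ok := cache.GetIfPresent("a"); ok {
		fmt.Println("value:", v)
	}

	// v1 Delete -> v2 Invalidate; v1 Size -> v2 EstimatedSize; v1 Clear -> v2 InvalidateAll.
	cache.Invalidate("a")
	fmt.Println("entries:", cache.EstimatedSize())
	cache.InvalidateAll()
}
```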
+ +## 1.2.0 - 2024-03-12 + +The main innovation of this release is the addition of an `Extension`, which makes it easy to add a huge number of features to otter. + +Usage example: + +```go +key := 1 +... +entry, ok := cache.Extension().GetEntry(key) +... +key := entry.Key() +value := entry.Value() +cost := entry.Cost() +expiration := entry.Expiration() +ttl := entry.TTL() +hasExpired := entry.HasExpired() +``` + +### ✨️Features + +- Added `DeletionListener` to the builder ([#63](https://github.com/maypok86/otter/issues/63)) +- Added `Extension` ([#56](https://github.com/maypok86/otter/issues/56)) + +### 🚀 Improvements + +- Added support for Go 1.22 +- Memory consumption with small cache sizes is reduced to the level of other libraries ([#66](https://github.com/maypok86/otter/issues/66)) + +## 1.1.1 - 2024-03-06 + +### 🐞 Bug Fixes + +- Fixed alignment issues on 32-bit archs + +## 1.1.0 - 2024-03-04 + +The main innovation of this release is node code generation. Thanks to it, the cache will no longer consume more memory due to features that it does not use. For example, if you do not need an expiration policy, then otter will not store the expiration time of each entry. It also allows otter to use more effective expiration policies. + +Another expected improvement is the correction of minor synchronization problems due to the state machine. Now otter, unlike other contention-free caches in Go, should not have them at all. + +### ✨️Features + +- Added `DeleteByFunc` function to cache ([#44](https://github.com/maypok86/otter/issues/44)) +- Added `InitialCapacity` function to builder ([#47](https://github.com/maypok86/otter/issues/47)) +- Added collection of additional statistics ([#57](https://github.com/maypok86/otter/issues/57)) + +### 🚀 Improvements + +- Added proactive queue-based and timer wheel-based expiration policies with O(1) time complexity ([#55](https://github.com/maypok86/otter/issues/55)) +- Added node code generation ([#55](https://github.com/maypok86/otter/issues/55)) +- Fixed the race condition when changing the order of events ([#59](https://github.com/maypok86/otter/issues/59)) +- Reduced memory consumption on small caches + +## 1.0.0 - 2024-01-26 + +### ✨️Features + +- Builder pattern support +- Cleaner API compared to other caches ([#40](https://github.com/maypok86/otter/issues/40)) +- Added `SetIfAbsent` and `Range` functions ([#27](https://github.com/maypok86/otter/issues/27)) +- Statistics collection ([#4](https://github.com/maypok86/otter/issues/4)) +- Cost based eviction +- Support for generics and any comparable types as keys +- Support ttl ([#14](https://github.com/maypok86/otter/issues/14)) +- Excellent speed ([benchmark results](https://github.com/maypok86/otter?tab=readme-ov-file#-performance-)) +- O(1) worst case time complexity for S3-FIFO instead of O(n) +- Improved hit ratio of S3-FIFO on many traces ([simulator results](https://github.com/maypok86/otter?tab=readme-ov-file#-hit-ratio-)) diff --git a/vendor/github.com/maypok86/otter/CODE_OF_CONDUCT.md b/vendor/github.com/maypok86/otter/v2/CODE_OF_CONDUCT.md similarity index 100% rename from vendor/github.com/maypok86/otter/CODE_OF_CONDUCT.md rename to vendor/github.com/maypok86/otter/v2/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/maypok86/otter/CONTRIBUTING.md b/vendor/github.com/maypok86/otter/v2/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/maypok86/otter/CONTRIBUTING.md rename to vendor/github.com/maypok86/otter/v2/CONTRIBUTING.md diff --git 
a/vendor/github.com/maypok86/otter/LICENSE b/vendor/github.com/maypok86/otter/v2/LICENSE similarity index 99% rename from vendor/github.com/maypok86/otter/LICENSE rename to vendor/github.com/maypok86/otter/v2/LICENSE index f49a4e16..f00b6347 100644 --- a/vendor/github.com/maypok86/otter/LICENSE +++ b/vendor/github.com/maypok86/otter/v2/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2023-present Alexey Mayshev and contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/maypok86/otter/Makefile b/vendor/github.com/maypok86/otter/v2/Makefile similarity index 71% rename from vendor/github.com/maypok86/otter/Makefile rename to vendor/github.com/maypok86/otter/v2/Makefile index ef138b48..af886b43 100644 --- a/vendor/github.com/maypok86/otter/Makefile +++ b/vendor/github.com/maypok86/otter/v2/Makefile @@ -1,11 +1,10 @@ -.PHONY: setup -setup: deps ## Setup development environment - cp ./scripts/pre-push.sh .git/hooks/pre-push - chmod +x .git/hooks/pre-push +SHELL := /bin/bash + +SCRIPTS := "./.github/workflows/scripts" .PHONY: deps deps: ## Install all the build and lint dependencies - bash scripts/deps.sh + bash $(SCRIPTS)/deps.sh .PHONY: fmt fmt: ## Run format tools on all go files @@ -22,10 +21,7 @@ test: test.unit ## Run all the tests .PHONY: test.unit test.unit: ## Run all unit tests - @echo 'mode: atomic' > coverage.txt - go test -covermode=atomic -coverprofile=coverage.txt.tmp -coverpkg=./... -v -race ./... - cat coverage.txt.tmp | grep -v -E "/generated/|/cmd/" > coverage.txt - rm coverage.txt.tmp + bash $(SCRIPTS)/run-tests.sh .PHONY: test.32-bit test.32-bit: ## Run tests on 32-bit arch @@ -39,7 +35,10 @@ cover: test.unit ## Run all the tests and opens the coverage report ci: lint test ## Run all the tests and code checks .PHONY: generate -generate: ## Generate files for the project +generate: gennode fmt ## Generate files for the project + +.PHONY: gennode +gennode: ## Generate nodes go run ./cmd/generator ./internal/generated/node .PHONY: clean diff --git a/vendor/github.com/maypok86/otter/v2/README.md b/vendor/github.com/maypok86/otter/v2/README.md new file mode 100644 index 00000000..55dd7657 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/README.md @@ -0,0 +1,234 @@ +
+[README header: tagline "In-memory caching library"; badges: Go Reference, GitHub Release, Mentioned in Awesome Go]
+ +Otter is designed to provide an excellent developer experience while maintaining high performance. It aims to address the shortcomings of its predecessors and incorporates design principles from high-performance libraries in other languages (such as [Caffeine](https://github.com/ben-manes/caffeine)). + +## 📖 Contents + +- [Features](#features) +- [Usage](#usage) + - [Requirements](#requirements) + - [Installation](#installation) + - [Examples](#examples) +- [Performance](#performance) + - [Throughput](#throughput) + - [Hit ratio](#hit-ratio) + - [Memory consumption](#memory-consumption) +- [Projects using Otter](#projects) +- [Related works](#related-works) +- [Contribute](#contribute) +- [License](#license) + +## ✨ Features + +Performance-wise, Otter provides: + +- [High hit rates](https://maypok86.github.io/otter/performance/hit-ratio/) across all workload types via [adaptive W-TinyLFU](https://dl.acm.org/citation.cfm?id=3274816) +- [Excellent throughput](https://maypok86.github.io/otter/performance/throughput/) under high contention on most workload types +- Among the lowest [memory overheads](https://maypok86.github.io/otter/performance/memory-consumption/) across all cache capacities +- Automatic data structures configuration based on contention/parallelism and workload patterns + +Otter also provides a highly configurable caching API, enabling any combination of these optional features: + +- Size-based [eviction](https://maypok86.github.io/otter/user-guide/v2/features/eviction/#size-based) when a maximum is exceeded +- Time-based [expiration](https://maypok86.github.io/otter/user-guide/v2/features/eviction/#time-based) of entries, measured since last access or last write +- [Automatic loading](https://maypok86.github.io/otter/user-guide/v2/features/loading/) of entries into the cache +- [Asynchronously refresh](https://maypok86.github.io/otter/user-guide/v2/features/refresh/) when the first stale request for an entry occurs +- [Writes propagated](https://maypok86.github.io/otter/user-guide/v2/features/compute/) to an external resource +- Accumulation of cache access [statistics](https://maypok86.github.io/otter/user-guide/v2/features/statistics/) +- [Saving cache](https://maypok86.github.io/otter/user-guide/v2/features/persistence/) to a file and loading cache from a file + +## 📚 Usage + +For more details, see our [user's guide](https://maypok86.github.io/otter/user-guide/v2/getting-started/) and browse the [API docs](https://pkg.go.dev/github.com/maypok86/otter) for the latest release. + +### 📋 Requirements + +Otter requires [Go](https://go.dev/) version [1.24](https://go.dev/doc/devel/release#go1.24.0) or above. + +### 🛠️ Installation + +#### With v1 + +```shell +go get -u github.com/maypok86/otter +``` + +#### With v2 + +```shell +go get -u github.com/maypok86/otter/v2 +``` + +See the [release notes](https://github.com/maypok86/otter/releases) for details of the changes. + +Note that otter only supports the two most recent minor versions of Go. + +Otter follows semantic versioning for the documented public API on stable releases. `v2` is the latest stable major version. + +### ✏️ Examples + +Otter uses a plain `Options` struct for cache configuration. Check out [otter.Options](https://pkg.go.dev/github.com/maypok86/otter/v2#Options) for more details. + +Note that all features are optional. 
You can create a cache that acts as a simple hash table wrapper, with near-zero memory overhead for unused features — thanks to [node code generation](https://github.com/maypok86/otter/blob/main/cmd/generator/main.go). + +**API Usage Example** +```go +package main + +import ( + "context" + "time" + + "github.com/maypok86/otter/v2" + "github.com/maypok86/otter/v2/stats" +) + +func main() { + ctx := context.Background() + + // Create statistics counter to track cache operations + counter := stats.NewCounter() + + // Configure cache with: + // - Capacity: 10,000 entries + // - 1 second expiration after last access + // - 500ms refresh interval after writes + // - Stats collection enabled + cache := otter.Must(&otter.Options[string, string]{ + MaximumSize: 10_000, + ExpiryCalculator: otter.ExpiryAccessing[string, string](time.Second), // Reset timer on reads/writes + RefreshCalculator: otter.RefreshWriting[string, string](500 * time.Millisecond), // Refresh after writes + StatsRecorder: counter, // Attach stats collector + }) + + // Phase 1: Test basic expiration + // ----------------------------- + cache.Set("key", "value") // Add initial value + + // Wait for expiration (1 second) + time.Sleep(time.Second) + + // Verify entry expired + if _, ok := cache.GetIfPresent("key"); ok { + panic("key shouldn't be found") // Should be expired + } + + // Phase 2: Test cache stampede protection + // -------------------------------------- + loader := func(ctx context.Context, key string) (string, error) { + time.Sleep(200 * time.Millisecond) // Simulate slow load + return "value1", nil // Return new value + } + + // Concurrent Gets would deduplicate loader calls + value, err := cache.Get(ctx, "key", otter.LoaderFunc[string, string](loader)) + if err != nil { + panic(err) + } + if value != "value1" { + panic("incorrect value") // Should get newly loaded value + } + + // Phase 3: Test background refresh + // -------------------------------- + time.Sleep(500 * time.Millisecond) // Wait until refresh needed + + // New loader that returns updated value + loader = func(ctx context.Context, key string) (string, error) { + time.Sleep(100 * time.Millisecond) // Simulate refresh + return "value2", nil // Return refreshed value + } + + // This triggers async refresh but returns current value + value, err = cache.Get(ctx, "key", otter.LoaderFunc[string, string](loader)) + if err != nil { + panic(err) + } + if value != "value1" { // Should get old value while refreshing + panic("loader shouldn't be called during Get") + } + + // Wait for refresh to complete + time.Sleep(110 * time.Millisecond) + + // Verify refreshed value + v, ok := cache.GetIfPresent("key") + if !ok { + panic("key should be found") // Should still be cached + } + if v != "value2" { // Should now have refreshed value + panic("refresh should be completed") + } +} +``` + +You can find more usage examples [here](https://maypok86.github.io/otter/user-guide/v2/examples/). + +## 📊 Performance + +The benchmark code can be found [here](./benchmarks). + +### 🚀 Throughput + +Throughput benchmarks are a Go port of the caffeine [benchmarks](https://github.com/ben-manes/caffeine/blob/master/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/GetPutBenchmark.java). This microbenchmark compares the throughput of caches on a zipf distribution, which allows to show various inefficient places in implementations. + +You can find results [here](https://maypok86.github.io/otter/performance/throughput/). 
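For readers who want to reproduce the shape of this workload locally, below is a rough, hypothetical harness (not the repository's benchmark code; the key-space size and zipf parameters are arbitrary) built only on `GetIfPresent` and `Set`:

```go
package bench

import (
	"math/rand/v2"
	"testing"

	"github.com/maypok86/otter/v2"
)

// BenchmarkGetPut mimics a zipf-distributed, read-mostly access pattern.
func BenchmarkGetPut(b *testing.B) {
	const keySpace = 1 << 16

	// Bound the cache well below the key space so misses and evictions occur.
	cache := otter.Must(&otter.Options[uint64, uint64]{MaximumSize: keySpace / 10})

	b.RunParallel(func(pb *testing.PB) {
		// Each goroutine draws keys from its own zipf source.
		z := rand.NewZipf(rand.New(rand.NewPCG(rand.Uint64(), rand.Uint64())), 1.01, 1, keySpace-1)
		for pb.Next() {
			k := z.Uint64()
			if _, ok := cache.GetIfPresent(k); !ok {
				cache.Set(k, k) // miss path: populate, like the benchmark's put side
			}
		}
	})
}
```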
+ +### 🎯 Hit ratio + +The hit ratio simulator tests caches on various traces: +1. Synthetic (Zipf distribution) +2. Traditional (widely known and used in various projects and papers) + +You can find results [here](https://maypok86.github.io/otter/performance/hit-ratio/). + +### 💾 Memory consumption + +This benchmark quantifies the additional memory consumption across varying cache capacities. + +You can find results [here](https://maypok86.github.io/otter/performance/memory-consumption/). + +## 🏗️ Projects using Otter + +Below is a list of known projects that use Otter: + +- [Grafana](https://github.com/grafana/grafana): The open and composable observability and data visualization platform. +- [Centrifugo](https://github.com/centrifugal/centrifugo): Scalable real-time messaging server in a language-agnostic way. +- [FrankenPHP](https://github.com/php/frankenphp): The modern PHP app server +- [Unkey](https://github.com/unkeyed/unkey): Open source API management platform + +## 🗃 Related works + +Otter is based on the following papers: + +- [BP-Wrapper: A Framework Making Any Replacement Algorithms (Almost) Lock Contention Free](https://www.researchgate.net/publication/220966845_BP-Wrapper_A_System_Framework_Making_Any_Replacement_Algorithms_Almost_Lock_Contention_Free) +- [TinyLFU: A Highly Efficient Cache Admission Policy](https://dl.acm.org/citation.cfm?id=3149371) +- [Adaptive Software Cache Management](https://dl.acm.org/citation.cfm?id=3274816) +- [Denial of Service via Algorithmic Complexity Attack](https://www.usenix.org/legacy/events/sec03/tech/full_papers/crosby/crosby.pdf) +- [Hashed and Hierarchical Timing Wheels](https://ieeexplore.ieee.org/document/650142) +- [A large scale analysis of hundreds of in-memory cache clusters at Twitter](https://www.usenix.org/system/files/osdi20-yang.pdf) + +## 👏 Contribute + +Contributions are welcome as always, before submitting a new PR please make sure to open a new issue so community members can discuss it. +For more information please see [contribution guidelines](./CONTRIBUTING.md). + +Additionally, you might find existing open issues which can help with improvements. + +This project follows a standard [code of conduct](./CODE_OF_CONDUCT.md) so that you can understand what actions will and will not be tolerated. + +## 📄 License + +This project is Apache 2.0 licensed, as found in the [LICENSE](./LICENSE). diff --git a/vendor/github.com/maypok86/otter/v2/cache.go b/vendor/github.com/maypok86/otter/v2/cache.go new file mode 100644 index 00000000..4abcf17f --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/cache.go @@ -0,0 +1,470 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "context" + "iter" + "runtime" + "time" + + "github.com/maypok86/otter/v2/stats" +) + +// ComputeOp tells the Compute methods what to do. 
+type ComputeOp int + +const ( + // CancelOp signals to Compute to not do anything as a result + // of executing the lambda. If the entry was not present in + // the map, nothing happens, and if it was present, the + // returned value is ignored. + CancelOp ComputeOp = iota + // WriteOp signals to Compute to update the entry to the + // value returned by the lambda, creating it if necessary. + WriteOp + // InvalidateOp signals to Compute to always discard the entry + // from the cache. + InvalidateOp +) + +var computeOpStrings = []string{ + "CancelOp", + "WriteOp", + "InvalidateOp", +} + +// String implements [fmt.Stringer] interface. +func (co ComputeOp) String() string { + if co >= 0 && int(co) < len(computeOpStrings) { + return computeOpStrings[co] + } + return "" +} + +// Cache is an in-memory cache implementation that supports full concurrency of retrievals and multiple ways to bound the cache. +type Cache[K comparable, V any] struct { + cache *cache[K, V] +} + +// Must creates a configured [Cache] instance or +// panics if invalid parameters were specified. +// +// This method does not alter the state of the [Options] instance, so it can be invoked +// again to create multiple independent caches. +func Must[K comparable, V any](o *Options[K, V]) *Cache[K, V] { + c, err := New(o) + if err != nil { + panic(err) + } + return c +} + +// New creates a configured [Cache] instance or +// returns an error if invalid parameters were specified. +// +// This method does not alter the state of the [Options] instance, so it can be invoked +// again to create multiple independent caches. +func New[K comparable, V any](o *Options[K, V]) (*Cache[K, V], error) { + if o == nil { + o = &Options[K, V]{} + } + + if err := o.validate(); err != nil { + return nil, err + } + + cacheImpl := newCache(o) + c := &Cache[K, V]{ + cache: cacheImpl, + } + runtime.AddCleanup(c, func(cacheImpl *cache[K, V]) { + cacheImpl.close() + }, cacheImpl) + + return c, nil +} + +// GetIfPresent returns the value associated with the key in this cache. +func (c *Cache[K, V]) GetIfPresent(key K) (V, bool) { + return c.cache.GetIfPresent(key) +} + +// GetEntry returns the cache entry associated with the key in this cache. +func (c *Cache[K, V]) GetEntry(key K) (Entry[K, V], bool) { + return c.cache.GetEntry(key) +} + +// GetEntryQuietly returns the cache entry associated with the key in this cache. +// +// Unlike GetEntry, this function does not produce any side effects +// such as updating statistics or the eviction policy. +func (c *Cache[K, V]) GetEntryQuietly(key K) (Entry[K, V], bool) { + return c.cache.GetEntryQuietly(key) +} + +// Set associates the value with the key in this cache. +// +// If the specified key is not already associated with a value, then it returns new value and true. +// +// If the specified key is already associated with a value, then it returns existing value and false. +func (c *Cache[K, V]) Set(key K, value V) (V, bool) { + return c.cache.Set(key, value) +} + +// SetIfAbsent if the specified key is not already associated with a value associates it with the given value. +// +// If the specified key is not already associated with a value, then it returns new value and true. +// +// If the specified key is already associated with a value, then it returns existing value and false. 
+func (c *Cache[K, V]) SetIfAbsent(key K, value V) (V, bool) { + return c.cache.SetIfAbsent(key, value) +} + +// Compute either sets the computed new value for the key, +// invalidates the value for the key, or does nothing, based on +// the returned [ComputeOp]. When the op returned by remappingFunc +// is [WriteOp], the value is updated to the new value. If +// it is [InvalidateOp], the entry is removed from the cache +// altogether. And finally, if the op is [CancelOp] then the +// entry is left as-is. In other words, if it did not already +// exist, it is not created, and if it did exist, it is not +// updated. This is useful to synchronously execute some +// operation on the value without incurring the cost of +// updating the cache every time. +// +// The ok result indicates whether the entry is present in the cache after the compute operation. +// The actualValue result contains the value of the cache +// if a corresponding entry is present, or the zero value +// otherwise. You can think of these results as equivalent to regular key-value lookups in a map. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the remappingFunc executes. Consider +// this when the function includes long-running operations. +func (c *Cache[K, V]) Compute( + key K, + remappingFunc func(oldValue V, found bool) (newValue V, op ComputeOp), +) (actualValue V, ok bool) { + return c.cache.Compute(key, remappingFunc) +} + +// ComputeIfAbsent returns the existing value for the key if +// present. Otherwise, it tries to compute the value using the +// provided function. If mappingFunc returns true as the cancel value, the computation is cancelled and the zero value +// for type V is returned. +// +// The ok result indicates whether the entry is present in the cache after the compute operation. +// The actualValue result contains the value of the cache +// if a corresponding entry is present, or the zero value +// otherwise. You can think of these results as equivalent to regular key-value lookups in a map. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (c *Cache[K, V]) ComputeIfAbsent( + key K, + mappingFunc func() (newValue V, cancel bool), +) (actualValue V, ok bool) { + return c.cache.ComputeIfAbsent(key, mappingFunc) +} + +// ComputeIfPresent returns the zero value for type V if the key is not found. +// Otherwise, it tries to compute the value using the provided function. +// +// ComputeIfPresent either sets the computed new value for the key, +// invalidates the value for the key, or does nothing, based on +// the returned [ComputeOp]. When the op returned by remappingFunc +// is [WriteOp], the value is updated to the new value. If +// it is [InvalidateOp], the entry is removed from the cache +// altogether. And finally, if the op is [CancelOp] then the +// entry is left as-is. In other words, if it did not already +// exist, it is not created, and if it did exist, it is not +// updated. This is useful to synchronously execute some +// operation on the value without incurring the cost of +// updating the cache every time. +// +// The ok result indicates whether the entry is present in the cache after the compute operation. 
+// The actualValue result contains the value of the cache +// if a corresponding entry is present, or the zero value +// otherwise. You can think of these results as equivalent to regular key-value lookups in a map. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (c *Cache[K, V]) ComputeIfPresent( + key K, + remappingFunc func(oldValue V) (newValue V, op ComputeOp), +) (actualValue V, ok bool) { + return c.cache.ComputeIfPresent(key, remappingFunc) +} + +// SetExpiresAfter specifies that the entry should be automatically removed from the cache once the duration has +// elapsed. The expiration policy determines when the entry's age is reset. +func (c *Cache[K, V]) SetExpiresAfter(key K, expiresAfter time.Duration) { + c.cache.SetExpiresAfter(key, expiresAfter) +} + +// SetRefreshableAfter specifies that each entry should be eligible for reloading once a fixed duration has elapsed. +// The refresh policy determines when the entry's age is reset. +func (c *Cache[K, V]) SetRefreshableAfter(key K, refreshableAfter time.Duration) { + c.cache.SetRefreshableAfter(key, refreshableAfter) +} + +// Get returns the value associated with key in this cache, obtaining that value from loader if necessary. +// The method improves upon the conventional "if cached, return; otherwise create, cache and return" pattern. +// +// Get can return an [ErrNotFound] error if the [Loader] returns it. +// This means that the entry was not found in the data source. +// This allows the cache to recognize when a record is missing from the data source +// and subsequently delete the cached entry. +// It also enables proper metric collection, as the cache doesn't classify [ErrNotFound] as a load error. +// +// If another call to Get is currently loading the value for key, +// simply waits for that goroutine to finish and returns its loaded value. Note that +// multiple goroutines can concurrently load values for distinct keys. +// +// No observable state associated with this cache is modified until loading completes. +// +// WARNING: When performing a refresh (see [RefreshCalculator]), +// the [Loader] will receive a context wrapped in [context.WithoutCancel]. +// If you need to control refresh cancellation, you can use closures or values stored in the context. +// +// WARNING: [Loader] must not attempt to update any mappings of this cache directly. +// +// WARNING: For any given key, every loader used with it should compute the same value. +// Otherwise, a call that passes one loader may return the result of another call +// with a differently behaving loader. For example, a call that requests a short timeout +// for an RPC may wait for a similar call that requests a long timeout, or a call by an +// unprivileged user may return a resource accessible only to a privileged user making a similar call. +func (c *Cache[K, V]) Get(ctx context.Context, key K, loader Loader[K, V]) (V, error) { + return c.cache.Get(ctx, key, loader) +} + +// BulkGet returns the value associated with key in this cache, obtaining that value from loader if necessary. +// The method improves upon the conventional "if cached, return; otherwise create, cache and return" pattern. +// +// If another call to Get (BulkGet) is currently loading the value for key, +// simply waits for that goroutine to finish and returns its loaded value. 
Note that +// multiple goroutines can concurrently load values for distinct keys. +// +// No observable state associated with this cache is modified until loading completes. +// +// NOTE: duplicate elements in keys will be ignored. +// +// WARNING: When performing a refresh (see [RefreshCalculator]), +// the [BulkLoader] will receive a context wrapped in [context.WithoutCancel]. +// If you need to control refresh cancellation, you can use closures or values stored in the context. +// +// WARNING: [BulkLoader] must not attempt to update any mappings of this cache directly. +// +// WARNING: For any given key, every bulkLoader used with it should compute the same value. +// Otherwise, a call that passes one bulkLoader may return the result of another call +// with a differently behaving bulkLoader. For example, a call that requests a short timeout +// for an RPC may wait for a similar call that requests a long timeout, or a call by an +// unprivileged user may return a resource accessible only to a privileged user making a similar call. +func (c *Cache[K, V]) BulkGet(ctx context.Context, keys []K, bulkLoader BulkLoader[K, V]) (map[K]V, error) { + return c.cache.BulkGet(ctx, keys, bulkLoader) +} + +// Refresh loads a new value for the key, asynchronously. While the new value is loading the +// previous value (if any) will continue to be returned by any Get unless it is evicted. +// If the new value is loaded successfully, it will replace the previous value in the cache; +// If refreshing returned an error, the previous value will remain, +// and the error will be logged using [Logger] (if it's not [ErrNotFound]) and swallowed. If another goroutine is currently +// loading the value for key, then this method does not perform an additional load. +// +// [Cache] will call Loader.Reload if the cache currently contains a value for the key, +// and Loader.Load otherwise. +// Loading is asynchronous by delegating to the configured Executor. +// +// Refresh returns a channel that will receive the result when it is ready. The returned channel will not be closed. +// +// WARNING: When performing a refresh (see [RefreshCalculator]), +// the [Loader] will receive a context wrapped in [context.WithoutCancel]. +// If you need to control refresh cancellation, you can use closures or values stored in the context. +// +// WARNING: If the cache was constructed without [RefreshCalculator], then Refresh will return the nil channel. +// +// WARNING: Loader.Load and Loader.Reload must not attempt to update any mappings of this cache directly. +// +// WARNING: For any given key, every loader used with it should compute the same value. +// Otherwise, a call that passes one loader may return the result of another call +// with a differently behaving loader. For example, a call that requests a short timeout +// for an RPC may wait for a similar call that requests a long timeout, or a call by an +// unprivileged user may return a resource accessible only to a privileged user making a similar call. +func (c *Cache[K, V]) Refresh(ctx context.Context, key K, loader Loader[K, V]) <-chan RefreshResult[K, V] { + return c.cache.Refresh(ctx, key, loader) +} + +// BulkRefresh loads a new value for each key, asynchronously. While the new value is loading the +// previous value (if any) will continue to be returned by any Get unless it is evicted. 
+// If the new value is loaded successfully, it will replace the previous value in the cache; +// If refreshing returned an error, the previous value will remain, +// and the error will be logged using [Logger] and swallowed. If another goroutine is currently +// loading the value for key, then this method does not perform an additional load. +// +// [Cache] will call BulkLoader.BulkReload for existing keys, and BulkLoader.BulkLoad otherwise. +// Loading is asynchronous by delegating to the configured Executor. +// +// BulkRefresh returns a channel that will receive the results when they are ready. The returned channel will not be closed. +// +// NOTE: duplicate elements in keys will be ignored. +// +// WARNING: When performing a refresh (see [RefreshCalculator]), +// the [BulkLoader] will receive a context wrapped in [context.WithoutCancel]. +// If you need to control refresh cancellation, you can use closures or values stored in the context. +// +// WARNING: If the cache was constructed without [RefreshCalculator], then BulkRefresh will return the nil channel. +// +// WARNING: BulkLoader.BulkLoad and BulkLoader.BulkReload must not attempt to update any mappings of this cache directly. +// +// WARNING: For any given key, every bulkLoader used with it should compute the same value. +// Otherwise, a call that passes one bulkLoader may return the result of another call +// with a differently behaving loader. For example, a call that requests a short timeout +// for an RPC may wait for a similar call that requests a long timeout, or a call by an +// unprivileged user may return a resource accessible only to a privileged user making a similar call. +func (c *Cache[K, V]) BulkRefresh(ctx context.Context, keys []K, bulkLoader BulkLoader[K, V]) <-chan []RefreshResult[K, V] { + return c.cache.BulkRefresh(ctx, keys, bulkLoader) +} + +// Invalidate discards any cached value for the key. +// +// Returns previous value if any. The invalidated result reports whether the key was +// present. +func (c *Cache[K, V]) Invalidate(key K) (value V, invalidated bool) { + return c.cache.Invalidate(key) +} + +// All returns an iterator over all key-value pairs in the cache. +// The iteration order is not specified and is not guaranteed to be the same from one call to the next. +// +// Iterator is at least weakly consistent: he is safe for concurrent use, +// but if the cache is modified (including by eviction) after the iterator is +// created, it is undefined which of the changes (if any) will be reflected in that iterator. +func (c *Cache[K, V]) All() iter.Seq2[K, V] { + return c.cache.All() +} + +// Keys returns an iterator over all keys in the cache. +// The iteration order is not specified and is not guaranteed to be the same from one call to the next. +// +// Iterator is at least weakly consistent: he is safe for concurrent use, +// but if the cache is modified (including by eviction) after the iterator is +// created, it is undefined which of the changes (if any) will be reflected in that iterator. +func (c *Cache[K, V]) Keys() iter.Seq[K] { + return c.cache.Keys() +} + +// Values returns an iterator over all values in the cache. +// The iteration order is not specified and is not guaranteed to be the same from one call to the next. +// +// Iterator is at least weakly consistent: he is safe for concurrent use, +// but if the cache is modified (including by eviction) after the iterator is +// created, it is undefined which of the changes (if any) will be reflected in that iterator. 
+func (c *Cache[K, V]) Values() iter.Seq[V] { + return c.cache.Values() +} + +// InvalidateAll discards all entries in the cache. The behavior of this operation is undefined for an entry +// that is being loaded (or reloaded) and is otherwise not present. +func (c *Cache[K, V]) InvalidateAll() { + c.cache.InvalidateAll() +} + +// CleanUp performs any pending maintenance operations needed by the cache. Exactly which activities are +// performed -- if any -- is implementation-dependent. +func (c *Cache[K, V]) CleanUp() { + c.cache.CleanUp() +} + +// SetMaximum specifies the maximum total size of this cache. This value may be interpreted as the weighted +// or unweighted threshold size based on how this cache was constructed. If the cache currently +// exceeds the new maximum size this operation eagerly evict entries until the cache shrinks to +// the appropriate size. +func (c *Cache[K, V]) SetMaximum(maximum uint64) { + c.cache.SetMaximum(maximum) +} + +// GetMaximum returns the maximum total weighted or unweighted size of this cache, depending on how the +// cache was constructed. If this cache does not use a (weighted) size bound, then the method will return math.MaxUint64. +func (c *Cache[K, V]) GetMaximum() uint64 { + return c.cache.GetMaximum() +} + +// EstimatedSize returns the approximate number of entries in this cache. The value returned is an estimate; the +// actual count may differ if there are concurrent insertions or deletions, or if some entries are +// pending deletion due to expiration. In the case of stale entries +// this inaccuracy can be mitigated by performing a CleanUp first. +func (c *Cache[K, V]) EstimatedSize() int { + return c.cache.EstimatedSize() +} + +// IsWeighted returns whether the cache is bounded by a maximum size or maximum weight. +func (c *Cache[K, V]) IsWeighted() bool { + return c.cache.IsWeighted() +} + +// WeightedSize returns the approximate accumulated weight of entries in this cache. If this cache does not +// use a weighted size bound, then the method will return 0. +func (c *Cache[K, V]) WeightedSize() uint64 { + return c.cache.WeightedSize() +} + +// IsRecordingStats returns whether the cache statistics are being accumulated. +func (c *Cache[K, V]) IsRecordingStats() bool { + return c.cache.IsRecordingStats() +} + +// Stats returns a current snapshot of this cache's cumulative statistics. +// All statistics are initialized to zero and are monotonically increasing over the lifetime of the cache. +// Due to the performance penalty of maintaining statistics, +// some implementations may not record the usage history immediately or at all. +// +// NOTE: If your [stats.Recorder] implementation doesn't also implement [stats.Snapshoter], +// this method will always return a zero-value snapshot. +func (c *Cache[K, V]) Stats() stats.Stats { + return c.cache.Stats() +} + +// Hottest returns an iterator for ordered traversal of the cache entries. The order of +// iteration is from the entries most likely to be retained (hottest) to the entries least +// likely to be retained (coldest). This order is determined by the eviction policy's best guess +// at the start of the iteration. +// +// WARNING: Beware that this iteration is performed within the eviction policy's exclusive lock, so the +// iteration should be short and simple. While the iteration is in progress further eviction +// maintenance will be halted. 
+func (c *Cache[K, V]) Hottest() iter.Seq[Entry[K, V]] { + return c.cache.Hottest() +} + +// Coldest returns an iterator for ordered traversal of the cache entries. The order of +// iteration is from the entries least likely to be retained (coldest) to the entries most +// likely to be retained (hottest). This order is determined by the eviction policy's best guess +// at the start of the iteration. +// +// WARNING: Beware that this iteration is performed within the eviction policy's exclusive lock, so the +// iteration should be short and simple. While the iteration is in progress further eviction +// maintenance will be halted. +func (c *Cache[K, V]) Coldest() iter.Seq[Entry[K, V]] { + return c.cache.Coldest() +} + +func (c *Cache[K, V]) has(key K) bool { + return c.cache.has(key) +} diff --git a/vendor/github.com/maypok86/otter/v2/cache_impl.go b/vendor/github.com/maypok86/otter/v2/cache_impl.go new file mode 100644 index 00000000..d0fa6960 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/cache_impl.go @@ -0,0 +1,1868 @@ +// Copyright (c) 2023 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "cmp" + "context" + "errors" + "fmt" + "iter" + "math" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/maypok86/otter/v2/internal/deque/queue" + "github.com/maypok86/otter/v2/internal/expiration" + "github.com/maypok86/otter/v2/internal/generated/node" + "github.com/maypok86/otter/v2/internal/hashmap" + "github.com/maypok86/otter/v2/internal/lossy" + "github.com/maypok86/otter/v2/internal/xiter" + "github.com/maypok86/otter/v2/internal/xmath" + "github.com/maypok86/otter/v2/internal/xruntime" + "github.com/maypok86/otter/v2/stats" +) + +const ( + unreachableExpiresAt = int64(xruntime.MaxDuration) + unreachableRefreshableAt = int64(xruntime.MaxDuration) + noTime = int64(0) + + minWriteBufferSize = 4 + writeBufferRetries = 100 +) + +const ( + // A drain is not taking place. + idle uint32 = 0 + // A drain is required due to a pending write modification. + required uint32 = 1 + // A drain is in progress and will transition to idle. + processingToIdle uint32 = 2 + // A drain is in progress and will transition to required. + processingToRequired uint32 = 3 +) + +var ( + maxWriteBufferSize uint32 + maxStripedBufferSize int +) + +func init() { + parallelism := xruntime.Parallelism() + roundedParallelism := int(xmath.RoundUpPowerOf2(parallelism)) + //nolint:gosec // there will never be an overflow + maxWriteBufferSize = uint32(128 * roundedParallelism) + maxStripedBufferSize = 4 * roundedParallelism +} + +func zeroValue[V any]() V { + var v V + return v +} + +// cache is a structure performs a best-effort bounding of a hash table using eviction algorithm +// to determine which entries to evict when the capacity is exceeded. 
+type cache[K comparable, V any] struct { + drainStatus atomic.Uint32 + _ [xruntime.CacheLineSize - 4]byte + nodeManager *node.Manager[K, V] + hashmap *hashmap.Map[K, V, node.Node[K, V]] + evictionPolicy *policy[K, V] + expirationPolicy *expiration.Variable[K, V] + stats stats.Recorder + statsSnapshoter stats.Snapshoter + logger Logger + clock timeSource + statsClock *realSource + readBuffer *lossy.Striped[K, V] + writeBuffer *queue.MPSC[task[K, V]] + executor func(fn func()) + singleflight *group[K, V] + evictionMutex sync.Mutex + doneClose chan struct{} + weigher func(key K, value V) uint32 + onDeletion func(e DeletionEvent[K, V]) + onAtomicDeletion func(e DeletionEvent[K, V]) + expiryCalculator ExpiryCalculator[K, V] + refreshCalculator RefreshCalculator[K, V] + taskPool sync.Pool + hasDefaultExecutor bool + withTime bool + withExpiration bool + withRefresh bool + withEviction bool + isWeighted bool + withMaintenance bool + withStats bool +} + +// newCache returns a new cache instance based on the settings from Options. +func newCache[K comparable, V any](o *Options[K, V]) *cache[K, V] { + withWeight := o.MaximumWeight > 0 + nodeManager := node.NewManager[K, V](node.Config{ + WithSize: o.MaximumSize > 0, + WithExpiration: o.ExpiryCalculator != nil, + WithRefresh: o.RefreshCalculator != nil, + WithWeight: withWeight, + }) + + maximum := o.getMaximum() + withEviction := maximum > 0 + + withStats := o.StatsRecorder != nil + if withStats { + _, ok := o.StatsRecorder.(*stats.NoopRecorder) + withStats = !ok + } + statsRecorder := o.StatsRecorder + if !withStats { + statsRecorder = &stats.NoopRecorder{} + } + var statsSnapshoter stats.Snapshoter + if snapshoter, ok := statsRecorder.(stats.Snapshoter); ok { + statsSnapshoter = snapshoter + } else { + statsSnapshoter = &stats.NoopRecorder{} + } + + c := &cache[K, V]{ + nodeManager: nodeManager, + hashmap: hashmap.NewWithSize[K, V, node.Node[K, V]](nodeManager, o.getInitialCapacity()), + stats: statsRecorder, + statsSnapshoter: statsSnapshoter, + logger: o.getLogger(), + singleflight: &group[K, V]{}, + executor: o.getExecutor(), + hasDefaultExecutor: o.Executor == nil, + weigher: o.getWeigher(), + onDeletion: o.OnDeletion, + onAtomicDeletion: o.OnAtomicDeletion, + clock: newTimeSource(o.Clock), + statsClock: &realSource{}, + expiryCalculator: o.ExpiryCalculator, + refreshCalculator: o.RefreshCalculator, + isWeighted: withWeight, + withStats: withStats, + } + + if withStats { + c.statsClock.Init() + } + + c.withEviction = withEviction + if c.withEviction { + c.evictionPolicy = newPolicy[K, V](withWeight) + if o.hasInitialCapacity() { + //nolint:gosec // there's no overflow + c.evictionPolicy.sketch.ensureCapacity(min(maximum, uint64(o.getInitialCapacity()))) + } + } + + if o.ExpiryCalculator != nil { + c.expirationPolicy = expiration.NewVariable(nodeManager) + } + + c.withExpiration = o.ExpiryCalculator != nil + c.withRefresh = o.RefreshCalculator != nil + c.withTime = c.withExpiration || c.withRefresh + c.withMaintenance = c.withEviction || c.withExpiration + + if c.withMaintenance { + c.readBuffer = lossy.NewStriped(maxStripedBufferSize, nodeManager) + c.writeBuffer = queue.NewMPSC[task[K, V]](minWriteBufferSize, maxWriteBufferSize) + } + if c.withTime { + c.clock.Init() + } + if c.withExpiration { + c.doneClose = make(chan struct{}) + go c.periodicCleanUp() + } + + if c.withEviction { + c.SetMaximum(maximum) + } + + return c +} + +func (c *cache[K, V]) newNode(key K, value V, old node.Node[K, V]) node.Node[K, V] { + weight := c.weigher(key, 
value) + expiresAt := unreachableExpiresAt + if c.withExpiration && old != nil { + expiresAt = old.ExpiresAt() + } + refreshableAt := unreachableRefreshableAt + if c.withRefresh && old != nil { + refreshableAt = old.RefreshableAt() + } + return c.nodeManager.Create(key, value, expiresAt, refreshableAt, weight) +} + +func (c *cache[K, V]) nodeToEntry(n node.Node[K, V], nanos int64) Entry[K, V] { + nowNano := noTime + if c.withTime { + nowNano = nanos + } + + expiresAt := unreachableExpiresAt + if c.withExpiration { + expiresAt = n.ExpiresAt() + } + + refreshableAt := unreachableRefreshableAt + if c.withRefresh { + refreshableAt = n.RefreshableAt() + } + + return Entry[K, V]{ + Key: n.Key(), + Value: n.Value(), + Weight: n.Weight(), + ExpiresAtNano: expiresAt, + RefreshableAtNano: refreshableAt, + SnapshotAtNano: nowNano, + } +} + +// has checks if there is an item with the given key in the cache. +func (c *cache[K, V]) has(key K) bool { + _, ok := c.GetIfPresent(key) + return ok +} + +// GetIfPresent returns the value associated with the key in this cache. +func (c *cache[K, V]) GetIfPresent(key K) (V, bool) { + nowNano := c.clock.NowNano() + n := c.getNode(key, nowNano) + if n == nil { + return zeroValue[V](), false + } + + return n.Value(), true +} + +// getNode returns the node associated with the key in this cache. +func (c *cache[K, V]) getNode(key K, nowNano int64) node.Node[K, V] { + n := c.hashmap.Get(key) + if n == nil { + c.stats.RecordMisses(1) + if c.drainStatus.Load() == required { + c.scheduleDrainBuffers() + } + return nil + } + if n.HasExpired(nowNano) { + c.stats.RecordMisses(1) + c.scheduleDrainBuffers() + return nil + } + + c.afterRead(n, nowNano, true, true) + + return n +} + +// getNodeQuietly returns the node associated with the key in this cache. +// +// Unlike getNode, this function does not produce any side effects +// such as updating statistics or the eviction policy. +func (c *cache[K, V]) getNodeQuietly(key K, nowNano int64) node.Node[K, V] { + n := c.hashmap.Get(key) + if n == nil || !n.IsAlive() || n.HasExpired(nowNano) { + return nil + } + + return n +} + +func (c *cache[K, V]) afterRead(got node.Node[K, V], nowNano int64, recordHit, calcExpiresAt bool) { + if recordHit { + c.stats.RecordHits(1) + } + + if calcExpiresAt { + c.calcExpiresAtAfterRead(got, nowNano) + } + + delayable := c.skipReadBuffer() || c.readBuffer.Add(got) != lossy.Full + if c.shouldDrainBuffers(delayable) { + c.scheduleDrainBuffers() + } +} + +// Set associates the value with the key in this cache. +// +// If the specified key is not already associated with a value, then it returns new value and true. +// +// If the specified key is already associated with a value, then it returns existing value and false. +func (c *cache[K, V]) Set(key K, value V) (V, bool) { + return c.set(key, value, false) +} + +// SetIfAbsent if the specified key is not already associated with a value associates it with the given value. +// +// If the specified key is not already associated with a value, then it returns new value and true. +// +// If the specified key is already associated with a value, then it returns existing value and false. 
+func (c *cache[K, V]) SetIfAbsent(key K, value V) (V, bool) { + return c.set(key, value, true) +} + +func (c *cache[K, V]) calcExpiresAtAfterRead(n node.Node[K, V], nowNano int64) { + if !c.withExpiration { + return + } + + expiresAfter := c.expiryCalculator.ExpireAfterRead(c.nodeToEntry(n, nowNano)) + c.setExpiresAfterRead(n, nowNano, expiresAfter) +} + +func (c *cache[K, V]) setExpiresAfterRead(n node.Node[K, V], nowNano int64, expiresAfter time.Duration) { + if expiresAfter <= 0 { + return + } + + expiresAt := n.ExpiresAt() + currentDuration := time.Duration(expiresAt - nowNano) + diff := xmath.Abs(int64(expiresAfter - currentDuration)) + if diff > 0 { + n.CASExpiresAt(expiresAt, nowNano+int64(expiresAfter)) + } +} + +// GetEntry returns the cache entry associated with the key in this cache. +func (c *cache[K, V]) GetEntry(key K) (Entry[K, V], bool) { + nowNano := c.clock.NowNano() + n := c.getNode(key, nowNano) + if n == nil { + return Entry[K, V]{}, false + } + return c.nodeToEntry(n, nowNano), true +} + +// GetEntryQuietly returns the cache entry associated with the key in this cache. +// +// Unlike GetEntry, this function does not produce any side effects +// such as updating statistics or the eviction policy. +func (c *cache[K, V]) GetEntryQuietly(key K) (Entry[K, V], bool) { + nowNano := c.clock.NowNano() + n := c.getNodeQuietly(key, nowNano) + if n == nil { + return Entry[K, V]{}, false + } + return c.nodeToEntry(n, nowNano), true +} + +// SetExpiresAfter specifies that the entry should be automatically removed from the cache once the duration has +// elapsed. The expiration policy determines when the entry's age is reset. +func (c *cache[K, V]) SetExpiresAfter(key K, expiresAfter time.Duration) { + if !c.withExpiration || expiresAfter <= 0 { + return + } + + nowNano := c.clock.NowNano() + n := c.hashmap.Get(key) + if n == nil { + return + } + + c.setExpiresAfterRead(n, nowNano, expiresAfter) + c.afterRead(n, nowNano, false, false) +} + +// SetRefreshableAfter specifies that each entry should be eligible for reloading once a fixed duration has elapsed. +// The refresh policy determines when the entry's age is reset. 
+func (c *cache[K, V]) SetRefreshableAfter(key K, refreshableAfter time.Duration) { + if !c.withRefresh || refreshableAfter <= 0 { + return + } + + nowNano := c.clock.NowNano() + n := c.hashmap.Get(key) + if n == nil { + return + } + + entry := c.nodeToEntry(n, nowNano) + currentDuration := entry.RefreshableAfter() + if refreshableAfter > 0 && currentDuration != refreshableAfter { + n.SetRefreshableAt(nowNano + int64(refreshableAfter)) + } +} + +func (c *cache[K, V]) calcExpiresAtAfterWrite(n, old node.Node[K, V], nowNano int64) { + if !c.withExpiration { + return + } + + entry := c.nodeToEntry(n, nowNano) + currentDuration := entry.ExpiresAfter() + var expiresAfter time.Duration + if old == nil { + expiresAfter = c.expiryCalculator.ExpireAfterCreate(entry) + } else { + expiresAfter = c.expiryCalculator.ExpireAfterUpdate(entry, old.Value()) + } + + if expiresAfter > 0 && currentDuration != expiresAfter { + n.SetExpiresAt(nowNano + int64(expiresAfter)) + } +} + +func (c *cache[K, V]) set(key K, value V, onlyIfAbsent bool) (V, bool) { + var old node.Node[K, V] + nowNano := c.clock.NowNano() + n := c.hashmap.Compute(key, func(current node.Node[K, V]) node.Node[K, V] { + old = current + if onlyIfAbsent && current != nil && !current.HasExpired(nowNano) { + // no op + c.calcExpiresAtAfterRead(old, nowNano) + return current + } + // set + return c.atomicSet(key, value, old, nil, nowNano) + }) + if onlyIfAbsent { + if old == nil || old.HasExpired(nowNano) { + c.afterWrite(n, old, nowNano) + return value, true + } + c.afterRead(old, nowNano, false, false) + return old.Value(), false + } + + c.afterWrite(n, old, nowNano) + if old != nil { + return old.Value(), false + } + return value, true +} + +func (c *cache[K, V]) atomicSet(key K, value V, old node.Node[K, V], cl *call[K, V], nowNano int64) node.Node[K, V] { + if cl == nil { + c.singleflight.delete(key) + } + n := c.newNode(key, value, old) + c.calcExpiresAtAfterWrite(n, old, nowNano) + c.calcRefreshableAt(n, old, cl, nowNano) + c.makeRetired(old) + if old != nil { + cause := getCause(old, nowNano, CauseReplacement) + c.notifyAtomicDeletion(old.Key(), old.Value(), cause) + } + return n +} + +//nolint:unparam // it's ok +func (c *cache[K, V]) atomicDelete(key K, old node.Node[K, V], cl *call[K, V], nowNano int64) node.Node[K, V] { + if cl == nil { + c.singleflight.delete(key) + } + if old != nil { + cause := getCause(old, nowNano, CauseInvalidation) + c.makeRetired(old) + c.notifyAtomicDeletion(old.Key(), old.Value(), cause) + } + return nil +} + +// Compute either sets the computed new value for the key, +// invalidates the value for the key, or does nothing, based on +// the returned [ComputeOp]. When the op returned by remappingFunc +// is [WriteOp], the value is updated to the new value. If +// it is [InvalidateOp], the entry is removed from the cache +// altogether. And finally, if the op is [CancelOp] then the +// entry is left as-is. In other words, if it did not already +// exist, it is not created, and if it did exist, it is not +// updated. This is useful to synchronously execute some +// operation on the value without incurring the cost of +// updating the cache every time. +// +// The ok result indicates whether the entry is present in the cache after the compute operation. +// The actualValue result contains the value of the cache +// if a corresponding entry is present, or the zero value otherwise. +// You can think of these results as equivalent to regular key-value lookups in a map. 
+// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the remappingFunc executes. Consider +// this when the function includes long-running operations. +func (c *cache[K, V]) Compute( + key K, + remappingFunc func(oldValue V, found bool) (newValue V, op ComputeOp), +) (V, bool) { + return c.doCompute(key, remappingFunc, c.clock.NowNano(), true) +} + +// ComputeIfAbsent returns the existing value for the key if +// present. Otherwise, it tries to compute the value using the +// provided function. If mappingFunc returns true as the cancel value, the computation is cancelled and the zero value +// for type V is returned. +// +// The ok result indicates whether the entry is present in the cache after the compute operation. +// The actualValue result contains the value of the cache +// if a corresponding entry is present, or the zero value +// otherwise. You can think of these results as equivalent to regular key-value lookups in a map. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. +func (c *cache[K, V]) ComputeIfAbsent( + key K, + mappingFunc func() (newValue V, cancel bool), +) (V, bool) { + nowNano := c.clock.NowNano() + if n := c.getNode(key, nowNano); n != nil { + return n.Value(), true + } + + return c.doCompute(key, func(oldValue V, found bool) (newValue V, op ComputeOp) { + if found { + return oldValue, CancelOp + } + newValue, cancel := mappingFunc() + if cancel { + return zeroValue[V](), CancelOp + } + return newValue, WriteOp + }, nowNano, false) +} + +// ComputeIfPresent returns the zero value for type V if the key is not found. +// Otherwise, it tries to compute the value using the provided function. +// +// ComputeIfPresent either sets the computed new value for the key, +// invalidates the value for the key, or does nothing, based on +// the returned [ComputeOp]. When the op returned by remappingFunc +// is [WriteOp], the value is updated to the new value. If +// it is [InvalidateOp], the entry is removed from the cache +// altogether. And finally, if the op is [CancelOp] then the +// entry is left as-is. In other words, if it did not already +// exist, it is not created, and if it did exist, it is not +// updated. This is useful to synchronously execute some +// operation on the value without incurring the cost of +// updating the cache every time. +// +// The ok result indicates whether the entry is present in the cache after the compute operation. +// The actualValue result contains the value of the cache +// if a corresponding entry is present, or the zero value +// otherwise. You can think of these results as equivalent to regular key-value lookups in a map. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other entries in +// the bucket will be blocked until the valueFn executes. Consider +// this when the function includes long-running operations. 
+func (c *cache[K, V]) ComputeIfPresent( + key K, + remappingFunc func(oldValue V) (newValue V, op ComputeOp), +) (V, bool) { + nowNano := c.clock.NowNano() + if n := c.getNode(key, nowNano); n == nil { + return zeroValue[V](), false + } + + return c.doCompute(key, func(oldValue V, found bool) (newValue V, op ComputeOp) { + if found { + return remappingFunc(oldValue) + } + return zeroValue[V](), CancelOp + }, nowNano, false) +} + +func (c *cache[K, V]) doCompute( + key K, + remappingFunc func(oldValue V, found bool) (newValue V, op ComputeOp), + nowNano int64, + recordStats bool, +) (V, bool) { + var ( + old node.Node[K, V] + op ComputeOp + notValidOp bool + panicErr error + ) + computedNode := c.hashmap.Compute(key, func(oldNode node.Node[K, V]) node.Node[K, V] { + var ( + oldValue V + actualValue V + found bool + ) + if oldNode != nil && !oldNode.HasExpired(nowNano) { + oldValue = oldNode.Value() + found = true + } + old = oldNode + + func() { + defer func() { + if r := recover(); r != nil { + panicErr = newPanicError(r) + } + }() + + actualValue, op = remappingFunc(oldValue, found) + }() + if panicErr != nil { + return oldNode + } + if op == CancelOp { + if oldNode != nil && oldNode.HasExpired(nowNano) { + return c.atomicDelete(key, oldNode, nil, nowNano) + } + return oldNode + } + if op == WriteOp { + return c.atomicSet(key, actualValue, old, nil, nowNano) + } + if op == InvalidateOp { + return c.atomicDelete(key, old, nil, nowNano) + } + notValidOp = true + return oldNode + }) + if panicErr != nil { + panic(panicErr) + } + if notValidOp { + panic(fmt.Sprintf("otter: invalid ComputeOp: %d", op)) + } + if recordStats { + if old != nil && !old.HasExpired(nowNano) { + c.stats.RecordHits(1) + } else { + c.stats.RecordMisses(1) + } + } + switch op { + case CancelOp: + if computedNode == nil { + c.afterDelete(old, nowNano, false) + return zeroValue[V](), false + } + return computedNode.Value(), true + case WriteOp: + c.afterWrite(computedNode, old, nowNano) + case InvalidateOp: + c.afterDelete(old, nowNano, false) + } + if computedNode == nil { + return zeroValue[V](), false + } + return computedNode.Value(), true +} + +func (c *cache[K, V]) afterWrite(n, old node.Node[K, V], nowNano int64) { + if !c.withMaintenance { + if old != nil { + c.notifyDeletion(old.Key(), old.Value(), CauseReplacement) + } + return + } + + if old == nil { + // insert + c.afterWriteTask(c.getTask(n, nil, addReason, causeUnknown)) + return + } + + // update + cause := getCause(old, nowNano, CauseReplacement) + c.afterWriteTask(c.getTask(n, old, updateReason, cause)) +} + +type refreshableKey[K comparable, V any] struct { + key K + old node.Node[K, V] +} + +func (c *cache[K, V]) refreshKey( + ctx context.Context, + rk refreshableKey[K, V], + loader Loader[K, V], + isManual bool, +) <-chan RefreshResult[K, V] { + if !c.withRefresh { + return nil + } + + var ch chan RefreshResult[K, V] + if isManual { + ch = make(chan RefreshResult[K, V], 1) + } + + c.executor(func() { + var refresher func(ctx context.Context, key K) (V, error) + if rk.old != nil { + refresher = func(ctx context.Context, key K) (V, error) { + return loader.Reload(ctx, key, rk.old.Value()) + } + } else { + refresher = loader.Load + } + + cl, shouldLoad := c.singleflight.startCall(rk.key, true) + if shouldLoad { + //nolint:errcheck // there is no need to check error + _ = c.wrapLoad(func() error { + loadCtx := context.WithoutCancel(ctx) + return c.singleflight.doCall(loadCtx, cl, refresher, c.afterDeleteCall) + }) + } + cl.wait() + + if cl.err != nil && 
!cl.isNotFound { + c.logger.Error(ctx, "Returned an error during the refreshing", cl.err) + } + + if isManual { + ch <- RefreshResult[K, V]{ + Key: cl.key, + Value: cl.value, + Err: cl.err, + } + } + }) + + return ch +} + +// Get returns the value associated with key in this cache, obtaining that value from loader if necessary. +// The method improves upon the conventional "if cached, return; otherwise create, cache and return" pattern. +// +// Get can return an ErrNotFound error if the Loader returns it. +// This means that the entry was not found in the data source. +// +// If another call to Get is currently loading the value for key, +// simply waits for that goroutine to finish and returns its loaded value. Note that +// multiple goroutines can concurrently load values for distinct keys. +// +// No observable state associated with this cache is modified until loading completes. +// +// WARNING: Loader.Load must not attempt to update any mappings of this cache directly. +// +// WARNING: For any given key, every loader used with it should compute the same value. +// Otherwise, a call that passes one loader may return the result of another call +// with a differently behaving loader. For example, a call that requests a short timeout +// for an RPC may wait for a similar call that requests a long timeout, or a call by an +// unprivileged user may return a resource accessible only to a privileged user making a similar call. +func (c *cache[K, V]) Get(ctx context.Context, key K, loader Loader[K, V]) (V, error) { + c.singleflight.init() + + nowNano := c.clock.NowNano() + n := c.getNode(key, nowNano) + if n != nil { + if !n.IsFresh(nowNano) { + c.refreshKey(ctx, refreshableKey[K, V]{ + key: n.Key(), + old: n, + }, loader, false) + } + return n.Value(), nil + } + + cl, shouldLoad := c.singleflight.startCall(key, false) + if shouldLoad { + //nolint:errcheck // there is no need to check error + _ = c.wrapLoad(func() error { + return c.singleflight.doCall(ctx, cl, loader.Load, c.afterDeleteCall) + }) + } + cl.wait() + + return cl.value, cl.err +} + +func (c *cache[K, V]) calcRefreshableAt(n, old node.Node[K, V], cl *call[K, V], nowNano int64) { + if !c.withRefresh { + return + } + + var refreshableAfter time.Duration + entry := c.nodeToEntry(n, nowNano) + currentDuration := entry.RefreshableAfter() + //nolint:gocritic // it's ok + if cl != nil && cl.isRefresh && old != nil { + if cl.isNotFound { + return + } + if cl.err != nil { + refreshableAfter = c.refreshCalculator.RefreshAfterReloadFailure(entry, cl.err) + } else { + refreshableAfter = c.refreshCalculator.RefreshAfterReload(entry, old.Value()) + } + } else if old != nil { + refreshableAfter = c.refreshCalculator.RefreshAfterUpdate(entry, old.Value()) + } else { + refreshableAfter = c.refreshCalculator.RefreshAfterCreate(entry) + } + + if refreshableAfter > 0 && currentDuration != refreshableAfter { + n.SetRefreshableAt(nowNano + int64(refreshableAfter)) + } +} + +func (c *cache[K, V]) afterDeleteCall(cl *call[K, V]) { + var ( + inserted bool + deleted bool + old node.Node[K, V] + ) + nowNano := c.clock.NowNano() + newNode := c.hashmap.Compute(cl.key, func(oldNode node.Node[K, V]) node.Node[K, V] { + isCorrectCall := cl.isFake || c.singleflight.deleteCall(cl) + old = oldNode + if isCorrectCall && cl.isNotFound { + deleted = oldNode != nil + return c.atomicDelete(cl.key, oldNode, cl, nowNano) + } + if cl.err != nil { + if cl.isRefresh && oldNode != nil { + c.calcRefreshableAt(oldNode, oldNode, cl, nowNano) + } + return oldNode + } + if 
!isCorrectCall { + return oldNode + } + inserted = true + return c.atomicSet(cl.key, cl.value, old, cl, nowNano) + }) + cl.cancel() + if deleted { + c.afterDelete(old, nowNano, false) + } + if inserted { + c.afterWrite(newNode, old, nowNano) + } +} + +func (c *cache[K, V]) bulkRefreshKeys( + ctx context.Context, + rks []refreshableKey[K, V], + bulkLoader BulkLoader[K, V], + isManual bool, +) <-chan []RefreshResult[K, V] { + if !c.withRefresh { + return nil + } + var ch chan []RefreshResult[K, V] + if isManual { + ch = make(chan []RefreshResult[K, V], 1) + } + if len(rks) == 0 { + if isManual { + ch <- []RefreshResult[K, V]{} + } + return ch + } + + c.executor(func() { + var ( + toLoadCalls map[K]*call[K, V] + toReloadCalls map[K]*call[K, V] + foundCalls []*call[K, V] + results []RefreshResult[K, V] + ) + if isManual { + results = make([]RefreshResult[K, V], 0, len(rks)) + } + i := 0 + for _, rk := range rks { + cl, shouldLoad := c.singleflight.startCall(rk.key, true) + if shouldLoad { + if rk.old != nil { + if toReloadCalls == nil { + toReloadCalls = make(map[K]*call[K, V], len(rks)-i) + } + cl.value = rk.old.Value() + toReloadCalls[rk.key] = cl + } else { + if toLoadCalls == nil { + toLoadCalls = make(map[K]*call[K, V], len(rks)-i) + } + toLoadCalls[rk.key] = cl + } + } else { + if foundCalls == nil { + foundCalls = make([]*call[K, V], 0, len(rks)-i) + } + foundCalls = append(foundCalls, cl) + } + i++ + } + + loadCtx := context.WithoutCancel(ctx) + if len(toLoadCalls) > 0 { + loadErr := c.wrapLoad(func() error { + return c.singleflight.doBulkCall(loadCtx, toLoadCalls, bulkLoader.BulkLoad, c.afterDeleteCall) + }) + if loadErr != nil { + c.logger.Error(ctx, "BulkLoad returned an error", loadErr) + } + + if isManual { + for _, cl := range toLoadCalls { + results = append(results, RefreshResult[K, V]{ + Key: cl.key, + Value: cl.value, + Err: cl.err, + }) + } + } + } + if len(toReloadCalls) > 0 { + reload := func(ctx context.Context, keys []K) (map[K]V, error) { + oldValues := make([]V, 0, len(keys)) + for _, k := range keys { + cl := toReloadCalls[k] + oldValues = append(oldValues, cl.value) + cl.value = zeroValue[V]() + } + return bulkLoader.BulkReload(ctx, keys, oldValues) + } + + reloadErr := c.wrapLoad(func() error { + return c.singleflight.doBulkCall(loadCtx, toReloadCalls, reload, c.afterDeleteCall) + }) + if reloadErr != nil { + c.logger.Error(ctx, "BulkReload returned an error", reloadErr) + } + + if isManual { + for _, cl := range toReloadCalls { + results = append(results, RefreshResult[K, V]{ + Key: cl.key, + Value: cl.value, + Err: cl.err, + }) + } + } + } + for _, cl := range foundCalls { + cl.wait() + if isManual { + results = append(results, RefreshResult[K, V]{ + Key: cl.key, + Value: cl.value, + Err: cl.err, + }) + } + } + if isManual { + ch <- results + } + }) + + return ch +} + +// BulkGet returns the value associated with key in this cache, obtaining that value from loader if necessary. +// The method improves upon the conventional "if cached, return; otherwise create, cache and return" pattern. +// +// If another call to Get (BulkGet) is currently loading the value for key, +// simply waits for that goroutine to finish and returns its loaded value. Note that +// multiple goroutines can concurrently load values for distinct keys. +// +// No observable state associated with this cache is modified until loading completes. +// +// WARNING: BulkLoader.BulkLoad must not attempt to update any mappings of this cache directly. 
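The Get method shown earlier takes a Loader whose Load and Reload shapes can be read off the calls in refreshKey and calcRefreshableAt. A hedged sketch of such a loader follows; the otter calls match the signatures in this file, while the SQL lookup, table and column names are placeholders.

package example

import (
	"context"
	"database/sql"
	"errors"
	"fmt"

	"github.com/maypok86/otter/v2"
)

// userLoader adapts a SQL lookup to the Loader shape used by Get:
// Load runs on a miss, Reload when a stale entry is refreshed.
type userLoader struct {
	db *sql.DB
}

func (l userLoader) Load(ctx context.Context, key string) (string, error) {
	var name string
	err := l.db.QueryRowContext(ctx, "SELECT name FROM users WHERE id = ?", key).Scan(&name)
	if errors.Is(err, sql.ErrNoRows) {
		// Tell the cache the entry is missing at the data source.
		return "", otter.ErrNotFound
	}
	return name, err
}

func (l userLoader) Reload(ctx context.Context, key string, oldValue string) (string, error) {
	// In this sketch a reload is just another load; a real loader could use
	// oldValue for conditional requests (ETags, versions, and so on).
	return l.Load(ctx, key)
}

func getUser(ctx context.Context, c *otter.Cache[string, string], db *sql.DB, id string) {
	name, err := c.Get(ctx, id, userLoader{db: db})
	if errors.Is(err, otter.ErrNotFound) {
		fmt.Println("no such user:", id)
		return
	}
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("user:", name)
}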
+// +// WARNING: For any given key, every bulkLoader used with it should compute the same value. +// Otherwise, a call that passes one bulkLoader may return the result of another call +// with a differently behaving bulkLoader. For example, a call that requests a short timeout +// for an RPC may wait for a similar call that requests a long timeout, or a call by an +// unprivileged user may return a resource accessible only to a privileged user making a similar call. +func (c *cache[K, V]) BulkGet(ctx context.Context, keys []K, bulkLoader BulkLoader[K, V]) (map[K]V, error) { + c.singleflight.init() + + nowNano := c.clock.NowNano() + result := make(map[K]V, len(keys)) + var ( + misses map[K]*call[K, V] + toRefresh []refreshableKey[K, V] + ) + for _, key := range keys { + if _, found := result[key]; found { + continue + } + if _, found := misses[key]; found { + continue + } + + n := c.getNode(key, nowNano) + if n != nil { + if !n.IsFresh(nowNano) { + if toRefresh == nil { + toRefresh = make([]refreshableKey[K, V], 0, len(keys)-len(result)) + } + + toRefresh = append(toRefresh, refreshableKey[K, V]{ + key: key, + old: n, + }) + } + + result[key] = n.Value() + continue + } + + if misses == nil { + misses = make(map[K]*call[K, V], len(keys)-len(result)) + } + misses[key] = nil + } + + c.bulkRefreshKeys(ctx, toRefresh, bulkLoader, false) + if len(misses) == 0 { + return result, nil + } + + var toLoadCalls map[K]*call[K, V] + i := 0 + for key := range misses { + cl, shouldLoad := c.singleflight.startCall(key, false) + if shouldLoad { + if toLoadCalls == nil { + toLoadCalls = make(map[K]*call[K, V], len(misses)-i) + } + toLoadCalls[key] = cl + } + misses[key] = cl + i++ + } + + var loadErr error + if len(toLoadCalls) > 0 { + loadErr = c.wrapLoad(func() error { + return c.singleflight.doBulkCall(ctx, toLoadCalls, bulkLoader.BulkLoad, c.afterDeleteCall) + }) + } + if loadErr != nil { + return result, loadErr + } + + //nolint:prealloc // it's ok + var errsFromCalls []error + i = 0 + for key, cl := range misses { + cl.wait() + i++ + + if cl.err == nil { + result[key] = cl.Value() + continue + } + if _, ok := toLoadCalls[key]; ok || cl.isNotFound { + continue + } + if errsFromCalls == nil { + errsFromCalls = make([]error, 0, len(misses)-i+1) + } + errsFromCalls = append(errsFromCalls, cl.err) + } + + var err error + if len(errsFromCalls) > 0 { + err = errors.Join(errsFromCalls...) + } + + return result, err +} + +func (c *cache[K, V]) wrapLoad(fn func() error) error { + startTime := c.statsClock.NowNano() + + err := fn() + + loadTime := time.Duration(c.statsClock.NowNano() - startTime) + if err == nil || errors.Is(err, ErrNotFound) { + c.stats.RecordLoadSuccess(loadTime) + } else { + c.stats.RecordLoadFailure(loadTime) + } + + var pe *panicError + if errors.As(err, &pe) { + panic(pe) + } + + return err +} + +// Refresh loads a new value for the key, asynchronously. While the new value is loading the +// previous value (if any) will continue to be returned by any Get unless it is evicted. +// If the new value is loaded successfully, it will replace the previous value in the cache; +// If refreshing returned an error, the previous value will remain, +// and the error will be logged using Logger (if it's not ErrNotFound) and swallowed. If another goroutine is currently +// loading the value for key, then this method does not perform an additional load. +// +// cache will call Loader.Reload if the cache currently contains a value for the key, +// and Loader.Load otherwise. 
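The BulkGet defined above pairs the same miss handling with a BulkLoader; as the BulkLoad/BulkReload calls show, keys omitted from the returned map are treated as not found. A sketch with the batched data source stubbed out; only the otter signatures are taken from this file, the rest is illustrative.

package example

import (
	"context"

	"github.com/maypok86/otter/v2"
)

// priceLoader satisfies the BulkLoader shape used by BulkGet and BulkRefresh:
// BulkLoad for misses, BulkReload for refreshes.
type priceLoader struct{}

func (priceLoader) BulkLoad(ctx context.Context, keys []string) (map[string]float64, error) {
	// Keys absent from the returned map are treated as not found.
	return fetchPrices(ctx, keys)
}

func (priceLoader) BulkReload(ctx context.Context, keys []string, oldValues []float64) (map[string]float64, error) {
	return fetchPrices(ctx, keys)
}

// fetchPrices stands in for a real batched lookup.
func fetchPrices(ctx context.Context, keys []string) (map[string]float64, error) {
	out := make(map[string]float64, len(keys))
	for _, k := range keys {
		out[k] = 1.0 // dummy value
	}
	return out, nil
}

func loadPrices(ctx context.Context, c *otter.Cache[string, float64], symbols []string) (map[string]float64, error) {
	// One round trip for all misses; cached symbols are served locally.
	return c.BulkGet(ctx, symbols, priceLoader{})
}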
+// +// WARNING: Loader.Load and Loader.Reload must not attempt to update any mappings of this cache directly. +// +// WARNING: For any given key, every loader used with it should compute the same value. +// Otherwise, a call that passes one loader may return the result of another call +// with a differently behaving loader. For example, a call that requests a short timeout +// for an RPC may wait for a similar call that requests a long timeout, or a call by an +// unprivileged user may return a resource accessible only to a privileged user making a similar call. +func (c *cache[K, V]) Refresh(ctx context.Context, key K, loader Loader[K, V]) <-chan RefreshResult[K, V] { + if !c.withRefresh { + return nil + } + + c.singleflight.init() + + nowNano := c.clock.NowNano() + n := c.getNodeQuietly(key, nowNano) + + return c.refreshKey(ctx, refreshableKey[K, V]{ + key: key, + old: n, + }, loader, true) +} + +// BulkRefresh loads a new value for each key, asynchronously. While the new value is loading the +// previous value (if any) will continue to be returned by any Get unless it is evicted. +// If the new value is loaded successfully, it will replace the previous value in the cache; +// If refreshing returned an error, the previous value will remain, +// and the error will be logged using Logger and swallowed. If another goroutine is currently +// loading the value for key, then this method does not perform an additional load. +// +// cache will call BulkLoader.BulkReload for existing keys, and BulkLoader.BulkLoad otherwise. +// +// WARNING: BulkLoader.BulkLoad and BulkLoader.BulkReload must not attempt to update any mappings of this cache directly. +// +// WARNING: For any given key, every bulkLoader used with it should compute the same value. +// Otherwise, a call that passes one bulkLoader may return the result of another call +// with a differently behaving loader. For example, a call that requests a short timeout +// for an RPC may wait for a similar call that requests a long timeout, or a call by an +// unprivileged user may return a resource accessible only to a privileged user making a similar call. +func (c *cache[K, V]) BulkRefresh(ctx context.Context, keys []K, bulkLoader BulkLoader[K, V]) <-chan []RefreshResult[K, V] { + if !c.withRefresh { + return nil + } + + c.singleflight.init() + + uniq := make(map[K]struct{}, len(keys)) + for _, k := range keys { + uniq[k] = struct{}{} + } + + nowNano := c.clock.NowNano() + toRefresh := make([]refreshableKey[K, V], 0, len(uniq)) + for key := range uniq { + n := c.getNodeQuietly(key, nowNano) + toRefresh = append(toRefresh, refreshableKey[K, V]{ + key: key, + old: n, + }) + } + + return c.bulkRefreshKeys(ctx, toRefresh, bulkLoader, true) +} + +// Invalidate discards any cached value for the key. +// +// Returns previous value if any. The invalidated result reports whether the key was +// present. 
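Because refreshing via Refresh and BulkRefresh above is asynchronous, the returned channel is the only way for a manual caller to observe the outcome, and it is nil when the cache was built without a refresh policy. A small sketch, reusing any Loader such as the one sketched earlier, with a synchronous Invalidate (documented just above) for contrast.

package example

import (
	"context"
	"fmt"

	"github.com/maypok86/otter/v2"
)

// refreshNow forces an asynchronous reload of key and waits for the result.
func refreshNow(ctx context.Context, c *otter.Cache[string, string], loader otter.Loader[string, string], key string) {
	ch := c.Refresh(ctx, key, loader)
	if ch == nil {
		return // the cache was built without a refresh policy
	}
	res := <-ch
	if res.Err != nil {
		fmt.Println("refresh failed, stale value kept:", res.Err)
		return
	}
	fmt.Println("refreshed", res.Key, "->", res.Value)
}

// dropUser removes the entry synchronously and reports what was cached.
func dropUser(c *otter.Cache[string, string], key string) {
	if old, ok := c.Invalidate(key); ok {
		fmt.Println("dropped cached value:", old)
	}
}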
+func (c *cache[K, V]) Invalidate(key K) (value V, invalidated bool) { + var d node.Node[K, V] + nowNano := c.clock.NowNano() + c.hashmap.Compute(key, func(n node.Node[K, V]) node.Node[K, V] { + d = n + return c.atomicDelete(key, d, nil, nowNano) + }) + c.afterDelete(d, nowNano, false) + if d != nil { + return d.Value(), true + } + return zeroValue[V](), false +} + +func (c *cache[K, V]) deleteNodeFromMap(n node.Node[K, V], nowNano int64, cause DeletionCause) node.Node[K, V] { + var deleted node.Node[K, V] + c.hashmap.Compute(n.Key(), func(current node.Node[K, V]) node.Node[K, V] { + c.singleflight.delete(n.Key()) + if current == nil { + return nil + } + if n.AsPointer() == current.AsPointer() { + deleted = current + cause := getCause(deleted, nowNano, cause) + c.makeRetired(deleted) + c.notifyAtomicDeletion(deleted.Key(), deleted.Value(), cause) + return nil + } + return current + }) + return deleted +} + +func (c *cache[K, V]) deleteNode(n node.Node[K, V], nowNano int64) { + c.afterDelete(c.deleteNodeFromMap(n, nowNano, CauseInvalidation), nowNano, true) +} + +func (c *cache[K, V]) afterDelete(deleted node.Node[K, V], nowNano int64, alreadyLocked bool) { + if deleted == nil { + return + } + + if !c.withMaintenance { + c.notifyDeletion(deleted.Key(), deleted.Value(), CauseInvalidation) + return + } + + // delete + cause := getCause(deleted, nowNano, CauseInvalidation) + t := c.getTask(deleted, nil, deleteReason, cause) + if alreadyLocked { + c.runTask(t) + } else { + c.afterWriteTask(t) + } +} + +func (c *cache[K, V]) notifyDeletion(key K, value V, cause DeletionCause) { + if c.onDeletion == nil { + return + } + + c.executor(func() { + c.onDeletion(DeletionEvent[K, V]{ + Key: key, + Value: value, + Cause: cause, + }) + }) +} + +func (c *cache[K, V]) notifyAtomicDeletion(key K, value V, cause DeletionCause) { + if c.onAtomicDeletion == nil { + return + } + + c.onAtomicDeletion(DeletionEvent[K, V]{ + Key: key, + Value: value, + Cause: cause, + }) +} + +func (c *cache[K, V]) periodicCleanUp() { + tick := c.clock.Tick(time.Second) + for { + select { + case <-c.doneClose: + return + case <-tick: + c.CleanUp() + c.clock.ProcessTick() + } + } +} + +func (c *cache[K, V]) evictNode(n node.Node[K, V], nowNanos int64) { + cause := CauseOverflow + if n.HasExpired(nowNanos) { + cause = CauseExpiration + } + + deleted := c.deleteNodeFromMap(n, nowNanos, cause) != nil + + if c.withEviction { + c.evictionPolicy.delete(n) + } + if c.withExpiration { + c.expirationPolicy.Delete(n) + } + + c.makeDead(n) + + if deleted { + c.notifyDeletion(n.Key(), n.Value(), cause) + c.stats.RecordEviction(n.Weight()) + } +} + +func (c *cache[K, V]) nodes() iter.Seq[node.Node[K, V]] { + return func(yield func(node.Node[K, V]) bool) { + c.hashmap.Range(func(n node.Node[K, V]) bool { + nowNano := c.clock.NowNano() + if !n.IsAlive() || n.HasExpired(nowNano) { + c.scheduleDrainBuffers() + return true + } + + return yield(n) + }) + } +} + +func (c *cache[K, V]) entries() iter.Seq[Entry[K, V]] { + return func(yield func(Entry[K, V]) bool) { + for n := range c.nodes() { + if !yield(c.nodeToEntry(n, c.clock.NowNano())) { + return + } + } + } +} + +// All returns an iterator over all entries in the cache. +// +// Iterator is at least weakly consistent: he is safe for concurrent use, +// but if the cache is modified (including by eviction) after the iterator is +// created, it is undefined which of the changes (if any) will be reflected in that iterator. 
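Since the All iterator documented above, together with Keys and Values defined next, returns standard iter.Seq values, they compose directly with range-over-func loops (Go 1.23+). A brief sketch, again assuming a configured *otter.Cache.

package example

import (
	"fmt"
	"strings"

	"github.com/maypok86/otter/v2"
)

// dumpCache prints every live entry; the weakly consistent iterator may or
// may not reflect concurrent writes, as the comments above explain.
func dumpCache(c *otter.Cache[string, int]) {
	for key, value := range c.All() {
		fmt.Printf("%s=%d\n", key, value)
	}

	// Keys alone is enough when only membership matters.
	var b strings.Builder
	for key := range c.Keys() {
		b.WriteString(key)
		b.WriteByte(' ')
	}
	fmt.Println("keys:", b.String())
}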
+func (c *cache[K, V]) All() iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + for n := range c.nodes() { + if !yield(n.Key(), n.Value()) { + return + } + } + } +} + +// Keys returns an iterator over all keys in the cache. +// The iteration order is not specified and is not guaranteed to be the same from one call to the next. +// +// Iterator is at least weakly consistent: he is safe for concurrent use, +// but if the cache is modified (including by eviction) after the iterator is +// created, it is undefined which of the changes (if any) will be reflected in that iterator. +func (c *cache[K, V]) Keys() iter.Seq[K] { + return func(yield func(K) bool) { + for n := range c.nodes() { + if !yield(n.Key()) { + return + } + } + } +} + +// Values returns an iterator over all values in the cache. +// The iteration order is not specified and is not guaranteed to be the same from one call to the next. +// +// Iterator is at least weakly consistent: he is safe for concurrent use, +// but if the cache is modified (including by eviction) after the iterator is +// created, it is undefined which of the changes (if any) will be reflected in that iterator. +func (c *cache[K, V]) Values() iter.Seq[V] { + return func(yield func(V) bool) { + for n := range c.nodes() { + if !yield(n.Value()) { + return + } + } + } +} + +// InvalidateAll discards all entries in the cache. The behavior of this operation is undefined for an entry +// that is being loaded (or reloaded) and is otherwise not present. +func (c *cache[K, V]) InvalidateAll() { + c.evictionMutex.Lock() + + if c.withMaintenance { + c.readBuffer.DrainTo(func(n node.Node[K, V]) {}) + for { + t := c.writeBuffer.TryPop() + if t == nil { + break + } + c.runTask(t) + } + } + // Discard all entries, falling back to one-by-one to avoid excessive lock hold times + nodes := make([]node.Node[K, V], 0, c.EstimatedSize()) + threshold := uint64(maxWriteBufferSize / 2) + c.hashmap.Range(func(n node.Node[K, V]) bool { + nodes = append(nodes, n) + return true + }) + nowNano := c.clock.NowNano() + for len(nodes) > 0 && c.writeBuffer.Size() < threshold { + n := nodes[len(nodes)-1] + nodes = nodes[:len(nodes)-1] + c.deleteNode(n, nowNano) + } + + c.evictionMutex.Unlock() + + for _, n := range nodes { + c.Invalidate(n.Key()) + } +} + +// CleanUp performs any pending maintenance operations needed by the cache. Exactly which activities are +// performed -- if any -- is implementation-dependent. +func (c *cache[K, V]) CleanUp() { + c.performCleanUp(nil) +} + +func (c *cache[K, V]) shouldDrainBuffers(delayable bool) bool { + drainStatus := c.drainStatus.Load() + switch drainStatus { + case idle: + return !delayable + case required: + return true + case processingToIdle, processingToRequired: + return false + default: + panic(fmt.Sprintf("Invalid drain status: %d", drainStatus)) + } +} + +func (c *cache[K, V]) skipReadBuffer() bool { + return !c.withMaintenance || // without read buffer + (!c.withExpiration && c.withEviction && c.evictionPolicy.sketch.isNotInitialized()) +} + +func (c *cache[K, V]) afterWriteTask(t *task[K, V]) { + for i := 0; i < writeBufferRetries; i++ { + if c.writeBuffer.TryPush(t) { + c.scheduleAfterWrite() + return + } + c.scheduleDrainBuffers() + runtime.Gosched() + } + + // In scenarios where the writing goroutines cannot make progress then they attempt to provide + // assistance by performing the eviction work directly. This can resolve cases where the + // maintenance task is scheduled but not running. 
+ c.performCleanUp(t) +} + +func (c *cache[K, V]) scheduleAfterWrite() { + for { + drainStatus := c.drainStatus.Load() + switch drainStatus { + case idle: + c.drainStatus.CompareAndSwap(idle, required) + c.scheduleDrainBuffers() + return + case required: + c.scheduleDrainBuffers() + return + case processingToIdle: + if c.drainStatus.CompareAndSwap(processingToIdle, processingToRequired) { + return + } + case processingToRequired: + return + default: + panic(fmt.Sprintf("Invalid drain status: %d", drainStatus)) + } + } +} + +func (c *cache[K, V]) scheduleDrainBuffers() { + if c.drainStatus.Load() >= processingToIdle { + return + } + + if c.evictionMutex.TryLock() { + drainStatus := c.drainStatus.Load() + if drainStatus >= processingToIdle { + c.evictionMutex.Unlock() + return + } + + c.drainStatus.Store(processingToIdle) + + var token atomic.Uint32 + c.executor(func() { + c.drainBuffers(&token) + }) + + if token.CompareAndSwap(0, 1) { + c.evictionMutex.Unlock() + } + } +} + +func (c *cache[K, V]) drainBuffers(token *atomic.Uint32) { + if c.evictionMutex.TryLock() { + c.maintenance(nil) + c.evictionMutex.Unlock() + c.rescheduleCleanUpIfIncomplete() + } else { + // already locked + if token.CompareAndSwap(0, 1) { + // executor is sync + c.maintenance(nil) + c.evictionMutex.Unlock() + c.rescheduleCleanUpIfIncomplete() + } else { + // executor is async + c.performCleanUp(nil) + } + } +} + +func (c *cache[K, V]) performCleanUp(t *task[K, V]) { + c.evictionMutex.Lock() + c.maintenance(t) + c.evictionMutex.Unlock() + c.rescheduleCleanUpIfIncomplete() +} + +func (c *cache[K, V]) rescheduleCleanUpIfIncomplete() { + if c.drainStatus.Load() != required { + return + } + + // An immediate scheduling cannot be performed on a custom executor because it may use a + // caller-runs policy. This could cause the caller's penalty to exceed the amortized threshold, + // e.g. repeated concurrent writes could result in a retry loop. 
+ if c.hasDefaultExecutor { + c.scheduleDrainBuffers() + return + } +} + +func (c *cache[K, V]) maintenance(t *task[K, V]) { + c.drainStatus.Store(processingToIdle) + + c.drainReadBuffer() + c.drainWriteBuffer() + c.runTask(t) + c.expireNodes() + c.evictNodes() + c.climb() + + if c.drainStatus.Load() != processingToIdle || !c.drainStatus.CompareAndSwap(processingToIdle, idle) { + c.drainStatus.Store(required) + } +} + +func (c *cache[K, V]) drainReadBuffer() { + if c.skipReadBuffer() { + return + } + + c.readBuffer.DrainTo(c.onAccess) +} + +func (c *cache[K, V]) drainWriteBuffer() { + if !c.withMaintenance { + return + } + + for i := uint32(0); i <= maxWriteBufferSize; i++ { + t := c.writeBuffer.TryPop() + if t == nil { + return + } + c.runTask(t) + } + c.drainStatus.Store(processingToRequired) +} + +func (c *cache[K, V]) runTask(t *task[K, V]) { + if t == nil { + return + } + + n := t.node() + switch t.writeReason { + case addReason: + if c.withExpiration && n.IsAlive() { + c.expirationPolicy.Add(n) + } + if c.withEviction { + c.evictionPolicy.add(n, c.evictNode) + } + case updateReason: + old := t.oldNode() + if c.withExpiration { + c.expirationPolicy.Delete(old) + if n.IsAlive() { + c.expirationPolicy.Add(n) + } + } + if c.withEviction { + c.evictionPolicy.update(n, old, c.evictNode) + } + c.notifyDeletion(old.Key(), old.Value(), t.deletionCause) + case deleteReason: + if c.withExpiration { + c.expirationPolicy.Delete(n) + } + if c.withEviction { + c.evictionPolicy.delete(n) + } + c.notifyDeletion(n.Key(), n.Value(), t.deletionCause) + default: + panic(fmt.Sprintf("Invalid task type: %d", t.writeReason)) + } + + c.putTask(t) +} + +func (c *cache[K, V]) onAccess(n node.Node[K, V]) { + if c.withEviction { + c.evictionPolicy.access(n) + } + if c.withExpiration && !node.Equals(n.NextExp(), nil) { + c.expirationPolicy.Delete(n) + if n.IsAlive() { + c.expirationPolicy.Add(n) + } + } +} + +func (c *cache[K, V]) expireNodes() { + if c.withExpiration { + c.expirationPolicy.DeleteExpired(c.clock.NowNano(), c.evictNode) + } +} + +func (c *cache[K, V]) evictNodes() { + if !c.withEviction { + return + } + c.evictionPolicy.evictNodes(c.evictNode) +} + +func (c *cache[K, V]) climb() { + if !c.withEviction { + return + } + c.evictionPolicy.climb() +} + +func (c *cache[K, V]) getTask(n, old node.Node[K, V], writeReason reason, cause DeletionCause) *task[K, V] { + t, ok := c.taskPool.Get().(*task[K, V]) + if !ok { + return &task[K, V]{ + n: n, + old: old, + writeReason: writeReason, + deletionCause: cause, + } + } + t.n = n + t.old = old + t.writeReason = writeReason + t.deletionCause = cause + + return t +} + +func (c *cache[K, V]) putTask(t *task[K, V]) { + t.n = nil + t.old = nil + t.writeReason = unknownReason + t.deletionCause = causeUnknown + c.taskPool.Put(t) +} + +// SetMaximum specifies the maximum total size of this cache. This value may be interpreted as the weighted +// or unweighted threshold size based on how this cache was constructed. If the cache currently +// exceeds the new maximum size this operation eagerly evict entries until the cache shrinks to +// the appropriate size. +func (c *cache[K, V]) SetMaximum(maximum uint64) { + if !c.withEviction { + return + } + c.evictionMutex.Lock() + c.evictionPolicy.setMaximumSize(maximum) + c.maintenance(nil) + c.evictionMutex.Unlock() + c.rescheduleCleanUpIfIncomplete() +} + +// GetMaximum returns the maximum total weighted or unweighted size of this cache, depending on how the +// cache was constructed. 
If this cache does not use a (weighted) size bound, then the method will return math.MaxUint64. +func (c *cache[K, V]) GetMaximum() uint64 { + if !c.withEviction { + return uint64(math.MaxUint64) + } + + c.evictionMutex.Lock() + if c.drainStatus.Load() == required { + c.maintenance(nil) + } + result := c.evictionPolicy.maximum + c.evictionMutex.Unlock() + c.rescheduleCleanUpIfIncomplete() + return result +} + +// close discards all entries in the cache and stop all goroutines. +// +// NOTE: this operation must be performed when no requests are made to the cache otherwise the behavior is undefined. +func (c *cache[K, V]) close() { + if c.withExpiration { + c.doneClose <- struct{}{} + } +} + +// EstimatedSize returns the approximate number of entries in this cache. The value returned is an estimate; the +// actual count may differ if there are concurrent insertions or deletions, or if some entries are +// pending deletion due to expiration. In the case of stale entries +// this inaccuracy can be mitigated by performing a CleanUp first. +func (c *cache[K, V]) EstimatedSize() int { + return c.hashmap.Size() +} + +// IsWeighted returns whether the cache is bounded by a maximum size or maximum weight. +func (c *cache[K, V]) IsWeighted() bool { + return c.isWeighted +} + +// IsRecordingStats returns whether the cache statistics are being accumulated. +func (c *cache[K, V]) IsRecordingStats() bool { + return c.withStats +} + +// Stats returns a current snapshot of this cache's cumulative statistics. +// All statistics are initialized to zero and are monotonically increasing over the lifetime of the cache. +// Due to the performance penalty of maintaining statistics, +// some implementations may not record the usage history immediately or at all. +// +// NOTE: If your [stats.Recorder] implementation doesn't also implement [stats.Snapshoter], +// this method will always return a zero-value snapshot. +func (c *cache[K, V]) Stats() stats.Stats { + return c.statsSnapshoter.Snapshot() +} + +// WeightedSize returns the approximate accumulated weight of entries in this cache. If this cache does not +// use a weighted size bound, then the method will return 0. +func (c *cache[K, V]) WeightedSize() uint64 { + if !c.isWeighted { + return 0 + } + + c.evictionMutex.Lock() + if c.drainStatus.Load() == required { + c.maintenance(nil) + } + result := c.evictionPolicy.weightedSize + c.evictionMutex.Unlock() + c.rescheduleCleanUpIfIncomplete() + return result +} + +// Hottest returns an iterator for ordered traversal of the cache entries. The order of +// iteration is from the entries most likely to be retained (hottest) to the entries least +// likely to be retained (coldest). This order is determined by the eviction policy's best guess +// at the start of the iteration. +// +// WARNING: Beware that this iteration is performed within the eviction policy's exclusive lock, so the +// iteration should be short and simple. While the iteration is in progress further eviction +// maintenance will be halted. +func (c *cache[K, V]) Hottest() iter.Seq[Entry[K, V]] { + return c.evictionOrder(true) +} + +// Coldest returns an iterator for ordered traversal of the cache entries. The order of +// iteration is from the entries least likely to be retained (coldest) to the entries most +// likely to be retained (hottest). This order is determined by the eviction policy's best guess +// at the start of the iteration. 
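The sizing and introspection surface above (EstimatedSize, GetMaximum/SetMaximum, Stats, WeightedSize, Hottest/Coldest) can be exercised without touching cache contents. A sketch, assuming a configured *otter.Cache[string, int]; the hottest-entries loop deliberately stays tiny because, as the warnings note, it runs under the eviction policy's lock.

package example

import (
	"fmt"

	"github.com/maypok86/otter/v2"
)

// inspect prints a read-only snapshot of the cache's state.
func inspect(c *otter.Cache[string, int]) {
	fmt.Println("entries (estimate):", c.EstimatedSize())
	fmt.Println("maximum size:", c.GetMaximum())
	fmt.Printf("stats: %+v\n", c.Stats())

	shown := 0
	for e := range c.Hottest() {
		fmt.Printf("hot: %s=%d (weight %d)\n", e.Key, e.Value, e.Weight)
		shown++
		if shown == 3 {
			break
		}
	}
}

// shrink halves the capacity; entries are evicted eagerly until the cache fits.
func shrink(c *otter.Cache[string, int]) {
	c.SetMaximum(c.GetMaximum() / 2)
}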
+// +// WARNING: Beware that this iteration is performed within the eviction policy's exclusive lock, so the +// iteration should be short and simple. While the iteration is in progress further eviction +// maintenance will be halted. +func (c *cache[K, V]) Coldest() iter.Seq[Entry[K, V]] { + return c.evictionOrder(false) +} + +func (c *cache[K, V]) evictionOrder(hottest bool) iter.Seq[Entry[K, V]] { + if !c.withEviction { + return c.entries() + } + + return func(yield func(Entry[K, V]) bool) { + comparator := func(a node.Node[K, V], b node.Node[K, V]) int { + return cmp.Compare( + c.evictionPolicy.sketch.frequency(a.Key()), + c.evictionPolicy.sketch.frequency(b.Key()), + ) + } + + var seq iter.Seq[node.Node[K, V]] + if hottest { + secondary := xiter.MergeFunc( + c.evictionPolicy.probation.Backward(), + c.evictionPolicy.window.Backward(), + comparator, + ) + seq = xiter.Concat( + c.evictionPolicy.protected.Backward(), + secondary, + ) + } else { + primary := xiter.MergeFunc( + c.evictionPolicy.window.All(), + c.evictionPolicy.probation.All(), + func(a node.Node[K, V], b node.Node[K, V]) int { + return -comparator(a, b) + }, + ) + + seq = xiter.Concat( + primary, + c.evictionPolicy.protected.All(), + ) + } + + c.evictionMutex.Lock() + defer c.evictionMutex.Unlock() + c.maintenance(nil) + + for n := range seq { + nowNano := c.clock.NowNano() + if !n.IsAlive() || n.HasExpired(nowNano) { + continue + } + if !yield(c.nodeToEntry(n, nowNano)) { + return + } + } + } +} + +func (c *cache[K, V]) makeRetired(n node.Node[K, V]) { + if n != nil && c.withMaintenance && n.IsAlive() { + n.Retire() + } +} + +func (c *cache[K, V]) makeDead(n node.Node[K, V]) { + if !c.withMaintenance { + return + } + + if c.withEviction { + c.evictionPolicy.makeDead(n) + } else if !n.IsDead() { + n.Die() + } +} + +func getCause[K comparable, V any](n node.Node[K, V], nowNano int64, cause DeletionCause) DeletionCause { + if n.HasExpired(nowNano) { + return CauseExpiration + } + return cause +} diff --git a/vendor/github.com/maypok86/otter/v2/clock.go b/vendor/github.com/maypok86/otter/v2/clock.go new file mode 100644 index 00000000..1d7aec1d --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/clock.go @@ -0,0 +1,232 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/maypok86/otter/v2/internal/xmath" +) + +// Clock is a time source that +// - Returns a time value representing the number of nanoseconds elapsed since some +// fixed but arbitrary point in time +// - Returns a channel that delivers “ticks” of a clock at intervals. +type Clock interface { + // NowNano returns the number of nanoseconds elapsed since this clock's fixed point of reference. + // + // By default, time.Now().UnixNano() is used. + NowNano() int64 + // Tick returns a channel that delivers “ticks” of a clock at intervals. 
+ // + // The cache uses this method only for proactive expiration and calls Tick(time.Second) in a separate goroutine. + // + // By default, [time.Tick] is used. + Tick(duration time.Duration) <-chan time.Time +} + +type timeSource interface { + Clock + Init() + Sleep(duration time.Duration) + ProcessTick() +} + +func newTimeSource(clock Clock) timeSource { + if clock == nil { + return &realSource{} + } + if r, ok := clock.(*realSource); ok { + return r + } + if f, ok := clock.(*fakeSource); ok { + return f + } + return newCustomSource(clock) +} + +type customSource struct { + clock Clock + isInitialized atomic.Bool +} + +func newCustomSource(clock Clock) *customSource { + return &customSource{ + clock: clock, + } +} + +func (cs *customSource) Init() { + if !cs.isInitialized.Load() { + cs.isInitialized.Store(true) + } +} + +func (cs *customSource) NowNano() int64 { + if !cs.isInitialized.Load() { + return 0 + } + return cs.clock.NowNano() +} + +func (cs *customSource) Tick(duration time.Duration) <-chan time.Time { + return cs.clock.Tick(duration) +} + +func (cs *customSource) Sleep(duration time.Duration) { + time.Sleep(duration) +} + +func (cs *customSource) ProcessTick() {} + +type realSource struct { + initMutex sync.Mutex + isInitialized atomic.Bool + start time.Time + startNanos atomic.Int64 +} + +func (c *realSource) Init() { + if !c.isInitialized.Load() { + c.initMutex.Lock() + if !c.isInitialized.Load() { + now := time.Now() + c.start = now + c.startNanos.Store(now.UnixNano()) + c.isInitialized.Store(true) + } + c.initMutex.Unlock() + } +} + +func (c *realSource) NowNano() int64 { + if !c.isInitialized.Load() { + return 0 + } + return xmath.SaturatedAdd(c.startNanos.Load(), time.Since(c.start).Nanoseconds()) +} + +func (c *realSource) Tick(duration time.Duration) <-chan time.Time { + return time.Tick(duration) +} + +func (c *realSource) Sleep(duration time.Duration) { + time.Sleep(duration) +} + +func (c *realSource) ProcessTick() {} + +type fakeSource struct { + mutex sync.Mutex + now time.Time + initOnce sync.Once + sleeps chan time.Duration + tickWg sync.WaitGroup + sleepWg sync.WaitGroup + firstSleep atomic.Bool + withTick atomic.Bool + ticker chan time.Time + enableTickOnce sync.Once + enableTick chan time.Duration +} + +func (f *fakeSource) Init() { + f.initOnce.Do(func() { + f.mutex.Lock() + now := time.Now() + f.now = now + f.sleeps = make(chan time.Duration) + f.firstSleep.Store(true) + f.enableTick = make(chan time.Duration) + f.ticker = make(chan time.Time, 1) + f.mutex.Unlock() + + go func() { + var ( + dur time.Duration + d time.Duration + ) + enabled := false + last := now + for { + select { + case d = <-f.enableTick: + enabled = true + for d <= dur { + if f.firstSleep.Load() { + f.tickWg.Add(1) + f.ticker <- last + f.tickWg.Wait() + f.firstSleep.Store(false) + } + last = last.Add(d) + f.tickWg.Add(1) + f.ticker <- last + dur -= d + } + case s := <-f.sleeps: + if enabled && f.firstSleep.Load() { + f.tickWg.Add(1) + f.ticker <- last + f.tickWg.Wait() + f.firstSleep.Store(false) + } + f.mutex.Lock() + f.now = f.now.Add(s) + f.mutex.Unlock() + dur += s + if enabled { + for d <= dur { + last = last.Add(d) + f.tickWg.Add(1) + f.ticker <- last + dur -= d + } + } + f.sleepWg.Done() + } + } + }() + }) +} + +func (f *fakeSource) NowNano() int64 { + return f.getNow().UnixNano() +} + +func (f *fakeSource) Tick(d time.Duration) <-chan time.Time { + f.enableTickOnce.Do(func() { + f.enableTick <- d + }) + return f.ticker +} + +func (f *fakeSource) Sleep(d time.Duration) { + 
f.sleepWg.Add(1) + f.sleeps <- d + f.sleepWg.Wait() +} + +func (f *fakeSource) getNow() time.Time { + f.mutex.Lock() + defer f.mutex.Unlock() + return f.now +} + +func (f *fakeSource) ProcessTick() { + f.tickWg.Done() +} diff --git a/vendor/github.com/maypok86/otter/v2/deletion.go b/vendor/github.com/maypok86/otter/v2/deletion.go new file mode 100644 index 00000000..fa10a384 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/deletion.go @@ -0,0 +1,68 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +// DeletionCause the cause why a cached entry was deleted. +type DeletionCause int + +const ( + // CauseInvalidation means that the entry was manually deleted by the user. + CauseInvalidation DeletionCause = iota + 1 + // CauseReplacement means that the entry itself was not actually deleted, but its value was replaced by the user. + CauseReplacement + // CauseOverflow means that the entry was evicted due to size constraints. + CauseOverflow + // CauseExpiration means that the entry's expiration timestamp has passed. + CauseExpiration +) + +const causeUnknown DeletionCause = 0 + +var deletionCauseStrings = []string{ + "Invalidation", + "Replacement", + "Overflow", + "Expiration", +} + +// String implements [fmt.Stringer] interface. +func (dc DeletionCause) String() string { + if dc >= 1 && int(dc) <= len(deletionCauseStrings) { + return deletionCauseStrings[dc-1] + } + return "" +} + +// IsEviction returns true if there was an automatic deletion due to eviction +// (the cause is neither [CauseInvalidation] nor [CauseReplacement]). +func (dc DeletionCause) IsEviction() bool { + return !(dc == CauseInvalidation || dc == CauseReplacement) +} + +// DeletionEvent is an event of the deletion of a single entry. +type DeletionEvent[K comparable, V any] struct { + // Key is the key corresponding to the deleted entry. + Key K + // Value is the value corresponding to the deleted entry. + Value V + // Cause is the cause for which entry was deleted. + Cause DeletionCause +} + +// WasEvicted returns true if there was an automatic deletion due to eviction (the cause is neither +// [CauseInvalidation] nor [CauseReplacement]). +func (de DeletionEvent[K, V]) WasEvicted() bool { + return de.Cause.IsEviction() +} diff --git a/vendor/github.com/maypok86/otter/v2/doc.go b/vendor/github.com/maypok86/otter/v2/doc.go new file mode 100644 index 00000000..5ee26cf6 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/doc.go @@ -0,0 +1,28 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otter contains in-memory caching functionality. +// +// A [Cache] is similar to a hash table, but it also has additional support for policies to bound the map. +// +// [Cache] instances should always be configured and created using [Options]. +// +// The [Cache] also has [Cache.Get]/[Cache.BulkGet]/[Cache.Refresh]/[Cache.Refresh] methods +// which allows the cache to populate itself on a miss and offers refresh capabilities. +// +// Additional functionality such as bounding by the entry's size, deletion notifications, statistics, +// and eviction policies are described in the [Options]. +// +// See https://maypok86.github.io/otter/user-guide/v2/getting-started/ for more information about otter. +package otter diff --git a/vendor/github.com/maypok86/otter/v2/entry.go b/vendor/github.com/maypok86/otter/v2/entry.go new file mode 100644 index 00000000..27e09ca5 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/entry.go @@ -0,0 +1,95 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "time" +) + +// Entry is a key-value pair that may include policy metadata for the cached entry. +// +// It is an immutable snapshot of the cached data at the time of this entry's creation, and it will not +// reflect changes afterward. +type Entry[K comparable, V any] struct { + // Key is the entry's key. + Key K + // Value is the entry's value. + Value V + // Weight returns the entry's weight. + // + // If the cache was not configured with a weight then this value is always 1. + Weight uint32 + // ExpiresAtNano is the entry's expiration time as a unix time, + // the number of nanoseconds elapsed since January 1, 1970 UTC. + // + // If the cache was not configured with an expiration policy then this value is always math.MaxInt64. + ExpiresAtNano int64 + // RefreshableAtNano is the time after which the entry will be reloaded as a unix time, + // the number of nanoseconds elapsed since January 1, 1970 UTC. + // + // If the cache was not configured with a refresh policy then this value is always math.MaxInt64. + RefreshableAtNano int64 + // SnapshotAtNano is the time when this snapshot of the entry was taken as a unix time, + // the number of nanoseconds elapsed since January 1, 1970 UTC. + // + // If the cache was not configured with a time-based policy then this value is always 0. + SnapshotAtNano int64 +} + +// ExpiresAt returns the entry's expiration time. 
+// +// If the cache was not configured with an expiration policy then this value is roughly [math.MaxInt64] +// nanoseconds away from the SnapshotAt. +func (e Entry[K, V]) ExpiresAt() time.Time { + return time.Unix(0, e.ExpiresAtNano) +} + +// ExpiresAfter returns the fixed duration used to determine if an entry should be automatically removed due +// to elapsing this time bound. An entry is considered fresh if its age is less than this +// duration, and stale otherwise. The expiration policy determines when the entry's age is reset. +// +// If the cache was not configured with an expiration policy then this value is always [math.MaxInt64]. +func (e Entry[K, V]) ExpiresAfter() time.Duration { + return time.Duration(e.ExpiresAtNano - e.SnapshotAtNano) +} + +// HasExpired returns true if the entry has expired. +func (e Entry[K, V]) HasExpired() bool { + return e.ExpiresAtNano < e.SnapshotAtNano +} + +// RefreshableAt is the time after which the entry will be reloaded. +// +// If the cache was not configured with a refresh policy then this value is roughly [math.MaxInt64] +// nanoseconds away from the SnapshotAt. +func (e Entry[K, V]) RefreshableAt() time.Time { + return time.Unix(0, e.RefreshableAtNano) +} + +// RefreshableAfter returns the fixed duration used to determine if an entry should be eligible for reloading due +// to elapsing this time bound. An entry is considered fresh if its age is less than this +// duration, and stale otherwise. The refresh policy determines when the entry's age is reset. +// +// If the cache was not configured with a refresh policy then this value is always [math.MaxInt64]. +func (e Entry[K, V]) RefreshableAfter() time.Duration { + return time.Duration(e.RefreshableAtNano - e.SnapshotAtNano) +} + +// SnapshotAt is the time when this snapshot of the entry was taken. +// +// If the cache was not configured with a time-based policy then this value is always 1970-01-01 00:00:00 UTC. +func (e Entry[K, V]) SnapshotAt() time.Time { + return time.Unix(0, e.SnapshotAtNano) +} diff --git a/vendor/github.com/maypok86/otter/v2/error.go b/vendor/github.com/maypok86/otter/v2/error.go new file mode 100644 index 00000000..758a95b7 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/error.go @@ -0,0 +1,55 @@ +package otter + +import ( + "bytes" + "fmt" + "runtime/debug" +) + +const ( + // ErrNotFound should be returned from a Loader.Load/Loader.Reload to indicate that an entry is + // missing at the underlying data source. This helps the cache to determine + // if an entry should be deleted. + // + // NOTE: this only applies to Cache.Get/Cache.Refresh/Loader.Load/Loader.Reload. For Cache.BulkGet/Cache.BulkRefresh, + // this works implicitly if you return a map without the key. + ErrNotFound strError = "otter: the entry was not found in the data source" +) + +// strError allows declaring errors as constants. +type strError string + +func (err strError) Error() string { return string(err) } + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value any + stack []byte +} + +// Error implements error interface. 
+func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func (p *panicError) Unwrap() error { + err, ok := p.value.(error) + if !ok { + return nil + } + + return err +} + +func newPanicError(v any) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches cache the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack, '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} diff --git a/vendor/github.com/maypok86/otter/v2/expiry_calculator.go b/vendor/github.com/maypok86/otter/v2/expiry_calculator.go new file mode 100644 index 00000000..83e8b86e --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/expiry_calculator.go @@ -0,0 +1,140 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "time" +) + +// ExpiryCalculator calculates when cache entries expire. A single expiration time is retained so that the lifetime +// of an entry may be extended or reduced by subsequent evaluations. +type ExpiryCalculator[K comparable, V any] interface { + // ExpireAfterCreate specifies that the entry should be automatically removed from the cache once the duration has + // elapsed after the entry's creation. To indicate no expiration, an entry may be given an + // excessively long period. + // + // NOTE: ExpiresAtNano and RefreshableAtNano are not initialized at this stage. + ExpireAfterCreate(entry Entry[K, V]) time.Duration + // ExpireAfterUpdate specifies that the entry should be automatically removed from the cache once the duration has + // elapsed after the replacement of its value. To indicate no expiration, an entry may be given an + // excessively long period. The entry.ExpiresAfter() may be returned to not modify the expiration time. + ExpireAfterUpdate(entry Entry[K, V], oldValue V) time.Duration + // ExpireAfterRead specifies that the entry should be automatically removed from the cache once the duration has + // elapsed after its last read. To indicate no expiration, an entry may be given an excessively + // long period. The entry.ExpiresAfter() may be returned to not modify the expiration time. 
+ ExpireAfterRead(entry Entry[K, V]) time.Duration +} + +type varExpiryCreating[K comparable, V any] struct { + f func(entry Entry[K, V]) time.Duration +} + +func (c *varExpiryCreating[K, V]) ExpireAfterCreate(entry Entry[K, V]) time.Duration { + return c.f(entry) +} + +func (c *varExpiryCreating[K, V]) ExpireAfterUpdate(entry Entry[K, V], oldValue V) time.Duration { + return entry.ExpiresAfter() +} + +func (c *varExpiryCreating[K, V]) ExpireAfterRead(entry Entry[K, V]) time.Duration { + return entry.ExpiresAfter() +} + +// ExpiryCreating returns an [ExpiryCalculator] that specifies that the entry should be automatically deleted from +// the cache once the duration has elapsed after the entry's creation. The expiration time is +// not modified when the entry is updated or read. +func ExpiryCreating[K comparable, V any](duration time.Duration) ExpiryCalculator[K, V] { + return ExpiryCreatingFunc(func(entry Entry[K, V]) time.Duration { + return duration + }) +} + +// ExpiryCreatingFunc returns an [ExpiryCalculator] that specifies that the entry should be automatically deleted from +// the cache once the duration has elapsed after the entry's creation. The expiration time is +// not modified when the entry is updated or read. +func ExpiryCreatingFunc[K comparable, V any](f func(entry Entry[K, V]) time.Duration) ExpiryCalculator[K, V] { + return &varExpiryCreating[K, V]{ + f: f, + } +} + +type varExpiryWriting[K comparable, V any] struct { + f func(entry Entry[K, V]) time.Duration +} + +func (w *varExpiryWriting[K, V]) ExpireAfterCreate(entry Entry[K, V]) time.Duration { + return w.f(entry) +} + +func (w *varExpiryWriting[K, V]) ExpireAfterUpdate(entry Entry[K, V], oldValue V) time.Duration { + return w.f(entry) +} + +func (w *varExpiryWriting[K, V]) ExpireAfterRead(entry Entry[K, V]) time.Duration { + return entry.ExpiresAfter() +} + +// ExpiryWriting returns an [ExpiryCalculator] that specifies that the entry should be automatically deleted from +// the cache once the duration has elapsed after the entry's creation or replacement of its value. +// The expiration time is not modified when the entry is read. +func ExpiryWriting[K comparable, V any](duration time.Duration) ExpiryCalculator[K, V] { + return ExpiryWritingFunc(func(entry Entry[K, V]) time.Duration { + return duration + }) +} + +// ExpiryWritingFunc returns an [ExpiryCalculator] that specifies that the entry should be automatically deleted from +// the cache once the duration has elapsed after the entry's creation or replacement of its value. +// The expiration time is not modified when the entry is read. +func ExpiryWritingFunc[K comparable, V any](f func(entry Entry[K, V]) time.Duration) ExpiryCalculator[K, V] { + return &varExpiryWriting[K, V]{ + f: f, + } +} + +type varExpiryAccessing[K comparable, V any] struct { + f func(entry Entry[K, V]) time.Duration +} + +func (a *varExpiryAccessing[K, V]) ExpireAfterCreate(entry Entry[K, V]) time.Duration { + return a.f(entry) +} + +func (a *varExpiryAccessing[K, V]) ExpireAfterUpdate(entry Entry[K, V], oldValue V) time.Duration { + return a.f(entry) +} + +func (a *varExpiryAccessing[K, V]) ExpireAfterRead(entry Entry[K, V]) time.Duration { + return a.f(entry) +} + +// ExpiryAccessing returns an [ExpiryCalculator] that specifies that the entry should be automatically deleted from +// the cache once the duration has elapsed after the entry's creation, replacement of its value, +// or after it was last read. 
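The three factory families above differ only in which operations reset an entry's age: creation only, any write, or any access. A sketch of a fixed write-based TTL and a per-entry variant built from the factories defined in this file; how a calculator is attached to a cache (an Options field) is outside this excerpt, and the durations and size threshold are purely illustrative.

package example

import (
	"time"

	"github.com/maypok86/otter/v2"
)

// Fixed TTL measured from the last write; reads do not extend the lifetime.
var writeTTL = otter.ExpiryWriting[string, []byte](10 * time.Minute)

// Per-entry TTL: heavier entries expire sooner.
var sizeAwareTTL = otter.ExpiryWritingFunc[string, []byte](func(e otter.Entry[string, []byte]) time.Duration {
	if e.Weight > 64*1024 {
		return time.Minute
	}
	return 30 * time.Minute
})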
+func ExpiryAccessing[K comparable, V any](duration time.Duration) ExpiryCalculator[K, V] { + return ExpiryAccessingFunc(func(entry Entry[K, V]) time.Duration { + return duration + }) +} + +// ExpiryAccessingFunc returns an [ExpiryCalculator] that specifies that the entry should be automatically deleted from +// the cache once the duration has elapsed after the entry's creation, replacement of its value, +// or after it was last read. +func ExpiryAccessingFunc[K comparable, V any](f func(entry Entry[K, V]) time.Duration) ExpiryCalculator[K, V] { + return &varExpiryAccessing[K, V]{ + f: f, + } +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/deque/linked.go b/vendor/github.com/maypok86/otter/v2/internal/deque/linked.go new file mode 100644 index 00000000..6923df71 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/deque/linked.go @@ -0,0 +1,231 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package deque + +import ( + "iter" + + "github.com/maypok86/otter/v2/internal/generated/node" +) + +type Linked[K comparable, V any] struct { + head node.Node[K, V] + tail node.Node[K, V] + len int + isExp bool +} + +func NewLinked[K comparable, V any](isExp bool) *Linked[K, V] { + return &Linked[K, V]{ + isExp: isExp, + } +} + +func (d *Linked[K, V]) PushBack(n node.Node[K, V]) { + if d.IsEmpty() { + d.head = n + d.tail = n + } else { + d.setPrev(n, d.tail) + d.setNext(d.tail, n) + d.tail = n + } + + d.len++ +} + +func (d *Linked[K, V]) UpdateNode(n, old node.Node[K, V]) { + oldNext := d.getNext(old) + if node.Equals(oldNext, nil) { + if node.Equals(d.tail, old) { + d.tail = n + } + } else { + d.setPrev(oldNext, n) + d.setNext(n, oldNext) + d.setNext(old, nil) + } + + oldPrev := d.getPrev(old) + if node.Equals(oldPrev, nil) { + if node.Equals(d.head, old) { + d.head = n + } + } else { + d.setPrev(n, oldPrev) + d.setNext(oldPrev, n) + d.setPrev(old, nil) + } +} + +func (d *Linked[K, V]) PushFront(n node.Node[K, V]) { + if d.IsEmpty() { + d.head = n + d.tail = n + } else { + d.setNext(n, d.head) + d.setPrev(d.head, n) + d.head = n + } + + d.len++ +} + +func (d *Linked[K, V]) PopFront() node.Node[K, V] { + if d.IsEmpty() { + return nil + } + + result := d.head + d.Delete(result) + return result +} + +/* +func (d *Linked[K, V]) PopBack() node.Node[K, V] { + if d.IsEmpty() { + return nil + } + + result := d.tail + d.Delete(result) + return result +} +*/ + +func (d *Linked[K, V]) NotContains(n node.Node[K, V]) bool { + return !d.Contains(n) +} + +func (d *Linked[K, V]) Contains(n node.Node[K, V]) bool { + return !node.Equals(d.getPrev(n), nil) || !node.Equals(d.getNext(n), nil) || node.Equals(d.head, n) +} + +func (d *Linked[K, V]) MoveToBack(n node.Node[K, V]) { + if !node.Equals(n, d.tail) { + d.Delete(n) + d.PushBack(n) + } +} + +func (d *Linked[K, V]) MoveToFront(n node.Node[K, V]) { + if !node.Equals(n, d.head) { + d.Delete(n) + d.PushFront(n) + } +} + +func (d *Linked[K, V]) Delete(n 
node.Node[K, V]) { + next := d.getNext(n) + prev := d.getPrev(n) + + if node.Equals(prev, nil) { + if node.Equals(next, nil) && !node.Equals(d.head, n) { + return + } + + d.head = next + } else { + d.setNext(prev, next) + d.setPrev(n, nil) + } + + if node.Equals(next, nil) { + d.tail = prev + } else { + d.setPrev(next, prev) + d.setNext(n, nil) + } + + d.len-- +} + +func (d *Linked[K, V]) Clear() { + for !d.IsEmpty() { + d.PopFront() + } +} + +func (d *Linked[K, V]) Len() int { + return d.len +} + +func (d *Linked[K, V]) IsEmpty() bool { + return d.Len() == 0 +} + +func (d *Linked[K, V]) Head() node.Node[K, V] { + return d.head +} + +func (d *Linked[K, V]) Tail() node.Node[K, V] { + return d.tail +} + +func (d *Linked[K, V]) All() iter.Seq[node.Node[K, V]] { + return func(yield func(node.Node[K, V]) bool) { + cursor := d.head + for !node.Equals(cursor, nil) { + if !yield(cursor) { + return + } + cursor = d.getNext(cursor) + } + } +} + +func (d *Linked[K, V]) Backward() iter.Seq[node.Node[K, V]] { + return func(yield func(node.Node[K, V]) bool) { + cursor := d.tail + for !node.Equals(cursor, nil) { + if !yield(cursor) { + return + } + cursor = d.getPrev(cursor) + } + } +} + +func (d *Linked[K, V]) setPrev(to, n node.Node[K, V]) { + if d.isExp { + to.SetPrevExp(n) + } else { + to.SetPrev(n) + } +} + +func (d *Linked[K, V]) setNext(to, n node.Node[K, V]) { + if d.isExp { + to.SetNextExp(n) + } else { + to.SetNext(n) + } +} + +func (d *Linked[K, V]) getNext(n node.Node[K, V]) node.Node[K, V] { + if d.isExp { + return n.NextExp() + } else { + return n.Next() + } +} + +func (d *Linked[K, V]) getPrev(n node.Node[K, V]) node.Node[K, V] { + if d.isExp { + return n.PrevExp() + } else { + return n.Prev() + } +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/deque/queue/mpsc.go b/vendor/github.com/maypok86/otter/v2/internal/deque/queue/mpsc.go new file mode 100644 index 00000000..7f4f6067 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/deque/queue/mpsc.go @@ -0,0 +1,320 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package queue + +import ( + "fmt" + "math" + "sync/atomic" + "unsafe" + + "github.com/maypok86/otter/v2/internal/xmath" + "github.com/maypok86/otter/v2/internal/xruntime" +) + +type buffer struct { + data []unsafe.Pointer +} + +func newBuffer(capacity uint64) *buffer { + return &buffer{ + data: make([]unsafe.Pointer, capacity), + } +} + +// MPSC is an MPSC array queue which starts at initialCapacity and grows to maxCapacity in +// linked chunks of the initial size. The queue grows only when the current buffer is full and +// elements are not copied on resize, instead a link to the new buffer is stored in the old buffer +// for the consumer to follow. 
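+//
+// Editor's note (illustrative sketch, not part of the original file): any number
+// of goroutines may call TryPush, but TryPop must only ever be called from a
+// single consumer goroutine.
+//
+//	q := NewMPSC[int](4, 1024)
+//	v := 42
+//	if q.TryPush(&v) {
+//		if p := q.TryPop(); p != nil {
+//			_ = *p // 42
+//		}
+//	}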
+type MPSC[T any] struct { + producerIndex atomic.Uint64 + _ [xruntime.CacheLineSize - 8]byte + consumerBuffer atomic.Pointer[buffer] + consumerIndex atomic.Uint64 + consumerMask atomic.Uint64 + _ [xruntime.CacheLineSize - 8*3]byte + producerBuffer atomic.Pointer[buffer] + producerLimit atomic.Uint64 + producerMask atomic.Uint64 + _ [xruntime.CacheLineSize - 8*2]byte + jump unsafe.Pointer + maxQueueCapacity uint64 +} + +func NewMPSC[T any](initialCapacity, maxCapacity uint32) *MPSC[T] { + if initialCapacity < 2 { + panic(fmt.Sprintf("Initial capacity must be 2 or more. initialCapacity = %d", initialCapacity)) + } + if maxCapacity < 4 { + panic(fmt.Sprintf("Max capacity must be 4 or more. maxCapacity = %d", maxCapacity)) + } + + p2initialCapacity := xmath.RoundUpPowerOf2(initialCapacity) + p2maxCapacity := xmath.RoundUpPowerOf2(maxCapacity) + if p2maxCapacity < p2initialCapacity { + s := fmt.Sprintf( + "Initial capacity cannot exceed maximum capacity(both rounded up to a power of 2). initialCapacity = %d, maxCapacity = %d", + initialCapacity, + maxCapacity, + ) + panic(s) + } + + mask := uint64((p2initialCapacity - 1) << 1) + buffer := newBuffer(uint64(p2initialCapacity) + 1) + + var zero T + q := &MPSC[T]{ + maxQueueCapacity: uint64(p2maxCapacity) << 1, + //nolint:gosec // it's ok + jump: unsafe.Pointer(&zero), + } + q.consumerBuffer.Store(buffer) + q.consumerMask.Store(mask) + q.producerBuffer.Store(buffer) + q.producerLimit.Store(mask) + q.producerMask.Store(mask) + + return q +} + +func (m *MPSC[T]) getNextBufferSize(buffer *buffer) uint64 { + maxSize := m.maxQueueCapacity / 2 + bufferLength := uint64(len(buffer.data)) + if maxSize < bufferLength { + panic(fmt.Sprintf("maxSize should be >= bufferLength. maxSize = %d, bufferLength = %d", maxSize, bufferLength)) + } + newSize := 2 * (bufferLength - 1) + return newSize + 1 +} + +func (m *MPSC[T]) getCurrentBufferCapacity(mask uint64) uint64 { + if mask+2 == m.maxQueueCapacity { + return m.maxQueueCapacity + } + return mask +} + +func (m *MPSC[T]) availableInQueue(pIndex, cIndex uint64) uint64 { + return m.maxQueueCapacity - (pIndex - cIndex) +} + +func (m *MPSC[T]) capacity() int { + //nolint:gosec // there's no overflow + return int(m.maxQueueCapacity / 2) +} + +func (m *MPSC[T]) TryPush(t *T) bool { + var ( + mask uint64 + pIndex uint64 + buffer *buffer + ) + + for { + producerLimit := m.producerLimit.Load() + pIndex = m.producerIndex.Load() + // lower bit is indicative of resize, if we see it we spin until it's cleared + if pIndex&1 == 1 { + continue + } + // pIndex is even (lower bit is 0) -> actual index is (pIndex >> 1) + + // mask/buffer may get changed by resizing -> only use for array access after successful CAS. + mask = m.producerMask.Load() + buffer = m.producerBuffer.Load() + // a successful CAS ties the ordering, lv(pIndex)-[mask/buffer]->cas(pIndex) + + // assumption behind this optimization is that queue is almost always empty or near empty + if producerLimit <= pIndex { + result := m.pushSlowPath(mask, pIndex, producerLimit) + switch result { + case 0: + break + case 1: + continue + case 2: + return false + case 3: + m.resize(mask, buffer, pIndex, t) + return true + } + } + + if m.producerIndex.CompareAndSwap(pIndex, pIndex+2) { + break + } + } + + offset := modifiedCalcElementOffset(pIndex, mask) + //nolint:gosec // it's ok + atomic.StorePointer(&buffer.data[offset], unsafe.Pointer(t)) + return true +} + +// We do not inline resize into this method because we do not resize on fill. 
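+// The return code is interpreted by TryPush (editor's summary of the inline
+// comments below): 0 - proceed to the pIndex CAS, 1 - retry from the top,
+// 2 - the queue is full and cannot grow, 3 - the caller must resize.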
+func (m *MPSC[T]) pushSlowPath(mask, pIndex, producerLimit uint64) uint8 { + var result uint8 // 0 - goto pIndex CAS + cIndex := m.consumerIndex.Load() + bufferCapacity := m.getCurrentBufferCapacity(mask) + + switch { + case cIndex+bufferCapacity > pIndex: + if !m.producerLimit.CompareAndSwap(producerLimit, cIndex+bufferCapacity) { + result = 1 // retry from top + } + // full and cannot grow + case m.availableInQueue(pIndex, cIndex) <= 0: + result = 2 // -> return false + case m.producerIndex.CompareAndSwap(pIndex, pIndex+1): + result = 3 // -> resize + default: + result = 1 // failed resize attempt, retry from top + } + + return result +} + +func (m *MPSC[T]) TryPop() *T { + buffer := m.consumerBuffer.Load() + index := m.consumerIndex.Load() + mask := m.consumerMask.Load() + + offset := modifiedCalcElementOffset(index, mask) + v := atomic.LoadPointer(&buffer.data[offset]) + if v == nil { + if index == m.producerIndex.Load() { + return nil + } + v = atomic.LoadPointer(&buffer.data[offset]) + for v == nil { + v = atomic.LoadPointer(&buffer.data[offset]) + } + } + if v == m.jump { + nextBuffer := m.getNextBuffer(buffer, mask) + return m.newBufferTryPush(nextBuffer, index) + } + + atomic.StorePointer(&buffer.data[offset], nil) + m.consumerIndex.Store(index + 2) + + return (*T)(v) +} + +func (m *MPSC[T]) Size() uint64 { + if m == nil { + return 0 + } + // NOTE: because indices are on even numbers we cannot use the size util. + + // It is possible for a thread to be interrupted or reschedule between the read of the producer + // and consumer indices, therefore protection is required to ensure size is within valid range. + // In the event of concurrent polls/offers to this method the size is OVER estimated as we read + // consumer index BEFORE the producer index. + + after := m.consumerIndex.Load() + for { + before := after + currentProducerIndex := m.producerIndex.Load() + after = m.consumerIndex.Load() + if before == after { + return (currentProducerIndex - after) >> 1 + } + } +} + +func (m *MPSC[T]) IsEmpty() bool { + // Order matters! + // Loading consumer before producer allows for producer increments after consumer index is read. + // This ensures this method is conservative in its estimate. Note that as this is an MPMC there + // is nothing we can do to make this an exact method. 
+ return m.consumerIndex.Load() == m.producerIndex.Load() +} + +func (m *MPSC[T]) getNextBuffer(b *buffer, mask uint64) *buffer { + nextBufferOffset := nextArrayOffset(mask) + nextBuffer := (*buffer)(atomic.LoadPointer(&b.data[nextBufferOffset])) + atomic.StorePointer(&b.data[nextBufferOffset], nil) + if nextBuffer == nil { + panic("nextBuffer should be != nil") + } + return nextBuffer +} + +func (m *MPSC[T]) newBufferTryPush(b *buffer, index uint64) *T { + offsetInNew := m.newBufferAndOffset(b, index) + v := atomic.LoadPointer(&b.data[offsetInNew]) + if v == nil { + panic("new buffer must have at least one element") + } + atomic.StorePointer(&b.data[offsetInNew], nil) + m.consumerIndex.Store(index + 2) + return (*T)(v) +} + +func (m *MPSC[T]) newBufferAndOffset(b *buffer, index uint64) uint64 { + m.consumerBuffer.Store(b) + //nolint:gosec // there's no overflow + mask := uint64(len(b.data)-2) << 1 + m.consumerMask.Store(mask) + return modifiedCalcElementOffset(index, mask) +} + +func (m *MPSC[T]) resize(oldMask uint64, oldBuffer *buffer, pIndex uint64, t *T) { + newBufferLength := m.getNextBufferSize(oldBuffer) + newBuffer := newBuffer(newBufferLength) + + m.producerBuffer.Store(newBuffer) + newMask := (newBufferLength - 2) << 1 + m.producerMask.Store(newMask) + + offsetInOld := modifiedCalcElementOffset(pIndex, oldMask) + offsetInNew := modifiedCalcElementOffset(pIndex, newMask) + + //nolint:gosec // it's ok + atomic.StorePointer(&newBuffer.data[offsetInNew], unsafe.Pointer(t)) // element in new array + //nolint:gosec // it's ok + atomic.StorePointer(&oldBuffer.data[nextArrayOffset(oldMask)], unsafe.Pointer(newBuffer)) // buffer linked + + cIndex := m.consumerIndex.Load() + availableInQueue := m.availableInQueue(pIndex, cIndex) + if availableInQueue == 0 { + panic(fmt.Sprintf("availableInQueue should be == . availableInQueue = %d", availableInQueue)) + } + + // Invalidate racing CASs + // We never set the limit beyond the bounds of a buffer + m.producerLimit.Store(pIndex + min(newMask, availableInQueue)) + + // make resize visible to the other producers + m.producerIndex.Store(pIndex + 2) + + // INDEX visible before ELEMENT, consistent with consumer expectation + + // make resize visible to consumer + atomic.StorePointer(&oldBuffer.data[offsetInOld], m.jump) +} + +func nextArrayOffset(mask uint64) uint64 { + return modifiedCalcElementOffset(mask+2, math.MaxUint64) +} + +// This method assumes index is actually (index << 1) because lower bit is used for resize. This +// is compensated for by reducing the element shift. The computation is constant folded, so +// there's no cost. +func modifiedCalcElementOffset(index, mask uint64) uint64 { + return (index & mask) >> 1 +} diff --git a/vendor/github.com/maypok86/otter/internal/expiry/variable.go b/vendor/github.com/maypok86/otter/v2/internal/expiration/variable.go similarity index 57% rename from vendor/github.com/maypok86/otter/internal/expiry/variable.go rename to vendor/github.com/maypok86/otter/v2/internal/expiration/variable.go index cecf3f12..ee9b61ac 100644 --- a/vendor/github.com/maypok86/otter/internal/expiry/variable.go +++ b/vendor/github.com/maypok86/otter/v2/internal/expiration/variable.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -12,64 +12,61 @@ // See the License for the specific language governing permissions and // limitations under the License. -package expiry +package expiration import ( "math" "math/bits" "time" - "github.com/maypok86/otter/internal/generated/node" - "github.com/maypok86/otter/internal/unixtime" - "github.com/maypok86/otter/internal/xmath" + "github.com/maypok86/otter/v2/internal/generated/node" + "github.com/maypok86/otter/v2/internal/xmath" ) var ( - buckets = []uint32{64, 64, 32, 4, 1} - spans = []uint32{ - xmath.RoundUpPowerOf2(uint32((1 * time.Second).Seconds())), // 1s - xmath.RoundUpPowerOf2(uint32((1 * time.Minute).Seconds())), // 1.07m - xmath.RoundUpPowerOf2(uint32((1 * time.Hour).Seconds())), // 1.13h - xmath.RoundUpPowerOf2(uint32((24 * time.Hour).Seconds())), // 1.52d - buckets[3] * xmath.RoundUpPowerOf2(uint32((24 * time.Hour).Seconds())), // 6.07d - buckets[3] * xmath.RoundUpPowerOf2(uint32((24 * time.Hour).Seconds())), // 6.07d + buckets = []uint64{64, 64, 32, 4, 1} + spans = []uint64{ + xmath.RoundUpPowerOf264(uint64((1 * time.Second).Nanoseconds())), // 1.07s + xmath.RoundUpPowerOf264(uint64((1 * time.Minute).Nanoseconds())), // 1.14m + xmath.RoundUpPowerOf264(uint64((1 * time.Hour).Nanoseconds())), // 1.22h + xmath.RoundUpPowerOf264(uint64((24 * time.Hour).Nanoseconds())), // 1.63d + buckets[3] * xmath.RoundUpPowerOf264(uint64((24 * time.Hour).Nanoseconds())), // 6.5d + buckets[3] * xmath.RoundUpPowerOf264(uint64((24 * time.Hour).Nanoseconds())), // 6.5d } - shift = []uint32{ - uint32(bits.TrailingZeros32(spans[0])), - uint32(bits.TrailingZeros32(spans[1])), - uint32(bits.TrailingZeros32(spans[2])), - uint32(bits.TrailingZeros32(spans[3])), - uint32(bits.TrailingZeros32(spans[4])), + shift = []uint64{ + uint64(bits.TrailingZeros64(spans[0])), + uint64(bits.TrailingZeros64(spans[1])), + uint64(bits.TrailingZeros64(spans[2])), + uint64(bits.TrailingZeros64(spans[3])), + uint64(bits.TrailingZeros64(spans[4])), } ) type Variable[K comparable, V any] struct { - wheel [][]node.Node[K, V] - time uint32 - deleteNode func(node.Node[K, V]) + wheel [][]node.Node[K, V] + time uint64 } -func NewVariable[K comparable, V any](nodeManager *node.Manager[K, V], deleteNode func(node.Node[K, V])) *Variable[K, V] { +func NewVariable[K comparable, V any](nodeManager *node.Manager[K, V]) *Variable[K, V] { wheel := make([][]node.Node[K, V], len(buckets)) for i := 0; i < len(wheel); i++ { wheel[i] = make([]node.Node[K, V], buckets[i]) for j := 0; j < len(wheel[i]); j++ { var k K var v V - fn := nodeManager.Create(k, v, math.MaxUint32, 1) + fn := nodeManager.Create(k, v, math.MaxInt64, math.MaxInt64, 1) fn.SetPrevExp(fn) fn.SetNextExp(fn) wheel[i][j] = fn } } return &Variable[K, V]{ - wheel: wheel, - deleteNode: deleteNode, + wheel: wheel, } } // findBucket determines the bucket that the timer event should be added to. -func (v *Variable[K, V]) findBucket(expiration uint32) node.Node[K, V] { +func (v *Variable[K, V]) findBucket(expiration uint64) node.Node[K, V] { duration := expiration - v.time length := len(v.wheel) - 1 for i := 0; i < length; i++ { @@ -84,7 +81,8 @@ func (v *Variable[K, V]) findBucket(expiration uint32) node.Node[K, V] { // Add schedules a timer event for the node. 
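+// Editor's note (not part of the original file): findBucket, partly elided by
+// this hunk, appears to pick the finest wheel level whose total horizon still
+// covers the node's remaining delay, using the spans table above. Roughly, an
+// entry due ~30s from now would land in the ~1.07s-resolution level, while one
+// due ~90s out would fall through to the ~1.14m-resolution level.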
func (v *Variable[K, V]) Add(n node.Node[K, V]) { - root := v.findBucket(n.Expiration()) + //nolint:gosec // there is no overflow + root := v.findBucket(uint64(n.ExpiresAt())) link(root, n) } @@ -95,8 +93,8 @@ func (v *Variable[K, V]) Delete(n node.Node[K, V]) { n.SetPrevExp(nil) } -func (v *Variable[K, V]) DeleteExpired() { - currentTime := unixtime.Now() +func (v *Variable[K, V]) DeleteExpired(nowNanos int64, expireNode func(n node.Node[K, V], nowNanos int64)) { + currentTime := uint64(nowNanos) prevTime := v.time v.time = currentTime @@ -108,16 +106,17 @@ func (v *Variable[K, V]) DeleteExpired() { break } - v.deleteExpiredFromBucket(i, previousTicks, delta) + v.deleteExpiredFromBucket(i, previousTicks, delta, expireNode) } } -func (v *Variable[K, V]) deleteExpiredFromBucket(index int, prevTicks, delta uint32) { +func (v *Variable[K, V]) deleteExpiredFromBucket( + index int, + prevTicks, delta uint64, + expireNode func(n node.Node[K, V], nowNanos int64), +) { mask := buckets[index] - 1 - steps := buckets[index] - if delta < steps { - steps = delta - } + steps := min(delta+1, buckets[index]) start := prevTicks & mask end := start + steps timerWheel := v.wheel[index] @@ -132,8 +131,8 @@ func (v *Variable[K, V]) deleteExpiredFromBucket(index int, prevTicks, delta uin n.SetPrevExp(nil) n.SetNextExp(nil) - if n.Expiration() <= v.time { - v.deleteNode(n) + if uint64(n.ExpiresAt()) < v.time { + expireNode(n, int64(v.time)) } else { v.Add(n) } @@ -143,24 +142,6 @@ func (v *Variable[K, V]) deleteExpiredFromBucket(index int, prevTicks, delta uin } } -func (v *Variable[K, V]) Clear() { - for i := 0; i < len(v.wheel); i++ { - for j := 0; j < len(v.wheel[i]); j++ { - root := v.wheel[i][j] - n := root.NextExp() - // NOTE(maypok86): Maybe we should use the same approach as in DeleteExpired? - - for !node.Equals(n, root) { - next := n.NextExp() - v.Delete(n) - - n = next - } - } - } - v.time = unixtime.Now() -} - // link adds the entry at the tail of the bucket's list. func link[K comparable, V any](root, n node.Node[K, V]) { n.SetPrevExp(root.PrevExp()) diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/b.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/b.go new file mode 100644 index 00000000..4d6d1d21 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/b.go @@ -0,0 +1,163 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "unsafe" +) + +// B is a cache entry that provide the following features: +// +// 1. Base +type B[K comparable, V any] struct { + key K + value V +} + +// NewB creates a new B. +func NewB[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &B[K, V]{ + key: key, + value: value, + } + + return n +} + +// CastPointerToB casts a pointer to B. 
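+// Editor's note (not part of the original file): this is the inverse of
+// AsPointer; it is only safe to call on a pointer previously obtained from a
+// *B node's AsPointer, since the cast simply reinterprets the raw pointer.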
+func CastPointerToB[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*B[K, V])(ptr) +} + +func (n *B[K, V]) Key() K { + return n.key +} + +func (n *B[K, V]) Value() V { + return n.value +} + +func (n *B[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *B[K, V]) Prev() Node[K, V] { + panic("not implemented") +} + +func (n *B[K, V]) SetPrev(v Node[K, V]) { + panic("not implemented") +} + +func (n *B[K, V]) Next() Node[K, V] { + panic("not implemented") +} + +func (n *B[K, V]) SetNext(v Node[K, V]) { + panic("not implemented") +} + +func (n *B[K, V]) PrevExp() Node[K, V] { + panic("not implemented") +} + +func (n *B[K, V]) SetPrevExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *B[K, V]) NextExp() Node[K, V] { + panic("not implemented") +} + +func (n *B[K, V]) SetNextExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *B[K, V]) HasExpired(now int64) bool { + return false +} + +func (n *B[K, V]) ExpiresAt() int64 { + panic("not implemented") +} + +func (n *B[K, V]) CASExpiresAt(old, new int64) bool { + panic("not implemented") +} + +func (n *B[K, V]) SetExpiresAt(new int64) { + panic("not implemented") +} + +func (n *B[K, V]) RefreshableAt() int64 { + panic("not implemented") +} + +func (n *B[K, V]) CASRefreshableAt(old, new int64) bool { + panic("not implemented") +} + +func (n *B[K, V]) SetRefreshableAt(new int64) { + panic("not implemented") +} + +func (n *B[K, V]) IsFresh(now int64) bool { + return true +} + +func (n *B[K, V]) Weight() uint32 { + return 1 +} + +func (n *B[K, V]) IsAlive() bool { + return true +} + +func (n *B[K, V]) IsRetired() bool { + panic("not implemented") +} + +func (n *B[K, V]) Retire() { + panic("not implemented") +} + +func (n *B[K, V]) IsDead() bool { + panic("not implemented") +} + +func (n *B[K, V]) Die() { + panic("not implemented") +} + +func (n *B[K, V]) GetQueueType() uint8 { + panic("not implemented") +} + +func (n *B[K, V]) SetQueueType(queueType uint8) { + panic("not implemented") +} + +func (n *B[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *B[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *B[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *B[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *B[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *B[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/be.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/be.go new file mode 100644 index 00000000..432eb943 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/be.go @@ -0,0 +1,180 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BE is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Expiration +type BE[K comparable, V any] struct { + key K + value V + prevExp *BE[K, V] + nextExp *BE[K, V] + expiresAt atomic.Int64 + state atomic.Uint32 +} + +// NewBE creates a new BE. +func NewBE[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BE[K, V]{ + key: key, + value: value, + } + n.expiresAt.Store(expiresAt) + n.state.Store(aliveState) + + return n +} + +// CastPointerToBE casts a pointer to BE. 
+func CastPointerToBE[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BE[K, V])(ptr) +} + +func (n *BE[K, V]) Key() K { + return n.key +} + +func (n *BE[K, V]) Value() V { + return n.value +} + +func (n *BE[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BE[K, V]) Prev() Node[K, V] { + panic("not implemented") +} + +func (n *BE[K, V]) SetPrev(v Node[K, V]) { + panic("not implemented") +} + +func (n *BE[K, V]) Next() Node[K, V] { + panic("not implemented") +} + +func (n *BE[K, V]) SetNext(v Node[K, V]) { + panic("not implemented") +} + +func (n *BE[K, V]) PrevExp() Node[K, V] { + return n.prevExp +} + +func (n *BE[K, V]) SetPrevExp(v Node[K, V]) { + if v == nil { + n.prevExp = nil + return + } + n.prevExp = (*BE[K, V])(v.AsPointer()) +} + +func (n *BE[K, V]) NextExp() Node[K, V] { + return n.nextExp +} + +func (n *BE[K, V]) SetNextExp(v Node[K, V]) { + if v == nil { + n.nextExp = nil + return + } + n.nextExp = (*BE[K, V])(v.AsPointer()) +} + +func (n *BE[K, V]) HasExpired(now int64) bool { + return n.ExpiresAt() <= now +} + +func (n *BE[K, V]) ExpiresAt() int64 { + return n.expiresAt.Load() +} + +func (n *BE[K, V]) CASExpiresAt(old, new int64) bool { + return n.expiresAt.CompareAndSwap(old, new) +} + +func (n *BE[K, V]) SetExpiresAt(new int64) { + n.expiresAt.Store(new) +} + +func (n *BE[K, V]) RefreshableAt() int64 { + panic("not implemented") +} + +func (n *BE[K, V]) CASRefreshableAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BE[K, V]) SetRefreshableAt(new int64) { + panic("not implemented") +} + +func (n *BE[K, V]) IsFresh(now int64) bool { + return true +} + +func (n *BE[K, V]) Weight() uint32 { + return 1 +} + +func (n *BE[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BE[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BE[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BE[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BE[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BE[K, V]) GetQueueType() uint8 { + panic("not implemented") +} + +func (n *BE[K, V]) SetQueueType(queueType uint8) { + panic("not implemented") +} + +func (n *BE[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BE[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BE[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BE[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BE[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BE[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/ber.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/ber.go new file mode 100644 index 00000000..4eb858a0 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/ber.go @@ -0,0 +1,184 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BER is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Expiration +// +// 3. 
Refresh +type BER[K comparable, V any] struct { + key K + value V + prevExp *BER[K, V] + nextExp *BER[K, V] + expiresAt atomic.Int64 + refreshableAt atomic.Int64 + state atomic.Uint32 +} + +// NewBER creates a new BER. +func NewBER[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BER[K, V]{ + key: key, + value: value, + } + n.expiresAt.Store(expiresAt) + n.refreshableAt.Store(refreshableAt) + n.state.Store(aliveState) + + return n +} + +// CastPointerToBER casts a pointer to BER. +func CastPointerToBER[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BER[K, V])(ptr) +} + +func (n *BER[K, V]) Key() K { + return n.key +} + +func (n *BER[K, V]) Value() V { + return n.value +} + +func (n *BER[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BER[K, V]) Prev() Node[K, V] { + panic("not implemented") +} + +func (n *BER[K, V]) SetPrev(v Node[K, V]) { + panic("not implemented") +} + +func (n *BER[K, V]) Next() Node[K, V] { + panic("not implemented") +} + +func (n *BER[K, V]) SetNext(v Node[K, V]) { + panic("not implemented") +} + +func (n *BER[K, V]) PrevExp() Node[K, V] { + return n.prevExp +} + +func (n *BER[K, V]) SetPrevExp(v Node[K, V]) { + if v == nil { + n.prevExp = nil + return + } + n.prevExp = (*BER[K, V])(v.AsPointer()) +} + +func (n *BER[K, V]) NextExp() Node[K, V] { + return n.nextExp +} + +func (n *BER[K, V]) SetNextExp(v Node[K, V]) { + if v == nil { + n.nextExp = nil + return + } + n.nextExp = (*BER[K, V])(v.AsPointer()) +} + +func (n *BER[K, V]) HasExpired(now int64) bool { + return n.ExpiresAt() <= now +} + +func (n *BER[K, V]) ExpiresAt() int64 { + return n.expiresAt.Load() +} + +func (n *BER[K, V]) CASExpiresAt(old, new int64) bool { + return n.expiresAt.CompareAndSwap(old, new) +} + +func (n *BER[K, V]) SetExpiresAt(new int64) { + n.expiresAt.Store(new) +} + +func (n *BER[K, V]) RefreshableAt() int64 { + return n.refreshableAt.Load() +} + +func (n *BER[K, V]) CASRefreshableAt(old, new int64) bool { + return n.refreshableAt.CompareAndSwap(old, new) +} + +func (n *BER[K, V]) SetRefreshableAt(new int64) { + n.refreshableAt.Store(new) +} + +func (n *BER[K, V]) IsFresh(now int64) bool { + return n.IsAlive() && n.RefreshableAt() > now +} + +func (n *BER[K, V]) Weight() uint32 { + return 1 +} + +func (n *BER[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BER[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BER[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BER[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BER[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BER[K, V]) GetQueueType() uint8 { + panic("not implemented") +} + +func (n *BER[K, V]) SetQueueType(queueType uint8) { + panic("not implemented") +} + +func (n *BER[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BER[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BER[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BER[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BER[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BER[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/berw.go 
b/vendor/github.com/maypok86/otter/v2/internal/generated/node/berw.go new file mode 100644 index 00000000..4b881c03 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/berw.go @@ -0,0 +1,199 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BERW is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Expiration +// +// 3. Refresh +// +// 4. Weight +type BERW[K comparable, V any] struct { + key K + value V + prev *BERW[K, V] + next *BERW[K, V] + prevExp *BERW[K, V] + nextExp *BERW[K, V] + expiresAt atomic.Int64 + refreshableAt atomic.Int64 + weight uint32 + state atomic.Uint32 + queueType uint8 +} + +// NewBERW creates a new BERW. +func NewBERW[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BERW[K, V]{ + key: key, + value: value, + weight: weight, + } + n.expiresAt.Store(expiresAt) + n.refreshableAt.Store(refreshableAt) + n.state.Store(aliveState) + + return n +} + +// CastPointerToBERW casts a pointer to BERW. +func CastPointerToBERW[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BERW[K, V])(ptr) +} + +func (n *BERW[K, V]) Key() K { + return n.key +} + +func (n *BERW[K, V]) Value() V { + return n.value +} + +func (n *BERW[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BERW[K, V]) Prev() Node[K, V] { + return n.prev +} + +func (n *BERW[K, V]) SetPrev(v Node[K, V]) { + if v == nil { + n.prev = nil + return + } + n.prev = (*BERW[K, V])(v.AsPointer()) +} + +func (n *BERW[K, V]) Next() Node[K, V] { + return n.next +} + +func (n *BERW[K, V]) SetNext(v Node[K, V]) { + if v == nil { + n.next = nil + return + } + n.next = (*BERW[K, V])(v.AsPointer()) +} + +func (n *BERW[K, V]) PrevExp() Node[K, V] { + return n.prevExp +} + +func (n *BERW[K, V]) SetPrevExp(v Node[K, V]) { + if v == nil { + n.prevExp = nil + return + } + n.prevExp = (*BERW[K, V])(v.AsPointer()) +} + +func (n *BERW[K, V]) NextExp() Node[K, V] { + return n.nextExp +} + +func (n *BERW[K, V]) SetNextExp(v Node[K, V]) { + if v == nil { + n.nextExp = nil + return + } + n.nextExp = (*BERW[K, V])(v.AsPointer()) +} + +func (n *BERW[K, V]) HasExpired(now int64) bool { + return n.ExpiresAt() <= now +} + +func (n *BERW[K, V]) ExpiresAt() int64 { + return n.expiresAt.Load() +} + +func (n *BERW[K, V]) CASExpiresAt(old, new int64) bool { + return n.expiresAt.CompareAndSwap(old, new) +} + +func (n *BERW[K, V]) SetExpiresAt(new int64) { + n.expiresAt.Store(new) +} + +func (n *BERW[K, V]) RefreshableAt() int64 { + return n.refreshableAt.Load() +} + +func (n *BERW[K, V]) CASRefreshableAt(old, new int64) bool { + return n.refreshableAt.CompareAndSwap(old, new) +} + +func (n *BERW[K, V]) SetRefreshableAt(new int64) { + n.refreshableAt.Store(new) +} + +func (n *BERW[K, V]) IsFresh(now int64) bool { + return n.IsAlive() && n.RefreshableAt() > now +} + +func (n *BERW[K, V]) Weight() uint32 { + return n.weight +} + +func (n *BERW[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BERW[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BERW[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BERW[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BERW[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BERW[K, V]) GetQueueType() uint8 { + return n.queueType +} + +func (n *BERW[K, V]) 
SetQueueType(queueType uint8) { + n.queueType = queueType +} + +func (n *BERW[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BERW[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BERW[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BERW[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BERW[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BERW[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/bew.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bew.go new file mode 100644 index 00000000..18fbe059 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bew.go @@ -0,0 +1,195 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BEW is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Expiration +// +// 3. Weight +type BEW[K comparable, V any] struct { + key K + value V + prev *BEW[K, V] + next *BEW[K, V] + prevExp *BEW[K, V] + nextExp *BEW[K, V] + expiresAt atomic.Int64 + weight uint32 + state atomic.Uint32 + queueType uint8 +} + +// NewBEW creates a new BEW. +func NewBEW[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BEW[K, V]{ + key: key, + value: value, + weight: weight, + } + n.expiresAt.Store(expiresAt) + n.state.Store(aliveState) + + return n +} + +// CastPointerToBEW casts a pointer to BEW. +func CastPointerToBEW[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BEW[K, V])(ptr) +} + +func (n *BEW[K, V]) Key() K { + return n.key +} + +func (n *BEW[K, V]) Value() V { + return n.value +} + +func (n *BEW[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BEW[K, V]) Prev() Node[K, V] { + return n.prev +} + +func (n *BEW[K, V]) SetPrev(v Node[K, V]) { + if v == nil { + n.prev = nil + return + } + n.prev = (*BEW[K, V])(v.AsPointer()) +} + +func (n *BEW[K, V]) Next() Node[K, V] { + return n.next +} + +func (n *BEW[K, V]) SetNext(v Node[K, V]) { + if v == nil { + n.next = nil + return + } + n.next = (*BEW[K, V])(v.AsPointer()) +} + +func (n *BEW[K, V]) PrevExp() Node[K, V] { + return n.prevExp +} + +func (n *BEW[K, V]) SetPrevExp(v Node[K, V]) { + if v == nil { + n.prevExp = nil + return + } + n.prevExp = (*BEW[K, V])(v.AsPointer()) +} + +func (n *BEW[K, V]) NextExp() Node[K, V] { + return n.nextExp +} + +func (n *BEW[K, V]) SetNextExp(v Node[K, V]) { + if v == nil { + n.nextExp = nil + return + } + n.nextExp = (*BEW[K, V])(v.AsPointer()) +} + +func (n *BEW[K, V]) HasExpired(now int64) bool { + return n.ExpiresAt() <= now +} + +func (n *BEW[K, V]) ExpiresAt() int64 { + return n.expiresAt.Load() +} + +func (n *BEW[K, V]) CASExpiresAt(old, new int64) bool { + return n.expiresAt.CompareAndSwap(old, new) +} + +func (n *BEW[K, V]) SetExpiresAt(new int64) { + n.expiresAt.Store(new) +} + +func (n *BEW[K, V]) RefreshableAt() int64 { + panic("not implemented") +} + +func (n *BEW[K, V]) CASRefreshableAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BEW[K, V]) SetRefreshableAt(new int64) { + panic("not implemented") +} + +func (n *BEW[K, V]) IsFresh(now int64) bool { + return true +} + +func (n *BEW[K, V]) Weight() uint32 { + return 
n.weight +} + +func (n *BEW[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BEW[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BEW[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BEW[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BEW[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BEW[K, V]) GetQueueType() uint8 { + return n.queueType +} + +func (n *BEW[K, V]) SetQueueType(queueType uint8) { + n.queueType = queueType +} + +func (n *BEW[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BEW[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BEW[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BEW[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BEW[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BEW[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/br.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/br.go new file mode 100644 index 00000000..ecac3b05 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/br.go @@ -0,0 +1,168 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BR is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Refresh +type BR[K comparable, V any] struct { + key K + value V + refreshableAt atomic.Int64 +} + +// NewBR creates a new BR. +func NewBR[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BR[K, V]{ + key: key, + value: value, + } + n.refreshableAt.Store(refreshableAt) + + return n +} + +// CastPointerToBR casts a pointer to BR. 
+func CastPointerToBR[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BR[K, V])(ptr) +} + +func (n *BR[K, V]) Key() K { + return n.key +} + +func (n *BR[K, V]) Value() V { + return n.value +} + +func (n *BR[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BR[K, V]) Prev() Node[K, V] { + panic("not implemented") +} + +func (n *BR[K, V]) SetPrev(v Node[K, V]) { + panic("not implemented") +} + +func (n *BR[K, V]) Next() Node[K, V] { + panic("not implemented") +} + +func (n *BR[K, V]) SetNext(v Node[K, V]) { + panic("not implemented") +} + +func (n *BR[K, V]) PrevExp() Node[K, V] { + panic("not implemented") +} + +func (n *BR[K, V]) SetPrevExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BR[K, V]) NextExp() Node[K, V] { + panic("not implemented") +} + +func (n *BR[K, V]) SetNextExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BR[K, V]) HasExpired(now int64) bool { + return false +} + +func (n *BR[K, V]) ExpiresAt() int64 { + panic("not implemented") +} + +func (n *BR[K, V]) CASExpiresAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BR[K, V]) SetExpiresAt(new int64) { + panic("not implemented") +} + +func (n *BR[K, V]) RefreshableAt() int64 { + return n.refreshableAt.Load() +} + +func (n *BR[K, V]) CASRefreshableAt(old, new int64) bool { + return n.refreshableAt.CompareAndSwap(old, new) +} + +func (n *BR[K, V]) SetRefreshableAt(new int64) { + n.refreshableAt.Store(new) +} + +func (n *BR[K, V]) IsFresh(now int64) bool { + return n.IsAlive() && n.RefreshableAt() > now +} + +func (n *BR[K, V]) Weight() uint32 { + return 1 +} + +func (n *BR[K, V]) IsAlive() bool { + return true +} + +func (n *BR[K, V]) IsRetired() bool { + panic("not implemented") +} + +func (n *BR[K, V]) Retire() { + panic("not implemented") +} + +func (n *BR[K, V]) IsDead() bool { + panic("not implemented") +} + +func (n *BR[K, V]) Die() { + panic("not implemented") +} + +func (n *BR[K, V]) GetQueueType() uint8 { + panic("not implemented") +} + +func (n *BR[K, V]) SetQueueType(queueType uint8) { + panic("not implemented") +} + +func (n *BR[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BR[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BR[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BR[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BR[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BR[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/brw.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/brw.go new file mode 100644 index 00000000..c0f0686e --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/brw.go @@ -0,0 +1,185 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BRW is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Refresh +// +// 3. Weight +type BRW[K comparable, V any] struct { + key K + value V + prev *BRW[K, V] + next *BRW[K, V] + refreshableAt atomic.Int64 + weight uint32 + state atomic.Uint32 + queueType uint8 +} + +// NewBRW creates a new BRW. 
+func NewBRW[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BRW[K, V]{ + key: key, + value: value, + weight: weight, + } + n.refreshableAt.Store(refreshableAt) + n.state.Store(aliveState) + + return n +} + +// CastPointerToBRW casts a pointer to BRW. +func CastPointerToBRW[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BRW[K, V])(ptr) +} + +func (n *BRW[K, V]) Key() K { + return n.key +} + +func (n *BRW[K, V]) Value() V { + return n.value +} + +func (n *BRW[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BRW[K, V]) Prev() Node[K, V] { + return n.prev +} + +func (n *BRW[K, V]) SetPrev(v Node[K, V]) { + if v == nil { + n.prev = nil + return + } + n.prev = (*BRW[K, V])(v.AsPointer()) +} + +func (n *BRW[K, V]) Next() Node[K, V] { + return n.next +} + +func (n *BRW[K, V]) SetNext(v Node[K, V]) { + if v == nil { + n.next = nil + return + } + n.next = (*BRW[K, V])(v.AsPointer()) +} + +func (n *BRW[K, V]) PrevExp() Node[K, V] { + panic("not implemented") +} + +func (n *BRW[K, V]) SetPrevExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BRW[K, V]) NextExp() Node[K, V] { + panic("not implemented") +} + +func (n *BRW[K, V]) SetNextExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BRW[K, V]) HasExpired(now int64) bool { + return false +} + +func (n *BRW[K, V]) ExpiresAt() int64 { + panic("not implemented") +} + +func (n *BRW[K, V]) CASExpiresAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BRW[K, V]) SetExpiresAt(new int64) { + panic("not implemented") +} + +func (n *BRW[K, V]) RefreshableAt() int64 { + return n.refreshableAt.Load() +} + +func (n *BRW[K, V]) CASRefreshableAt(old, new int64) bool { + return n.refreshableAt.CompareAndSwap(old, new) +} + +func (n *BRW[K, V]) SetRefreshableAt(new int64) { + n.refreshableAt.Store(new) +} + +func (n *BRW[K, V]) IsFresh(now int64) bool { + return n.IsAlive() && n.RefreshableAt() > now +} + +func (n *BRW[K, V]) Weight() uint32 { + return n.weight +} + +func (n *BRW[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BRW[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BRW[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BRW[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BRW[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BRW[K, V]) GetQueueType() uint8 { + return n.queueType +} + +func (n *BRW[K, V]) SetQueueType(queueType uint8) { + n.queueType = queueType +} + +func (n *BRW[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BRW[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BRW[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BRW[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BRW[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BRW[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/bs.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bs.go new file mode 100644 index 00000000..448b69cb --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bs.go @@ -0,0 +1,179 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. 
+package node + +import ( + "sync/atomic" + "unsafe" +) + +// BS is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Size +type BS[K comparable, V any] struct { + key K + value V + prev *BS[K, V] + next *BS[K, V] + state atomic.Uint32 + queueType uint8 +} + +// NewBS creates a new BS. +func NewBS[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BS[K, V]{ + key: key, + value: value, + } + n.state.Store(aliveState) + + return n +} + +// CastPointerToBS casts a pointer to BS. +func CastPointerToBS[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BS[K, V])(ptr) +} + +func (n *BS[K, V]) Key() K { + return n.key +} + +func (n *BS[K, V]) Value() V { + return n.value +} + +func (n *BS[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BS[K, V]) Prev() Node[K, V] { + return n.prev +} + +func (n *BS[K, V]) SetPrev(v Node[K, V]) { + if v == nil { + n.prev = nil + return + } + n.prev = (*BS[K, V])(v.AsPointer()) +} + +func (n *BS[K, V]) Next() Node[K, V] { + return n.next +} + +func (n *BS[K, V]) SetNext(v Node[K, V]) { + if v == nil { + n.next = nil + return + } + n.next = (*BS[K, V])(v.AsPointer()) +} + +func (n *BS[K, V]) PrevExp() Node[K, V] { + panic("not implemented") +} + +func (n *BS[K, V]) SetPrevExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BS[K, V]) NextExp() Node[K, V] { + panic("not implemented") +} + +func (n *BS[K, V]) SetNextExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BS[K, V]) HasExpired(now int64) bool { + return false +} + +func (n *BS[K, V]) ExpiresAt() int64 { + panic("not implemented") +} + +func (n *BS[K, V]) CASExpiresAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BS[K, V]) SetExpiresAt(new int64) { + panic("not implemented") +} + +func (n *BS[K, V]) RefreshableAt() int64 { + panic("not implemented") +} + +func (n *BS[K, V]) CASRefreshableAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BS[K, V]) SetRefreshableAt(new int64) { + panic("not implemented") +} + +func (n *BS[K, V]) IsFresh(now int64) bool { + return true +} + +func (n *BS[K, V]) Weight() uint32 { + return 1 +} + +func (n *BS[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BS[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BS[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BS[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BS[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BS[K, V]) GetQueueType() uint8 { + return n.queueType +} + +func (n *BS[K, V]) SetQueueType(queueType uint8) { + n.queueType = queueType +} + +func (n *BS[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BS[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BS[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BS[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BS[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BS[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/bse.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bse.go new file mode 100644 index 00000000..9a27c5f7 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bse.go 
@@ -0,0 +1,193 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BSE is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Size +// +// 3. Expiration +type BSE[K comparable, V any] struct { + key K + value V + prev *BSE[K, V] + next *BSE[K, V] + prevExp *BSE[K, V] + nextExp *BSE[K, V] + expiresAt atomic.Int64 + state atomic.Uint32 + queueType uint8 +} + +// NewBSE creates a new BSE. +func NewBSE[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BSE[K, V]{ + key: key, + value: value, + } + n.expiresAt.Store(expiresAt) + n.state.Store(aliveState) + + return n +} + +// CastPointerToBSE casts a pointer to BSE. +func CastPointerToBSE[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BSE[K, V])(ptr) +} + +func (n *BSE[K, V]) Key() K { + return n.key +} + +func (n *BSE[K, V]) Value() V { + return n.value +} + +func (n *BSE[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BSE[K, V]) Prev() Node[K, V] { + return n.prev +} + +func (n *BSE[K, V]) SetPrev(v Node[K, V]) { + if v == nil { + n.prev = nil + return + } + n.prev = (*BSE[K, V])(v.AsPointer()) +} + +func (n *BSE[K, V]) Next() Node[K, V] { + return n.next +} + +func (n *BSE[K, V]) SetNext(v Node[K, V]) { + if v == nil { + n.next = nil + return + } + n.next = (*BSE[K, V])(v.AsPointer()) +} + +func (n *BSE[K, V]) PrevExp() Node[K, V] { + return n.prevExp +} + +func (n *BSE[K, V]) SetPrevExp(v Node[K, V]) { + if v == nil { + n.prevExp = nil + return + } + n.prevExp = (*BSE[K, V])(v.AsPointer()) +} + +func (n *BSE[K, V]) NextExp() Node[K, V] { + return n.nextExp +} + +func (n *BSE[K, V]) SetNextExp(v Node[K, V]) { + if v == nil { + n.nextExp = nil + return + } + n.nextExp = (*BSE[K, V])(v.AsPointer()) +} + +func (n *BSE[K, V]) HasExpired(now int64) bool { + return n.ExpiresAt() <= now +} + +func (n *BSE[K, V]) ExpiresAt() int64 { + return n.expiresAt.Load() +} + +func (n *BSE[K, V]) CASExpiresAt(old, new int64) bool { + return n.expiresAt.CompareAndSwap(old, new) +} + +func (n *BSE[K, V]) SetExpiresAt(new int64) { + n.expiresAt.Store(new) +} + +func (n *BSE[K, V]) RefreshableAt() int64 { + panic("not implemented") +} + +func (n *BSE[K, V]) CASRefreshableAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BSE[K, V]) SetRefreshableAt(new int64) { + panic("not implemented") +} + +func (n *BSE[K, V]) IsFresh(now int64) bool { + return true +} + +func (n *BSE[K, V]) Weight() uint32 { + return 1 +} + +func (n *BSE[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BSE[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BSE[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BSE[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BSE[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BSE[K, V]) GetQueueType() uint8 { + return n.queueType +} + +func (n *BSE[K, V]) SetQueueType(queueType uint8) { + n.queueType = queueType +} + +func (n *BSE[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BSE[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BSE[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BSE[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BSE[K, V]) InMainProtected() 
bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BSE[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/bser.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bser.go new file mode 100644 index 00000000..4eb61764 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bser.go @@ -0,0 +1,197 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BSER is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Size +// +// 3. Expiration +// +// 4. Refresh +type BSER[K comparable, V any] struct { + key K + value V + prev *BSER[K, V] + next *BSER[K, V] + prevExp *BSER[K, V] + nextExp *BSER[K, V] + expiresAt atomic.Int64 + refreshableAt atomic.Int64 + state atomic.Uint32 + queueType uint8 +} + +// NewBSER creates a new BSER. +func NewBSER[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BSER[K, V]{ + key: key, + value: value, + } + n.expiresAt.Store(expiresAt) + n.refreshableAt.Store(refreshableAt) + n.state.Store(aliveState) + + return n +} + +// CastPointerToBSER casts a pointer to BSER. +func CastPointerToBSER[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BSER[K, V])(ptr) +} + +func (n *BSER[K, V]) Key() K { + return n.key +} + +func (n *BSER[K, V]) Value() V { + return n.value +} + +func (n *BSER[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BSER[K, V]) Prev() Node[K, V] { + return n.prev +} + +func (n *BSER[K, V]) SetPrev(v Node[K, V]) { + if v == nil { + n.prev = nil + return + } + n.prev = (*BSER[K, V])(v.AsPointer()) +} + +func (n *BSER[K, V]) Next() Node[K, V] { + return n.next +} + +func (n *BSER[K, V]) SetNext(v Node[K, V]) { + if v == nil { + n.next = nil + return + } + n.next = (*BSER[K, V])(v.AsPointer()) +} + +func (n *BSER[K, V]) PrevExp() Node[K, V] { + return n.prevExp +} + +func (n *BSER[K, V]) SetPrevExp(v Node[K, V]) { + if v == nil { + n.prevExp = nil + return + } + n.prevExp = (*BSER[K, V])(v.AsPointer()) +} + +func (n *BSER[K, V]) NextExp() Node[K, V] { + return n.nextExp +} + +func (n *BSER[K, V]) SetNextExp(v Node[K, V]) { + if v == nil { + n.nextExp = nil + return + } + n.nextExp = (*BSER[K, V])(v.AsPointer()) +} + +func (n *BSER[K, V]) HasExpired(now int64) bool { + return n.ExpiresAt() <= now +} + +func (n *BSER[K, V]) ExpiresAt() int64 { + return n.expiresAt.Load() +} + +func (n *BSER[K, V]) CASExpiresAt(old, new int64) bool { + return n.expiresAt.CompareAndSwap(old, new) +} + +func (n *BSER[K, V]) SetExpiresAt(new int64) { + n.expiresAt.Store(new) +} + +func (n *BSER[K, V]) RefreshableAt() int64 { + return n.refreshableAt.Load() +} + +func (n *BSER[K, V]) CASRefreshableAt(old, new int64) bool { + return n.refreshableAt.CompareAndSwap(old, new) +} + +func (n *BSER[K, V]) SetRefreshableAt(new int64) { + n.refreshableAt.Store(new) +} + +func (n *BSER[K, V]) IsFresh(now int64) bool { + return n.IsAlive() && n.RefreshableAt() > now +} + +func (n *BSER[K, V]) Weight() uint32 { + return 1 +} + +func (n *BSER[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BSER[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BSER[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BSER[K, V]) IsDead() bool { + return 
n.state.Load() == deadState +} + +func (n *BSER[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BSER[K, V]) GetQueueType() uint8 { + return n.queueType +} + +func (n *BSER[K, V]) SetQueueType(queueType uint8) { + n.queueType = queueType +} + +func (n *BSER[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BSER[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BSER[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BSER[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BSER[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BSER[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/bsr.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bsr.go new file mode 100644 index 00000000..d00edcbf --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bsr.go @@ -0,0 +1,183 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BSR is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Size +// +// 3. Refresh +type BSR[K comparable, V any] struct { + key K + value V + prev *BSR[K, V] + next *BSR[K, V] + refreshableAt atomic.Int64 + state atomic.Uint32 + queueType uint8 +} + +// NewBSR creates a new BSR. +func NewBSR[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BSR[K, V]{ + key: key, + value: value, + } + n.refreshableAt.Store(refreshableAt) + n.state.Store(aliveState) + + return n +} + +// CastPointerToBSR casts a pointer to BSR. 
+func CastPointerToBSR[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BSR[K, V])(ptr) +} + +func (n *BSR[K, V]) Key() K { + return n.key +} + +func (n *BSR[K, V]) Value() V { + return n.value +} + +func (n *BSR[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BSR[K, V]) Prev() Node[K, V] { + return n.prev +} + +func (n *BSR[K, V]) SetPrev(v Node[K, V]) { + if v == nil { + n.prev = nil + return + } + n.prev = (*BSR[K, V])(v.AsPointer()) +} + +func (n *BSR[K, V]) Next() Node[K, V] { + return n.next +} + +func (n *BSR[K, V]) SetNext(v Node[K, V]) { + if v == nil { + n.next = nil + return + } + n.next = (*BSR[K, V])(v.AsPointer()) +} + +func (n *BSR[K, V]) PrevExp() Node[K, V] { + panic("not implemented") +} + +func (n *BSR[K, V]) SetPrevExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BSR[K, V]) NextExp() Node[K, V] { + panic("not implemented") +} + +func (n *BSR[K, V]) SetNextExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BSR[K, V]) HasExpired(now int64) bool { + return false +} + +func (n *BSR[K, V]) ExpiresAt() int64 { + panic("not implemented") +} + +func (n *BSR[K, V]) CASExpiresAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BSR[K, V]) SetExpiresAt(new int64) { + panic("not implemented") +} + +func (n *BSR[K, V]) RefreshableAt() int64 { + return n.refreshableAt.Load() +} + +func (n *BSR[K, V]) CASRefreshableAt(old, new int64) bool { + return n.refreshableAt.CompareAndSwap(old, new) +} + +func (n *BSR[K, V]) SetRefreshableAt(new int64) { + n.refreshableAt.Store(new) +} + +func (n *BSR[K, V]) IsFresh(now int64) bool { + return n.IsAlive() && n.RefreshableAt() > now +} + +func (n *BSR[K, V]) Weight() uint32 { + return 1 +} + +func (n *BSR[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BSR[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BSR[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BSR[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BSR[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BSR[K, V]) GetQueueType() uint8 { + return n.queueType +} + +func (n *BSR[K, V]) SetQueueType(queueType uint8) { + n.queueType = queueType +} + +func (n *BSR[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BSR[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BSR[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BSR[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BSR[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BSR[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/bw.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bw.go new file mode 100644 index 00000000..23cca819 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/bw.go @@ -0,0 +1,181 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated by the generator. +package node + +import ( + "sync/atomic" + "unsafe" +) + +// BW is a cache entry that provide the following features: +// +// 1. Base +// +// 2. Weight +type BW[K comparable, V any] struct { + key K + value V + prev *BW[K, V] + next *BW[K, V] + weight uint32 + state atomic.Uint32 + queueType uint8 +} + +// NewBW creates a new BW. 
+func NewBW[K comparable, V any](key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + n := &BW[K, V]{ + key: key, + value: value, + weight: weight, + } + n.state.Store(aliveState) + + return n +} + +// CastPointerToBW casts a pointer to BW. +func CastPointerToBW[K comparable, V any](ptr unsafe.Pointer) Node[K, V] { + return (*BW[K, V])(ptr) +} + +func (n *BW[K, V]) Key() K { + return n.key +} + +func (n *BW[K, V]) Value() V { + return n.value +} + +func (n *BW[K, V]) AsPointer() unsafe.Pointer { + return unsafe.Pointer(n) +} + +func (n *BW[K, V]) Prev() Node[K, V] { + return n.prev +} + +func (n *BW[K, V]) SetPrev(v Node[K, V]) { + if v == nil { + n.prev = nil + return + } + n.prev = (*BW[K, V])(v.AsPointer()) +} + +func (n *BW[K, V]) Next() Node[K, V] { + return n.next +} + +func (n *BW[K, V]) SetNext(v Node[K, V]) { + if v == nil { + n.next = nil + return + } + n.next = (*BW[K, V])(v.AsPointer()) +} + +func (n *BW[K, V]) PrevExp() Node[K, V] { + panic("not implemented") +} + +func (n *BW[K, V]) SetPrevExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BW[K, V]) NextExp() Node[K, V] { + panic("not implemented") +} + +func (n *BW[K, V]) SetNextExp(v Node[K, V]) { + panic("not implemented") +} + +func (n *BW[K, V]) HasExpired(now int64) bool { + return false +} + +func (n *BW[K, V]) ExpiresAt() int64 { + panic("not implemented") +} + +func (n *BW[K, V]) CASExpiresAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BW[K, V]) SetExpiresAt(new int64) { + panic("not implemented") +} + +func (n *BW[K, V]) RefreshableAt() int64 { + panic("not implemented") +} + +func (n *BW[K, V]) CASRefreshableAt(old, new int64) bool { + panic("not implemented") +} + +func (n *BW[K, V]) SetRefreshableAt(new int64) { + panic("not implemented") +} + +func (n *BW[K, V]) IsFresh(now int64) bool { + return true +} + +func (n *BW[K, V]) Weight() uint32 { + return n.weight +} + +func (n *BW[K, V]) IsAlive() bool { + return n.state.Load() == aliveState +} + +func (n *BW[K, V]) IsRetired() bool { + return n.state.Load() == retiredState +} + +func (n *BW[K, V]) Retire() { + n.state.Store(retiredState) +} + +func (n *BW[K, V]) IsDead() bool { + return n.state.Load() == deadState +} + +func (n *BW[K, V]) Die() { + n.state.Store(deadState) +} + +func (n *BW[K, V]) GetQueueType() uint8 { + return n.queueType +} + +func (n *BW[K, V]) SetQueueType(queueType uint8) { + n.queueType = queueType +} + +func (n *BW[K, V]) InWindow() bool { + return n.GetQueueType() == InWindowQueue +} + +func (n *BW[K, V]) MakeWindow() { + n.SetQueueType(InWindowQueue) +} + +func (n *BW[K, V]) InMainProbation() bool { + return n.GetQueueType() == InMainProbationQueue +} + +func (n *BW[K, V]) MakeMainProbation() { + n.SetQueueType(InMainProbationQueue) +} + +func (n *BW[K, V]) InMainProtected() bool { + return n.GetQueueType() == InMainProtectedQueue +} + +func (n *BW[K, V]) MakeMainProtected() { + n.SetQueueType(InMainProtectedQueue) +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/generated/node/manager.go b/vendor/github.com/maypok86/otter/v2/internal/generated/node/manager.go new file mode 100644 index 00000000..607d954f --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/generated/node/manager.go @@ -0,0 +1,186 @@ +// Code generated by NodeGenerator. DO NOT EDIT. + +// Package node is a generated generator package. 
+package node + +import ( + "strings" + "unsafe" +) + +const ( + InWindowQueue uint8 = iota + InMainProbationQueue + InMainProtectedQueue +) + +const ( + aliveState uint32 = iota + retiredState + deadState +) + +// Node is a cache entry. +type Node[K comparable, V any] interface { + // Key returns the key. + Key() K + // Value returns the value. + Value() V + // AsPointer returns the node as a pointer. + AsPointer() unsafe.Pointer + // Prev returns the previous node in the eviction policy. + Prev() Node[K, V] + // SetPrev sets the previous node in the eviction policy. + SetPrev(v Node[K, V]) + // Next returns the next node in the eviction policy. + Next() Node[K, V] + // SetNext sets the next node in the eviction policy. + SetNext(v Node[K, V]) + // PrevExp returns the previous node in the expiration policy. + PrevExp() Node[K, V] + // SetPrevExp sets the previous node in the expiration policy. + SetPrevExp(v Node[K, V]) + // NextExp returns the next node in the expiration policy. + NextExp() Node[K, V] + // SetNextExp sets the next node in the expiration policy. + SetNextExp(v Node[K, V]) + // HasExpired returns true if node has expired. + HasExpired(now int64) bool + // ExpiresAt returns the expiration time. + ExpiresAt() int64 + // CASExpiresAt executes the compare-and-swap operation for expiresAt. + CASExpiresAt(old, new int64) bool + // SetExpiresAt sets the expiration time. + SetExpiresAt(new int64) + // RefreshableAt returns the refresh time. + RefreshableAt() int64 + // CASRefreshableAt executes the compare-and-swap operation for refreshableAt. + CASRefreshableAt(old, new int64) bool + // SetRefreshableAt returns the refresh time. + SetRefreshableAt(new int64) + IsFresh(now int64) bool + // Weight returns the weight of the node. + Weight() uint32 + // IsAlive returns true if the entry is available in the hash-table and page replacement policy. + IsAlive() bool + // IsRetired returns true if the entry was removed from the hash-table and is awaiting removal from the page + // replacement policy. + IsRetired() bool + // Retire sets the node to the retired state. + Retire() + // IsDead returns true if the entry was removed from the hash-table and the page replacement policy. + IsDead() bool + // Die sets the node to the dead state. + Die() + // GetQueueType returns the queue that the entry's resides in (window, probation, or protected). + GetQueueType() uint8 + // SetQueueType sets queue that the entry resides in (window, probation, or protected). + SetQueueType(queueType uint8) + // InWindow returns true if the entry is in the Window or Main space. + InWindow() bool + // MakeWindow sets the status to the Window queue. + MakeWindow() + // InMainProbation returns true if the entry is in the Main space's probation queue. + InMainProbation() bool + // MakeMainProbation sets the status to the Main space's probation queue. + MakeMainProbation() + // InMainProtected returns if the entry is in the Main space's protected queue. + InMainProtected() bool + // MakeMainProtected sets the status to the Main space's protected queue. 
+ MakeMainProtected() +} + +func Equals[K comparable, V any](a, b Node[K, V]) bool { + if a == nil { + return b == nil || b.AsPointer() == nil + } + if b == nil { + return a.AsPointer() == nil + } + return a.AsPointer() == b.AsPointer() +} + +type Config struct { + WithSize bool + WithExpiration bool + WithWeight bool + WithRefresh bool +} + +type Manager[K comparable, V any] struct { + create func(key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] + fromPointer func(ptr unsafe.Pointer) Node[K, V] +} + +func NewManager[K comparable, V any](c Config) *Manager[K, V] { + var sb strings.Builder + sb.WriteString("b") + if c.WithSize { + sb.WriteString("s") + } + if c.WithExpiration { + sb.WriteString("e") + } + if c.WithRefresh { + sb.WriteString("r") + } + if c.WithWeight { + sb.WriteString("w") + } + nodeType := sb.String() + m := &Manager[K, V]{} + + switch nodeType { + case "b": + m.create = NewB[K, V] + m.fromPointer = CastPointerToB[K, V] + case "be": + m.create = NewBE[K, V] + m.fromPointer = CastPointerToBE[K, V] + case "ber": + m.create = NewBER[K, V] + m.fromPointer = CastPointerToBER[K, V] + case "berw": + m.create = NewBERW[K, V] + m.fromPointer = CastPointerToBERW[K, V] + case "bew": + m.create = NewBEW[K, V] + m.fromPointer = CastPointerToBEW[K, V] + case "br": + m.create = NewBR[K, V] + m.fromPointer = CastPointerToBR[K, V] + case "brw": + m.create = NewBRW[K, V] + m.fromPointer = CastPointerToBRW[K, V] + case "bs": + m.create = NewBS[K, V] + m.fromPointer = CastPointerToBS[K, V] + case "bse": + m.create = NewBSE[K, V] + m.fromPointer = CastPointerToBSE[K, V] + case "bser": + m.create = NewBSER[K, V] + m.fromPointer = CastPointerToBSER[K, V] + case "bsr": + m.create = NewBSR[K, V] + m.fromPointer = CastPointerToBSR[K, V] + case "bw": + m.create = NewBW[K, V] + m.fromPointer = CastPointerToBW[K, V] + default: + panic("not valid nodeType") + } + return m +} + +func (m *Manager[K, V]) Create(key K, value V, expiresAt, refreshableAt int64, weight uint32) Node[K, V] { + return m.create(key, value, expiresAt, refreshableAt, weight) +} + +func (m *Manager[K, V]) FromPointer(ptr unsafe.Pointer) Node[K, V] { + return m.fromPointer(ptr) +} + +func (m *Manager[K, V]) IsNil(n Node[K, V]) bool { + return n == nil || n.AsPointer() == nil +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/hashmap/map.go b/vendor/github.com/maypok86/otter/v2/internal/hashmap/map.go new file mode 100644 index 00000000..bcd50883 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/hashmap/map.go @@ -0,0 +1,631 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// Copyright (c) 2021 Andrey Pechkurov. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright notice. 
This code is a fork of xsync.MapOf from this file with some changes: +// https://github.com/puzpuzpuz/xsync/blob/main/mapof_test.go +// +// Use of this source code is governed by a MIT license that can be found +// at https://github.com/puzpuzpuz/xsync/blob/main/LICENSE + +package hashmap + +import ( + "fmt" + "math/bits" + "runtime" + "sync" + "sync/atomic" + "unsafe" + + "github.com/maypok86/otter/v2/internal/xmath" + "github.com/maypok86/otter/v2/internal/xruntime" +) + +type mapResizeHint int + +const ( + mapGrowHint mapResizeHint = 0 + mapShrinkHint mapResizeHint = 1 + mapClearHint mapResizeHint = 2 +) + +const ( + // number of Map nodes per bucket; 5 nodes lead to size of 64B + // (one cache line) on 64-bit machines. + nodesPerMapBucket = 5 + defaultMeta uint64 = 0x8080808080808080 + metaMask uint64 = 0xffffffffff + defaultMetaMasked = defaultMeta & metaMask + emptyMetaSlot uint8 = 0x80 + + // threshold fraction of table occupation to start a table shrinking + // when deleting the last entry in a bucket chain. + mapShrinkFraction = 128 + // map load factor to trigger a table resize during insertion; + // a map holds up to mapLoadFactor*nodesPerMapBucket*mapTableLen + // key-value pairs (this is a soft limit). + mapLoadFactor = 0.75 + // minimal table size, i.e. number of buckets; thus, minimal map + // capacity can be calculated as nodesPerMapBucket*defaultMinMapTableLen. + defaultMinMapTableLen = 32 + // minimum counter stripes to use. + minMapCounterLen = 8 + // maximum counter stripes to use; stands for around 4KB of memory. + maxMapCounterLen = 32 + // minimum buckets per goroutine during parallel resize. + minBucketsPerGoroutine = 64 +) + +// Map is like a Go map[K]V but is safe for concurrent +// use by multiple goroutines without additional locking or +// coordination. It follows the interface of sync.Map with +// a number of valuable extensions like Compute or Size. +// +// A Map must not be copied after first use. +// +// Map uses a modified version of Cache-Line Hash Table (CLHT) +// data structure: https://github.com/LPD-EPFL/CLHT +// +// CLHT is built around idea to organize the hash table in +// cache-line-sized buckets, so that on all modern CPUs update +// operations complete with at most one cache-line transfer. +// Also, Get operations involve no write to memory, as well as no +// mutexes or any other sort of locks. Due to this design, in all +// considered scenarios Map outperforms sync.Map. +// +// Map also borrows ideas from Java's j.u.c.ConcurrentHashMap +// (immutable K/V pair structs instead of atomic snapshots) +// and C++'s absl::flat_hash_map (meta memory and SWAR-based +// lookups). 
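+//
+// A minimal usage sketch, written as if from inside this module (the package
+// is internal), and assuming the generated node.Manager from
+// internal/generated/node as the node factory:
+//
+//	nm := node.NewManager[string, int](node.Config{})
+//	m := hashmap.New[string, int, node.Node[string, int]](nm)
+//	m.Compute("answer", func(old node.Node[string, int]) node.Node[string, int] {
+//		// old is nil when the key is absent; returning a new node inserts it.
+//		return nm.Create("answer", 42, 0, 0, 1)
+//	})
+//	if n := m.Get("answer"); !nm.IsNil(n) {
+//		_ = n.Value() // 42
+//	}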
+type Map[K comparable, V any, N mapNode[K, V]] struct { + totalGrowths atomic.Int64 + totalShrinks atomic.Int64 + resizing atomic.Bool // resize in progress flag + resizeMu sync.Mutex // only used along with resizeCond + resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications) + table atomic.Pointer[mapTable[K]] // *mapTable + nodeManager mapNodeManager[K, V, N] + minTableLen int +} + +type counterStripe struct { + c int64 + //lint:ignore U1000 prevents false sharing + pad [xruntime.CacheLineSize - 8]byte +} + +type mapTable[K comparable] struct { + buckets []bucketPadded + // striped counter for number of table nodes; + // used to determine if a table shrinking is needed + // occupies min(buckets_memory/1024, 64KB) of memory + size []counterStripe + hasher xruntime.Hasher[K] +} + +// bucketPadded is a CL-sized map bucket holding up to +// nodesPerMapBucket nodes. +type bucketPadded struct { + //lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs + pad [64 - unsafe.Sizeof(bucket{})]byte + bucket +} + +type bucket struct { + meta atomic.Uint64 + nodes [nodesPerMapBucket]unsafe.Pointer // node.Node + next atomic.Pointer[bucketPadded] + mu sync.Mutex +} + +// NewWithSize creates a new Map instance with capacity enough +// to hold size nodes. If size is zero or negative, the value +// is ignored. +func NewWithSize[K comparable, V any, N mapNode[K, V]](nodeManager mapNodeManager[K, V, N], size int) *Map[K, V, N] { + return newMap[K, V, N](nodeManager, size) +} + +// New creates a new Map instance. +func New[K comparable, V any, N mapNode[K, V]](nodeManager mapNodeManager[K, V, N]) *Map[K, V, N] { + return newMap[K, V, N](nodeManager, defaultMinMapTableLen*nodesPerMapBucket) +} + +func newMap[K comparable, V any, N mapNode[K, V]](nodeManager mapNodeManager[K, V, N], sizeHint int) *Map[K, V, N] { + m := &Map[K, V, N]{ + nodeManager: nodeManager, + } + m.resizeCond = *sync.NewCond(&m.resizeMu) + var table *mapTable[K] + if sizeHint <= defaultMinMapTableLen*nodesPerMapBucket { + table = newMapTable[K](defaultMinMapTableLen) + } else { + tableLen := xmath.RoundUpPowerOf2(uint32((float64(sizeHint) / nodesPerMapBucket) / mapLoadFactor)) + table = newMapTable[K](int(tableLen)) + } + m.minTableLen = len(table.buckets) + m.table.Store(table) + return m +} + +func newMapTable[K comparable](minTableLen int) *mapTable[K] { + buckets := make([]bucketPadded, minTableLen) + for i := range buckets { + buckets[i].meta.Store(defaultMeta) + } + counterLen := minTableLen >> 10 + if counterLen < minMapCounterLen { + counterLen = minMapCounterLen + } else if counterLen > maxMapCounterLen { + counterLen = maxMapCounterLen + } + counter := make([]counterStripe, counterLen) + t := &mapTable[K]{ + buckets: buckets, + size: counter, + hasher: xruntime.NewHasher[K](), + } + return t +} + +func zeroValue[V any]() V { + var zero V + return zero +} + +// Get returns the node stored in the map for a key, or nil +// if no value is present. 
+func (m *Map[K, V, N]) Get(key K) N { + table := m.table.Load() + hash := table.hasher.Hash(key) + h1 := h1(hash) + h2w := broadcast(h2(hash)) + //nolint:gosec // there is no overflow + bidx := uint64(len(table.buckets)-1) & h1 + b := &table.buckets[bidx] + for { + metaw := b.meta.Load() + markedw := markZeroBytes(metaw^h2w) & metaMask + for markedw != 0 { + idx := firstMarkedByteIndex(markedw) + nptr := atomic.LoadPointer(&b.nodes[idx]) + if nptr != nil { + n := m.nodeManager.FromPointer(nptr) + if n.Key() == key { + return n + } + } + markedw &= markedw - 1 + } + b = b.next.Load() + if b == nil { + return zeroValue[N]() + } + } +} + +// Compute either sets the computed new value for the key or deletes +// the value for the key. +// +// This call locks a hash table bucket while the compute function +// is executed. It means that modifications on other nodes in +// the bucket will be blocked until the computeFn executes. Consider +// this when the function includes long-running operations. +func (m *Map[K, V, N]) Compute(key K, computeFunc func(n N) N) N { + for { + compute_attempt: + var ( + emptyb *bucketPadded + emptyidx int + ) + table := m.table.Load() + tableLen := len(table.buckets) + hash := table.hasher.Hash(key) + h1 := h1(hash) + h2 := h2(hash) + h2w := broadcast(h2) + //nolint:gosec // there is no overflow + bidx := uint64(len(table.buckets)-1) & h1 + rootb := &table.buckets[bidx] + rootb.mu.Lock() + // The following two checks must go in reverse to what's + // in the resize method. + if m.resizeInProgress() { + // Resize is in progress. Wait, then go for another attempt. + rootb.mu.Unlock() + m.waitForResize() + goto compute_attempt + } + if m.newerTableExists(table) { + // Someone resized the table. Go for another attempt. + rootb.mu.Unlock() + goto compute_attempt + } + b := rootb + for { + metaw := b.meta.Load() + markedw := markZeroBytes(metaw^h2w) & metaMask + for markedw != 0 { + idx := firstMarkedByteIndex(markedw) + nptr := b.nodes[idx] + if nptr != nil { + oldNode := m.nodeManager.FromPointer(nptr) + if oldNode.Key() == key { + // In-place update/delete. + newNode := computeFunc(oldNode) + // oldNode != nil + if m.nodeManager.IsNil(newNode) { + // Deletion. + // First we update the hash, then the node. + newmetaw := setByte(metaw, emptyMetaSlot, idx) + b.meta.Store(newmetaw) + atomic.StorePointer(&b.nodes[idx], nil) + rootb.mu.Unlock() + table.addSize(bidx, -1) + // Might need to shrink the table if we left bucket empty. + if newmetaw == defaultMeta { + m.resize(table, mapShrinkHint) + } + return newNode + } + if oldNode.AsPointer() != newNode.AsPointer() { + atomic.StorePointer(&b.nodes[idx], newNode.AsPointer()) + } + rootb.mu.Unlock() + return newNode + } + } + markedw &= markedw - 1 + } + if emptyb == nil { + // Search for empty nodes (up to 5 per bucket). + emptyw := metaw & defaultMetaMasked + if emptyw != 0 { + idx := firstMarkedByteIndex(emptyw) + emptyb = b + emptyidx = idx + } + } + if b.next.Load() == nil { + if emptyb != nil { + // Insertion into an existing bucket. + var zeroNode N + // oldNode == nil. + newNode := computeFunc(zeroNode) + if m.nodeManager.IsNil(newNode) { + // no op. + rootb.mu.Unlock() + return newNode + } + // First we update meta, then the node. 
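+					// A concurrent Get can observe the new meta byte before the node
+					// pointer is published below; it tolerates this by skipping nil slots.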
+ emptyb.meta.Store(setByte(emptyb.meta.Load(), h2, emptyidx)) + atomic.StorePointer(&emptyb.nodes[emptyidx], newNode.AsPointer()) + rootb.mu.Unlock() + table.addSize(bidx, 1) + return newNode + } + growThreshold := float64(tableLen) * nodesPerMapBucket * mapLoadFactor + if table.sumSize() > int64(growThreshold) { + // Need to grow the table. Then go for another attempt. + rootb.mu.Unlock() + m.resize(table, mapGrowHint) + goto compute_attempt + } + // Insertion into a new bucket. + var zeroNode N + // oldNode == nil + newNode := computeFunc(zeroNode) + if m.nodeManager.IsNil(newNode) { + rootb.mu.Unlock() + return newNode + } + // Create and append a bucket. + newb := new(bucketPadded) + newb.meta.Store(setByte(defaultMeta, h2, 0)) + newb.nodes[0] = newNode.AsPointer() + b.next.Store(newb) + rootb.mu.Unlock() + table.addSize(bidx, 1) + return newNode + } + b = b.next.Load() + } + } +} + +func (m *Map[K, V, N]) newerTableExists(table *mapTable[K]) bool { + return table != m.table.Load() +} + +func (m *Map[K, V, N]) resizeInProgress() bool { + return m.resizing.Load() +} + +func (m *Map[K, V, N]) waitForResize() { + m.resizeMu.Lock() + for m.resizeInProgress() { + m.resizeCond.Wait() + } + m.resizeMu.Unlock() +} + +func (m *Map[K, V, N]) resize(knownTable *mapTable[K], hint mapResizeHint) { + knownTableLen := len(knownTable.buckets) + // Fast path for shrink attempts. + if hint == mapShrinkHint { + if m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*nodesPerMapBucket)/mapShrinkFraction) { + return + } + } + // Slow path. + if !m.resizing.CompareAndSwap(false, true) { + // Someone else started resize. Wait for it to finish. + m.waitForResize() + return + } + var newTable *mapTable[K] + table := m.table.Load() + tableLen := len(table.buckets) + switch hint { + case mapGrowHint: + // Grow the table with factor of 2. + m.totalGrowths.Add(1) + newTable = newMapTable[K](tableLen << 1) + case mapShrinkHint: + shrinkThreshold := int64((tableLen * nodesPerMapBucket) / mapShrinkFraction) + if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold { + // Shrink the table with factor of 2. + m.totalShrinks.Add(1) + newTable = newMapTable[K](tableLen >> 1) + } else { + // No need to shrink. Wake up all waiters and give up. + m.resizeMu.Lock() + m.resizing.Store(false) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() + return + } + case mapClearHint: + newTable = newMapTable[K](m.minTableLen) + default: + panic(fmt.Sprintf("unexpected resize hint: %d", hint)) + } + // Copy the data only if we're not clearing the map. + if hint != mapClearHint { + // Enable parallel resizing when serialResize is false and table is large enough. 
+ // Calculate optimal goroutine count based on table size and available CPUs + chunks := 1 + if tableLen >= minBucketsPerGoroutine*2 { + chunks = min(tableLen/minBucketsPerGoroutine, runtime.GOMAXPROCS(0)) + chunks = max(chunks, 1) + } + if chunks > 1 { + var copyWg sync.WaitGroup + chunkSize := (tableLen + chunks - 1) / chunks + for c := 0; c < chunks; c++ { + copyWg.Add(1) + go func(start, end int) { + for i := start; i < end; i++ { + copied := m.copyBucketWithDestLock(&table.buckets[i], newTable) + if copied > 0 { + //nolint:gosec // there is no overflow + newTable.addSize(uint64(i), copied) + } + } + copyWg.Done() + }(c*chunkSize, min((c+1)*chunkSize, tableLen)) + } + copyWg.Wait() + } else { + for i := 0; i < tableLen; i++ { + copied := m.copyBucket(&table.buckets[i], newTable) + //nolint:gosec // there is no overflow + newTable.addSizePlain(uint64(i), copied) + } + } + } + // Publish the new table and wake up all waiters. + m.table.Store(newTable) + m.resizeMu.Lock() + m.resizing.Store(false) + m.resizeCond.Broadcast() + m.resizeMu.Unlock() +} + +func (m *Map[K, V, N]) copyBucketWithDestLock(b *bucketPadded, destTable *mapTable[K]) (copied int) { + rootb := b + rootb.mu.Lock() + for { + for i := 0; i < nodesPerMapBucket; i++ { + if b.nodes[i] == nil { + continue + } + n := m.nodeManager.FromPointer(b.nodes[i]) + hash := destTable.hasher.Hash(n.Key()) + //nolint:gosec // there is no overflow + bidx := uint64(len(destTable.buckets)-1) & h1(hash) + destb := &destTable.buckets[bidx] + destb.mu.Lock() + appendToBucket(h2(hash), b.nodes[i], destb) + destb.mu.Unlock() + copied++ + } + if next := b.next.Load(); next == nil { + rootb.mu.Unlock() + //nolint:nakedret // it's ok + return + } else { + b = next + } + } +} + +func (m *Map[K, V, N]) copyBucket(b *bucketPadded, destTable *mapTable[K]) (copied int) { + rootb := b + rootb.mu.Lock() + //nolint:gocritic // nesting is normal here + for { + for i := 0; i < nodesPerMapBucket; i++ { + if b.nodes[i] != nil { + n := m.nodeManager.FromPointer(b.nodes[i]) + hash := destTable.hasher.Hash(n.Key()) + //nolint:gosec // there is no overflow + bidx := uint64(len(destTable.buckets)-1) & h1(hash) + destb := &destTable.buckets[bidx] + appendToBucket(h2(hash), b.nodes[i], destb) + copied++ + } + } + if next := b.next.Load(); next == nil { + rootb.mu.Unlock() + //nolint:nakedret // it's ok + return + } else { + b = next + } + } +} + +// Range calls f sequentially for each key and value present in the +// map. If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot +// of the Map's contents: no key will be visited more than once, but +// if the value for any key is stored or deleted concurrently, Range +// may reflect any mapping for that key from any point during the +// Range call. +// +// It is safe to modify the map while iterating it, including entry +// creation, modification and deletion. However, the concurrent +// modification rule apply, i.e. the changes may be not reflected +// in the subsequently iterated nodes. +func (m *Map[K, V, N]) Range(fn func(n N) bool) { + var zeroPtr unsafe.Pointer + // Pre-allocate array big enough to fit nodes for most hash tables. + bnodes := make([]unsafe.Pointer, 0, 16*nodesPerMapBucket) + table := m.table.Load() + for i := range table.buckets { + rootb := &table.buckets[i] + b := rootb + // Prevent concurrent modifications and copy all nodes into + // the intermediate slice. 
+ rootb.mu.Lock() + for { + for i := 0; i < nodesPerMapBucket; i++ { + if b.nodes[i] != nil { + bnodes = append(bnodes, b.nodes[i]) + } + } + if next := b.next.Load(); next == nil { + rootb.mu.Unlock() + break + } else { + b = next + } + } + // Call the function for all copied nodes. + for j := range bnodes { + n := m.nodeManager.FromPointer(bnodes[j]) + if !fn(n) { + return + } + // Remove the reference to avoid preventing the copied + // nodes from being GCed until this method finishes. + bnodes[j] = zeroPtr + } + bnodes = bnodes[:0] + } +} + +// Clear deletes all keys and values currently stored in the map. +func (m *Map[K, V, N]) Clear() { + table := m.table.Load() + m.resize(table, mapClearHint) +} + +// Size returns current size of the map. +func (m *Map[K, V, N]) Size() int { + table := m.table.Load() + return int(table.sumSize()) +} + +func appendToBucket(h2 uint8, nodePtr unsafe.Pointer, b *bucketPadded) { + for { + for i := 0; i < nodesPerMapBucket; i++ { + if b.nodes[i] == nil { + b.meta.Store(setByte(b.meta.Load(), h2, i)) + b.nodes[i] = nodePtr + return + } + } + if next := b.next.Load(); next == nil { + newb := new(bucketPadded) + newb.meta.Store(setByte(defaultMeta, h2, 0)) + newb.nodes[0] = nodePtr + b.next.Store(newb) + return + } else { + b = next + } + } +} + +func (table *mapTable[K]) addSize(bucketIdx uint64, delta int) { + //nolint:gosec // there is no overflow + cidx := uint64(len(table.size)-1) & bucketIdx + atomic.AddInt64(&table.size[cidx].c, int64(delta)) +} + +func (table *mapTable[K]) addSizePlain(bucketIdx uint64, delta int) { + //nolint:gosec // there is no overflow + cidx := uint64(len(table.size)-1) & bucketIdx + table.size[cidx].c += int64(delta) +} + +func (table *mapTable[K]) sumSize() int64 { + sum := int64(0) + for i := range table.size { + sum += atomic.LoadInt64(&table.size[i].c) + } + return max(sum, 0) +} + +func h1(h uint64) uint64 { + return h >> 7 +} + +func h2(h uint64) uint8 { + //nolint:gosec // there is no overflow + return uint8(h & 0x7f) +} + +func broadcast(b uint8) uint64 { + return 0x101010101010101 * uint64(b) +} + +func firstMarkedByteIndex(w uint64) int { + return bits.TrailingZeros64(w) >> 3 +} + +// SWAR byte search: may produce false positives, e.g. for 0x0100, +// so make sure to double-check bytes found by this function. +func markZeroBytes(w uint64) uint64 { + return ((w - 0x0101010101010101) & (^w) & 0x8080808080808080) +} + +func setByte(w uint64, b uint8, idx int) uint64 { + shift := idx << 3 + return (w &^ (0xff << shift)) | (uint64(b) << shift) +} diff --git a/vendor/github.com/maypok86/otter/internal/expiry/disabled.go b/vendor/github.com/maypok86/otter/v2/internal/hashmap/node.go similarity index 55% rename from vendor/github.com/maypok86/otter/internal/expiry/disabled.go rename to vendor/github.com/maypok86/otter/v2/internal/hashmap/node.go index e75494ac..1b036b69 100644 --- a/vendor/github.com/maypok86/otter/internal/expiry/disabled.go +++ b/vendor/github.com/maypok86/otter/v2/internal/hashmap/node.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 Alexey Mayshev. All rights reserved. +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,24 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package expiry +package hashmap -import "github.com/maypok86/otter/internal/generated/node" +import "unsafe" -type Disabled[K comparable, V any] struct{} - -func NewDisabled[K comparable, V any]() *Disabled[K, V] { - return &Disabled[K, V]{} -} - -func (d *Disabled[K, V]) Add(n node.Node[K, V]) { -} - -func (d *Disabled[K, V]) Delete(n node.Node[K, V]) { -} - -func (d *Disabled[K, V]) DeleteExpired() { +type mapNode[K comparable, V any] interface { + Key() K + Value() V + AsPointer() unsafe.Pointer } -func (d *Disabled[K, V]) Clear() { +type mapNodeManager[K comparable, V any, N mapNode[K, V]] interface { + FromPointer(ptr unsafe.Pointer) N + IsNil(n N) bool } diff --git a/vendor/github.com/maypok86/otter/v2/internal/lossy/ring.go b/vendor/github.com/maypok86/otter/v2/internal/lossy/ring.go new file mode 100644 index 00000000..a5763cb3 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/lossy/ring.go @@ -0,0 +1,131 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This is a port of lossy buffers from Caffeine. +// https://github.com/ben-manes/caffeine/blob/master/caffeine/src/main/java/com/github/benmanes/caffeine/cache/BoundedBuffer.java + +package lossy + +import ( + "sync/atomic" + "unsafe" + + "github.com/maypok86/otter/v2/internal/generated/node" + "github.com/maypok86/otter/v2/internal/xruntime" +) + +// Status is the result of adding a node to the buffer. +type Status int8 + +const ( + // Success means that the node was added. + Success Status = 0 + // Failed means that the CAS failed. + Failed Status = -1 + // Full means that the buffer is full. + Full Status = 1 +) + +const ( + // The maximum number of elements per buffer. + bufferSize = 16 + mask = uint64(bufferSize - 1) +) + +// ring is a circular ring buffer stores the elements being transferred by the producers to the consumer. +// the monotonically increasing count of reads and writes allow indexing sequentially to the next +// element location based upon a power-of-two sizing. +// +// The producers race to read the counts, check if there is available capacity, and if so then try +// once to CAS to the next write count. If the increment is successful then the producer lazily +// publishes the element. The producer does not retry or block when unsuccessful due to a failed +// CAS or the buffer being full. +// +// The consumer reads the counts and takes the available elements. The clearing of the elements +// and the next read count are lazily set. +// +// This implementation is striped to further increase concurrency by rehashing and dynamically +// adding new buffers when contention is detected, up to an internal maximum. When rehashing in +// order to discover an available buffer, the producer may retry adding its element to determine +// whether it found a satisfactory buffer or if resizing is necessary. 
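+//
+// From the rest of the module the exported Striped wrapper (striped.go) is the
+// entry point; a minimal sketch, assuming the generated node.Manager from
+// internal/generated/node:
+//
+//	nm := node.NewManager[string, int](node.Config{})
+//	buf := lossy.NewStriped(4, nm) // grows to at most 4 striped rings under contention
+//	st := buf.Add(nm.Create("k", 1, 0, 0, 1))
+//	_ = st // may be Success, Failed (lost CAS race), or Full; losses are tolerated
+//	buf.DrainTo(func(n node.Node[string, int]) {
+//		_ = n.Key() // the single consumer drains published elements
+//	})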
+type ring[K comparable, V any] struct { + head atomic.Uint64 + _ [xruntime.CacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte + tail atomic.Uint64 + _ [xruntime.CacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte + nodeManager *node.Manager[K, V] + buffer [bufferSize]unsafe.Pointer // node.Node[K, V] +} + +func newRing[K comparable, V any](nodeManager *node.Manager[K, V], n node.Node[K, V]) *ring[K, V] { + r := &ring[K, V]{ + nodeManager: nodeManager, + } + r.buffer[0] = n.AsPointer() + r.tail.Store(1) + return r +} + +func (r *ring[K, V]) add(n node.Node[K, V]) Status { + head := r.head.Load() + tail := r.tail.Load() + size := tail - head + if size >= bufferSize { + return Full + } + + if r.tail.CompareAndSwap(tail, tail+1) { + atomic.StorePointer(&r.buffer[tail&mask], n.AsPointer()) + return Success + } + return Failed +} + +func (r *ring[K, V]) drainTo(consumer func(n node.Node[K, V])) { + head := r.head.Load() + tail := r.tail.Load() + size := tail - head + if size == 0 { + return + } + + nm := r.nodeManager + for head != tail { + index := head & mask + ptr := atomic.LoadPointer(&r.buffer[index]) + if ptr == nil { + // not published. + break + } + atomic.StorePointer(&r.buffer[index], nil) + consumer(nm.FromPointer(ptr)) + head++ + } + r.head.Store(head) +} + +func (r *ring[K, V]) len() int { + //nolint:gosec // there is no overflow + return int(r.tail.Load() - r.head.Load()) +} + +/* +func (r *ring[K, V]) clear() { + for i := 0; i < bufferSize; i++ { + atomic.StorePointer(&r.buffer[i], nil) + } + r.head.Store(0) + r.tail.Store(0) +} +*/ diff --git a/vendor/github.com/maypok86/otter/v2/internal/lossy/striped.go b/vendor/github.com/maypok86/otter/v2/internal/lossy/striped.go new file mode 100644 index 00000000..7810cf0e --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/lossy/striped.go @@ -0,0 +1,235 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This is a port of lossy buffers from Caffeine. +// https://github.com/ben-manes/caffeine/blob/master/caffeine/src/main/java/com/github/benmanes/caffeine/cache/StripedBuffer.java + +package lossy + +import ( + "sync" + "sync/atomic" + + "github.com/maypok86/otter/v2/internal/generated/node" + "github.com/maypok86/otter/v2/internal/xruntime" +) + +const ( + attempts = 3 +) + +// pool for P tokens. +var tokenPool sync.Pool + +// a P token is used to point at the current OS thread (P) +// on which the goroutine is run; exact identity of the thread, +// as well as P migration tolerance, is not important since +// it's used to as a best effort mechanism for assigning +// concurrent operations (goroutines) to different stripes of +// the Adder. 
+type token struct { + idx uint32 + padding [xruntime.CacheLineSize - 4]byte +} + +type striped[K comparable, V any] struct { + buffers []atomic.Pointer[ring[K, V]] + len int +} + +// Striped is a multiple-producer / single-consumer buffer that rejects new elements if it is full or +// fails spuriously due to contention. Unlike a queue and stack, a buffer does not guarantee an +// ordering of elements in either FIFO or LIFO order. +type Striped[K comparable, V any] struct { + nodeManager *node.Manager[K, V] + maxLen int + striped atomic.Pointer[striped[K, V]] + busy atomic.Uint32 +} + +func NewStriped[K comparable, V any](maxLen int, nodeManager *node.Manager[K, V]) *Striped[K, V] { + return &Striped[K, V]{ + nodeManager: nodeManager, + maxLen: maxLen, + } +} + +// Add inserts the specified element into this buffer if it is possible to do so immediately without +// violating capacity restrictions. The addition is allowed to fail spuriously if multiple +// goroutines insert concurrently. +func (s *Striped[K, V]) Add(n node.Node[K, V]) Status { + t, ok := tokenPool.Get().(*token) + if !ok { + t = &token{ + idx: xruntime.Fastrand(), + } + } + defer tokenPool.Put(t) + + bs := s.striped.Load() + if bs == nil { + return s.expandOrRetry(n, t, true) + } + + //nolint:gosec // len will never overflow uint32 + buffer := bs.buffers[t.idx&uint32(bs.len-1)].Load() + if buffer == nil { + return s.expandOrRetry(n, t, true) + } + + result := buffer.add(n) + if result == Failed { + return s.expandOrRetry(n, t, false) + } + + return result +} + +func (s *Striped[K, V]) expandOrRetry(n node.Node[K, V], t *token, wasUncontended bool) Status { + result := Failed + // True if last slot nonempty. + collide := true + + for attempt := 0; attempt < attempts; attempt++ { + bs := s.striped.Load() + if bs != nil && bs.len > 0 { + //nolint:gosec // len will never overflow uint32 + buffer := bs.buffers[t.idx&uint32(bs.len-1)].Load() + //nolint:gocritic // the switch statement looks even worse here + if buffer == nil { + if s.busy.Load() == 0 && s.busy.CompareAndSwap(0, 1) { + // Try to attach new buffer. + created := false + rs := s.striped.Load() + if rs != nil && rs.len > 0 { + // Recheck under lock. + //nolint:gosec // len will never overflow uint32 + j := t.idx & uint32(rs.len-1) + if rs.buffers[j].Load() == nil { + rs.buffers[j].Store(newRing(s.nodeManager, n)) + created = true + } + } + s.busy.Store(0) + if created { + result = Success + break + } + // Slot is now non-empty. + continue + } + collide = false + } else if !wasUncontended { + // CAS already known to fail. + // Continue after rehash. + wasUncontended = true + } else { + result = buffer.add(n) + //nolint:gocritic // the switch statement looks even worse here + if result != Failed { + break + } else if bs.len >= s.maxLen || s.striped.Load() != bs { + // At max size or stale. 
+ collide = false + } else if !collide { + collide = true + } else if s.busy.Load() == 0 && s.busy.CompareAndSwap(0, 1) { + if s.striped.Load() == bs { + length := bs.len << 1 + striped := &striped[K, V]{ + buffers: make([]atomic.Pointer[ring[K, V]], length), + len: length, + } + for j := 0; j < bs.len; j++ { + striped.buffers[j].Store(bs.buffers[j].Load()) + } + s.striped.Store(striped) + } + s.busy.Store(0) + collide = false + continue + } + } + t.idx = xruntime.Fastrand() + } else if s.busy.Load() == 0 && s.striped.Load() == bs && s.busy.CompareAndSwap(0, 1) { + init := false + if s.striped.Load() == bs { + striped := &striped[K, V]{ + buffers: make([]atomic.Pointer[ring[K, V]], 1), + len: 1, + } + striped.buffers[0].Store(newRing(s.nodeManager, n)) + s.striped.Store(striped) + init = true + } + s.busy.Store(0) + if init { + result = Success + break + } + } + } + + return result +} + +// DrainTo drains the buffer, sending each element to the consumer for processing. The caller must ensure +// that a consumer has exclusive read access to the buffer. +func (s *Striped[K, V]) DrainTo(consumer func(n node.Node[K, V])) { + bs := s.striped.Load() + if bs == nil { + return + } + for i := 0; i < bs.len; i++ { + b := bs.buffers[i].Load() + if b != nil { + b.drainTo(consumer) + } + } +} + +func (s *Striped[K, V]) Len() int { + result := 0 + bs := s.striped.Load() + if bs == nil { + return result + } + for i := 0; i < bs.len; i++ { + b := bs.buffers[i].Load() + if b == nil { + continue + } + result += b.len() + } + return result +} + +/* +func (s *Striped[K, V]) Clear() { + bs := s.striped.Load() + if bs == nil { + return + } + for s.busy.Load() != 0 || !s.busy.CompareAndSwap(0, 1) { + runtime.Gosched() + } + for i := 0; i < bs.len; i++ { + b := bs.buffers[i].Load() + if b != nil { + b.clear() + } + } + s.busy.Store(0) +} +*/ diff --git a/vendor/github.com/maypok86/otter/v2/internal/xiter/xiter.go b/vendor/github.com/maypok86/otter/v2/internal/xiter/xiter.go new file mode 100644 index 00000000..836060fd --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/xiter/xiter.go @@ -0,0 +1,63 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xiter + +import "iter" + +// Concat returns an iterator over the concatenation of the sequences. +func Concat[V any](seqs ...iter.Seq[V]) iter.Seq[V] { + return func(yield func(V) bool) { + for _, seq := range seqs { + for e := range seq { + if !yield(e) { + return + } + } + } + } +} + +// MergeFunc merges two sequences of values ordered by the function f. +// Values appear in the output once for each time they appear in x +// and once for each time they appear in y. +// When equal values appear in both sequences, +// the output contains the values from x before the values from y. +// If the two input sequences are not ordered by f, +// the output sequence will not be ordered by f, +// but it will still contain every value from x and y exactly once. 
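+//
+// A small sketch, assuming the standard library "slices", "cmp" and "fmt"
+// packages (Go 1.23+ for slices.Values):
+//
+//	x := slices.Values([]int{1, 3, 5})
+//	y := slices.Values([]int{2, 3, 4})
+//	for v := range MergeFunc(x, y, cmp.Compare) {
+//		fmt.Print(v, " ") // 1 2 3 3 4 5
+//	}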
+func MergeFunc[V any](x, y iter.Seq[V], f func(V, V) int) iter.Seq[V] { + return func(yield func(V) bool) { + next, stop := iter.Pull(y) + defer stop() + v2, ok2 := next() + for v1 := range x { + for ok2 && f(v1, v2) > 0 { + if !yield(v2) { + return + } + v2, ok2 = next() + } + if !yield(v1) { + return + } + } + for ok2 { + if !yield(v2) { + return + } + v2, ok2 = next() + } + } +} diff --git a/vendor/github.com/maypok86/otter/internal/xmath/power.go b/vendor/github.com/maypok86/otter/v2/internal/xmath/xmath.go similarity index 65% rename from vendor/github.com/maypok86/otter/internal/xmath/power.go rename to vendor/github.com/maypok86/otter/v2/internal/xmath/xmath.go index 384d7659..a2e1aea0 100644 --- a/vendor/github.com/maypok86/otter/internal/xmath/power.go +++ b/vendor/github.com/maypok86/otter/v2/internal/xmath/xmath.go @@ -1,4 +1,4 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. +// Copyright (c) 2023 Alexey Mayshev and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,6 +14,15 @@ package xmath +import "math" + +func Abs(a int64) int64 { + if a < 0 { + return -a + } + return a +} + // RoundUpPowerOf2 is based on https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2. func RoundUpPowerOf2(v uint32) uint32 { if v == 0 { @@ -28,3 +37,26 @@ func RoundUpPowerOf2(v uint32) uint32 { v++ return v } + +func RoundUpPowerOf264(x uint64) uint64 { + if x == 0 { + return 1 + } + x-- + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + x |= x >> 32 + x++ + return x +} + +func SaturatedAdd(a, b int64) int64 { + s := a + b + if s < a || s < b { + return math.MaxInt64 + } + return s +} diff --git a/vendor/github.com/maypok86/otter/internal/xruntime/runtime_1.22.go b/vendor/github.com/maypok86/otter/v2/internal/xruntime/hasher.go similarity index 64% rename from vendor/github.com/maypok86/otter/internal/xruntime/runtime_1.22.go rename to vendor/github.com/maypok86/otter/v2/internal/xruntime/hasher.go index 4827a343..65d38ccb 100644 --- a/vendor/github.com/maypok86/otter/internal/xruntime/runtime_1.22.go +++ b/vendor/github.com/maypok86/otter/v2/internal/xruntime/hasher.go @@ -1,6 +1,4 @@ -//go:build go1.22 - -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,11 +14,18 @@ package xruntime -import ( - "math/rand/v2" -) +import "hash/maphash" + +type Hasher[T comparable] struct { + seed maphash.Seed +} + +func NewHasher[T comparable]() Hasher[T] { + return Hasher[T]{ + seed: maphash.MakeSeed(), + } +} -func Fastrand() uint32 { - //nolint:gosec // we don't need a cryptographically secure random number generator - return rand.Uint32() +func (h Hasher[T]) Hash(t T) uint64 { + return maphash.Comparable(h.seed, t) } diff --git a/vendor/github.com/maypok86/otter/internal/xruntime/xruntime.go b/vendor/github.com/maypok86/otter/v2/internal/xruntime/xruntime.go similarity index 73% rename from vendor/github.com/maypok86/otter/internal/xruntime/xruntime.go rename to vendor/github.com/maypok86/otter/v2/internal/xruntime/xruntime.go index 9c5de936..c2eb0c16 100644 --- a/vendor/github.com/maypok86/otter/internal/xruntime/xruntime.go +++ b/vendor/github.com/maypok86/otter/v2/internal/xruntime/xruntime.go @@ -1,4 +1,4 @@ -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. +// Copyright (c) 2023 Alexey Mayshev and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,12 +15,20 @@ package xruntime import ( + "math" + "math/rand/v2" "runtime" + "time" + "unsafe" + + "golang.org/x/sys/cpu" ) const ( // CacheLineSize is useful for preventing false sharing. - CacheLineSize = 64 + CacheLineSize = unsafe.Sizeof(cpu.CacheLinePad{}) + + MaxDuration = time.Duration(math.MaxInt64) ) // Parallelism returns the maximum possible number of concurrently running goroutines. @@ -34,3 +42,8 @@ func Parallelism() uint32 { } return numCPU } + +func Fastrand() uint32 { + //nolint:gosec // we don't need a cryptographically secure random number generator + return rand.Uint32() +} diff --git a/vendor/github.com/maypok86/otter/v2/internal/xsync/adder.go b/vendor/github.com/maypok86/otter/v2/internal/xsync/adder.go new file mode 100644 index 00000000..12c08240 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/internal/xsync/adder.go @@ -0,0 +1,91 @@ +// Copyright (c) 2023 Alexey Mayshev and contributors. All rights reserved. +// Copyright (c) 2021 Andrey Pechkurov. All rights reserved. +// +// Copyright notice. This code is a fork of xsync.Adder from this file with some changes: +// https://github.com/puzpuzpuz/xsync/blob/main/counter.go +// +// Use of this source code is governed by a MIT license that can be found +// at https://github.com/puzpuzpuz/xsync/blob/main/LICENSE + +package xsync + +import ( + "sync" + "sync/atomic" + + "github.com/maypok86/otter/v2/internal/xmath" + "github.com/maypok86/otter/v2/internal/xruntime" +) + +// pool for P tokens. +var tokenPool sync.Pool + +// a P token is used to point at the current OS thread (P) +// on which the goroutine is run; exact identity of the thread, +// as well as P migration tolerance, is not important since +// it's used to as a best effort mechanism for assigning +// concurrent operations (goroutines) to different stripes of +// the Adder. +type token struct { + idx uint32 + padding [xruntime.CacheLineSize - 4]byte +} + +// A Adder is a striped int64 Adder. +// +// Should be preferred over a single atomically updated uint64 +// Adder in high contention scenarios. +// +// A Adder must not be copied after first use. 
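+//
+// A minimal concurrent-use sketch, written as if from inside this module (the
+// package is internal) and assuming the standard "sync" package:
+//
+//	a := xsync.NewAdder()
+//	var wg sync.WaitGroup
+//	for i := 0; i < 8; i++ {
+//		wg.Add(1)
+//		go func() {
+//			defer wg.Done()
+//			for j := 0; j < 1000; j++ {
+//				a.Add(1)
+//			}
+//		}()
+//	}
+//	wg.Wait()
+//	_ = a.Value() // 8000 once all goroutines have finished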
+type Adder struct { + stripes []astripe + mask uint32 +} + +const cacheLineSize = 64 + +type astripe struct { + adder atomic.Uint64 + padding [cacheLineSize - 8]byte +} + +// NewAdder creates a new Adder instance. +func NewAdder() *Adder { + nstripes := xmath.RoundUpPowerOf2(xruntime.Parallelism()) + return &Adder{ + stripes: make([]astripe, nstripes), + mask: nstripes - 1, + } +} + +// Add adds the delta to the Adder. +func (a *Adder) Add(delta uint64) { + t, ok := tokenPool.Get().(*token) + if !ok { + t = &token{ + idx: xruntime.Fastrand(), + } + } + for { + stripe := &a.stripes[t.idx&a.mask] + cnt := stripe.adder.Load() + if stripe.adder.CompareAndSwap(cnt, cnt+delta) { + break + } + // Give a try with another randomly selected stripe. + t.idx = xruntime.Fastrand() + } + tokenPool.Put(t) +} + +// Value returns the current Adder value. +// The returned value may not include all of the latest operations in +// presence of concurrent modifications of the Adder. +func (a *Adder) Value() uint64 { + value := uint64(0) + for i := 0; i < len(a.stripes); i++ { + stripe := &a.stripes[i] + value += stripe.adder.Load() + } + return value +} diff --git a/vendor/github.com/maypok86/otter/v2/loader.go b/vendor/github.com/maypok86/otter/v2/loader.go new file mode 100644 index 00000000..b4179f7a --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/loader.go @@ -0,0 +1,106 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import "context" + +// Loader computes or retrieves values, based on a key, for use in populating a [Cache]. +type Loader[K comparable, V any] interface { + // Load computes or retrieves the value corresponding to key. + // + // WARNING: loading must not attempt to update any mappings of this cache directly. + // + // NOTE: The Loader implementation should always return ErrNotFound + // if the entry was not found in the data source. + Load(ctx context.Context, key K) (V, error) + // Reload computes or retrieves a replacement value corresponding to an already-cached key. + // If the replacement value is not found, then the mapping will be removed if ErrNotFound is returned. + // This method is called when an existing cache entry is refreshed by Cache.Get, or through a call to Cache.Refresh. + // + // WARNING: loading must not attempt to update any mappings of this cache directly + // or block waiting for other cache operations to complete. + // + // NOTE: all errors returned by this method will be logged (using Logger) and then swallowed. + // + // NOTE: The Loader implementation should always return ErrNotFound + // if the entry was not found in the data source. + Reload(ctx context.Context, key K, oldValue V) (V, error) +} + +// LoaderFunc is an adapter to allow the use of ordinary functions as loaders. +// If f is a function with the appropriate signature, LoaderFunc(f) is a [Loader] that calls f. 
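+//
+// For example (fetchFromSource is a hypothetical data-source lookup, not part
+// of this package; ErrNotFound is the sentinel described in Loader):
+//
+//	loader := LoaderFunc[string, string](func(ctx context.Context, key string) (string, error) {
+//		v, ok, err := fetchFromSource(ctx, key)
+//		if err != nil {
+//			return "", err
+//		}
+//		if !ok {
+//			return "", ErrNotFound
+//		}
+//		return v, nil
+//	})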
+type LoaderFunc[K comparable, V any] func(ctx context.Context, key K) (V, error) + +// Load calls f(ctx, key). +func (lf LoaderFunc[K, V]) Load(ctx context.Context, key K) (V, error) { + return lf(ctx, key) +} + +// Reload calls f(ctx, key). +func (lf LoaderFunc[K, V]) Reload(ctx context.Context, key K, oldValue V) (V, error) { + return lf(ctx, key) +} + +// BulkLoader computes or retrieves values, based on the keys, for use in populating a [Cache]. +type BulkLoader[K comparable, V any] interface { + // BulkLoad computes or retrieves the values corresponding to keys. + // This method is called by Cache.BulkGet. + // + // If the returned map doesn't contain all requested keys, then the entries it does + // contain will be cached, and Cache.BulkGet will return the partial results. If the returned map + // contains extra keys not present in keys then all returned entries will be cached, but + // only the entries for keys, will be returned from Cache.BulkGet. + // + // WARNING: loading must not attempt to update any mappings of this cache directly. + BulkLoad(ctx context.Context, keys []K) (map[K]V, error) + // BulkReload computes or retrieves replacement values corresponding to already-cached keys. + // If the replacement value is not found, then the mapping will be removed. + // This method is called when an existing cache entry is refreshed by Cache.BulkGet, or through a call to Cache.BulkRefresh. + // + // If the returned map doesn't contain all requested keys, then the entries it does + // contain will be cached. If the returned map + // contains extra keys not present in keys then all returned entries will be cached. + // + // WARNING: loading must not attempt to update any mappings of this cache directly + // or block waiting for other cache operations to complete. + // + // NOTE: all errors returned by this method will be logged (using Logger) and then swallowed. + BulkReload(ctx context.Context, keys []K, oldValues []V) (map[K]V, error) +} + +// BulkLoaderFunc is an adapter to allow the use of ordinary functions as loaders. +// If f is a function with the appropriate signature, BulkLoaderFunc(f) is a [BulkLoader] that calls f. +type BulkLoaderFunc[K comparable, V any] func(ctx context.Context, keys []K) (map[K]V, error) + +// BulkLoad calls f(ctx, keys). +func (blf BulkLoaderFunc[K, V]) BulkLoad(ctx context.Context, keys []K) (map[K]V, error) { + return blf(ctx, keys) +} + +// BulkReload calls f(ctx, keys). +func (blf BulkLoaderFunc[K, V]) BulkReload(ctx context.Context, keys []K, oldValues []V) (map[K]V, error) { + return blf(ctx, keys) +} + +// RefreshResult holds the results of [Cache.Refresh]/[Cache.BulkRefresh], so they can be passed +// on a channel. +type RefreshResult[K comparable, V any] struct { + // Key is the key corresponding to the refreshed entry. + Key K + // Value is the value corresponding to the refreshed entry. + Value V + // Err is the error that Loader / BulkLoader returned. + Err error +} diff --git a/vendor/github.com/maypok86/otter/v2/logger.go b/vendor/github.com/maypok86/otter/v2/logger.go new file mode 100644 index 00000000..2037b89f --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/logger.go @@ -0,0 +1,52 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "context" + "log/slog" +) + +// Logger is the interface used to get log output from otter. +type Logger interface { + // Warn logs a message at the warn level with an error. + Warn(ctx context.Context, msg string, err error) + // Error logs a message at the error level with an error. + Error(ctx context.Context, msg string, err error) +} + +type defaultLogger struct { + log *slog.Logger +} + +func newDefaultLogger() *defaultLogger { + return &defaultLogger{ + log: slog.Default(), + } +} + +func (dl *defaultLogger) Warn(ctx context.Context, msg string, err error) { + dl.log.WarnContext(ctx, msg, slog.Any("err", err)) +} + +func (dl *defaultLogger) Error(ctx context.Context, msg string, err error) { + dl.log.ErrorContext(ctx, msg, slog.Any("err", err)) +} + +// NoopLogger is a stub implementation of [Logger] interface. It may be useful if error logging is not necessary. +type NoopLogger struct{} + +func (nl *NoopLogger) Warn(ctx context.Context, msg string, err error) {} +func (nl *NoopLogger) Error(ctx context.Context, msg string, err error) {} diff --git a/vendor/github.com/maypok86/otter/v2/mkdocs.yml b/vendor/github.com/maypok86/otter/v2/mkdocs.yml new file mode 100644 index 00000000..65368141 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/mkdocs.yml @@ -0,0 +1,162 @@ +# Project information +site_name: Otter +site_url: https://maypok86.github.io/otter/ +site_author: Alexey Mayshev + +# Repository +repo_name: maypok86/otter +repo_url: https://github.com/maypok86/otter +remote_branch: main +edit_uri: "" + +# Copyright +copyright: Copyright © 2024-2025 Alexey Mayshev + +# Configuration +theme: + name: material + logo: assets/icon.png + favicon: assets/icon.png + icon: + repo: fontawesome/brands/github + features: + - navigation.tracking + - navigation.instant + - navigation.tabs + - navigation.tabs.sticky + - navigation.top + #- navigation.sections + - navigation.indexes + - toc.follow + - content.code.copy + - content.tabs.link + - search.suggest + - search.highlight + font: + text: Roboto + code: JetBrains Mono + palette: + # Palette toggle for light mode + - media: "(prefers-color-scheme: light)" + scheme: default + primary: teal + accent: blue + toggle: + icon: material/lightbulb-outline + name: Switch to dark mode + + # Palette toggle for dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + primary: teal + accent: blue + toggle: + icon: material/lightbulb + name: Switch to light mode + +# Plugins +plugins: + - blog: + post_url_format: "{slug}" + archive: false + - glightbox + - search: + separator: '[\s\u200b\-_,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])' + - minify: + minify_html: true + - tags + +# Additional configuration +extra: + status: + new: Recently added + deprecated: Deprecated + social: + - icon: fontawesome/brands/github + link: https://github.com/maypok86 + - icon: fontawesome/brands/golang + link: https://pkg.go.dev/github.com/maypok86/otter/v2 + +# Extensions +markdown_extensions: + - abbr + - admonition + - attr_list + - def_list + - footnotes + - md_in_html + - toc: + permalink: true + 
- pymdownx.arithmatex: + generic: true + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.details + - pymdownx.emoji: + emoji_generator: !!python/name:material.extensions.emoji.to_svg + emoji_index: !!python/name:material.extensions.emoji.twemoji + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true + - pymdownx.inlinehilite + - pymdownx.keys + - pymdownx.magiclink: + normalize_issue_symbols: true + repo_url_shorthand: true + user: maypok86 + repo: otter + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + - pymdownx.tabbed: + alternate_style: true + combine_header_slug: true + slugify: !!python/object/apply:pymdownx.slugs.slugify + kwds: + case: lower + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde + - pymdownx.snippets + +nav: + - Overview: + - Overview: index.md + - Ask a question: https://github.com/maypok86/otter/discussions/categories/q-a + - User guide: + - v2 manual: + - Getting started: user-guide/v2/getting-started.md + - Examples: user-guide/v2/examples.md + - Features: + - user-guide/v2/features/index.md + - Eviction: user-guide/v2/features/eviction.md + - Deletion: user-guide/v2/features/deletion.md + - Loading: user-guide/v2/features/loading.md + - Refresh: user-guide/v2/features/refresh.md + - Bulk operations: user-guide/v2/features/bulk.md + - Compute: user-guide/v2/features/compute.md + - Statistics: user-guide/v2/features/statistics.md + - Persistence: user-guide/v2/features/persistence.md + - Extension: user-guide/v2/features/extension.md + - Iteration: user-guide/v2/features/iteration.md + - v1 manual: + - Getting started: user-guide/v1/getting-started.md + - Features: + - user-guide/v1/features/index.md + - Expiration policy: user-guide/v1/features/expiration-policy.md + - Cost-based eviction: user-guide/v1/features/cost.md + - Statistics: user-guide/v1/features/stats.md + - API: api/index.md + - Performance: + - Design: performance/design.md + - Throughput: performance/throughput.md + - Hit ratio: performance/hit-ratio.md + - Memory consumption: performance/memory-consumption.md + - Blog: + - blog/index.md diff --git a/vendor/github.com/maypok86/otter/v2/options.go b/vendor/github.com/maypok86/otter/v2/options.go new file mode 100644 index 00000000..336bd721 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/options.go @@ -0,0 +1,224 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
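The options.go file added below centers on the [Options] struct. As a reading aid, here is a minimal construction sketch based only on the doc-comment example further down in this file; it is not part of the vendored sources. The import paths are taken from this patch, and the `GetIfPresent` signature is assumed from its use in persistence.go rather than confirmed here:

```go
package main

import (
	"time"

	"github.com/maypok86/otter/v2"
	"github.com/maypok86/otter/v2/stats"
)

func main() {
	// Size-bounded cache with write-based expiration and a stats counter,
	// mirroring the example in the Options doc comment below.
	cache := otter.Must(&otter.Options[string, string]{
		MaximumSize:      10_000,
		ExpiryCalculator: otter.ExpiryWriting[string, string](10 * time.Minute),
		StatsRecorder:    stats.NewCounter(),
	})

	cache.Set("hello", "world")

	// GetIfPresent is assumed to return (value, ok), as suggested by its use
	// in persistence.go later in this patch; adjust if the signature differs.
	if v, ok := cache.GetIfPresent("hello"); ok {
		_ = v
	}
}
```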
+
+package otter
+
+import (
+	"errors"
+
+	"github.com/maypok86/otter/v2/stats"
+)
+
+const (
+	defaultInitialCapacity = 16
+)
+
+var defaultExecutor = func(fn func()) {
+	go fn()
+}
+
+// Options should be passed to [New]/[Must] to construct a [Cache] having a combination of the following features:
+//
+//   - automatic loading of entries into the cache
+//   - size-based eviction when a maximum is exceeded based on frequency and recency
+//   - time-based expiration of entries, measured since last access or last write
+//   - asynchronously refresh when the first stale request for an entry occurs
+//   - notification of deleted entries
+//   - accumulation of cache access statistics
+//
+// These features are all optional; caches can be created using all or none of them. By default,
+// cache instances created using [Options] will not perform any type of eviction.
+//
+//	cache := otter.Must(&Options[string, string]{
+//		MaximumSize:      10_000,
+//		ExpiryCalculator: otter.ExpiryWriting[string, string](10 * time.Minute),
+//		StatsRecorder:    stats.NewCounter(),
+//	})
+//
+// Entries are automatically evicted from the cache when any of MaximumSize, MaximumWeight,
+// ExpiryCalculator are specified.
+//
+// If MaximumSize or MaximumWeight is specified, entries may be evicted on each cache modification.
+//
+// If ExpiryCalculator is specified, then entries may be evicted on
+// each cache modification, on occasional cache accesses, or on calls to [Cache.CleanUp].
+// Expired entries may be counted by [Cache.EstimatedSize], but will never be visible to read or write operations.
+//
+// Certain cache configurations will result in the accrual of periodic maintenance tasks that
+// will be performed during write operations, or during occasional read operations in the absence of writes.
+// The [Cache.CleanUp] method of the returned cache will also perform maintenance, but
+// calling it should not be necessary with a high-throughput cache. Only caches built with
+// MaximumSize, MaximumWeight, ExpiryCalculator perform periodic maintenance.
+type Options[K comparable, V any] struct {
+	// MaximumSize specifies the maximum number of entries the cache may contain.
+	//
+	// This option cannot be used in conjunction with MaximumWeight.
+	//
+	// NOTE: the cache may evict an entry before this limit is exceeded or temporarily exceed the threshold while evicting.
+	// As the cache size grows close to the maximum, the cache evicts entries that are less likely to be used again.
+	// For example, the cache may evict an entry because it hasn't been used recently or very often.
+	MaximumSize int
+	// MaximumWeight specifies the maximum weight of entries the cache may contain. Weight is determined using the
+	// callback specified with Weigher.
+	// Use of this method requires specifying an option Weigher prior to calling New.
+	//
+	// This option cannot be used in conjunction with MaximumSize.
+	//
+	// NOTE: the cache may evict an entry before this limit is exceeded or temporarily exceed the threshold while evicting.
+	// As the cache size grows close to the maximum, the cache evicts entries that are less likely to be used again.
+	// For example, the cache may evict an entry because it hasn't been used recently or very often.
+	//
+	// NOTE: weight is only used to determine whether the cache is over capacity; it has no effect
+	// on selecting which entry should be evicted next.
+	MaximumWeight uint64
+	// StatsRecorder accumulates statistics during the operation of a Cache.
+ // + // NOTE: If your stats.Recorder implementation doesn't also implement stats.Snapshoter, + // Cache.Stats method will always return a zero-value snapshot. + StatsRecorder stats.Recorder + // InitialCapacity specifies the minimum total size for the internal data structures. Providing a large enough estimate + // at construction time avoids the need for expensive resizing operations later, but setting this + // value unnecessarily high wastes memory. + InitialCapacity int + // Weigher specifies the weigher to use in determining the weight of entries. Entry weight is taken into + // consideration by MaximumWeight when determining which entries to evict, and use + // of this method requires specifying an option MaximumWeight prior to calling New. + // Weights are measured and recorded when entries are inserted into or updated in + // the cache, and are thus effectively static during the lifetime of a cache entry. + // + // When the weight of an entry is zero it will not be considered for size-based eviction (though + // it still may be evicted by other means). + Weigher func(key K, value V) uint32 + // ExpiryCalculator specifies that each entry should be automatically removed from the cache once a duration has + // elapsed after the entry's creation, the most recent replacement of its value, or its last read. + // The expiration time is reset by all cache read and write operations. + ExpiryCalculator ExpiryCalculator[K, V] + // OnDeletion specifies a handler instance that caches should notify each time an entry is deleted for any + // DeletionCause reason. The cache will invoke this handler on the configured Executor + // after the entry's deletion operation has completed. + // + // An OnAtomicDeletion may be preferred when the handler should be invoked + // as part of the atomic operation to delete the entry. + OnDeletion func(e DeletionEvent[K, V]) + // OnAtomicDeletion specifies a handler that caches should notify each time an entry is deleted for any + // DeletionCause. The cache will invoke this handler during the atomic operation to delete the entry. + // + // A OnDeletion may be preferred when the handler should be performed outside the atomic operation to + // delete the entry, or be delegated to the configured Executor. + OnAtomicDeletion func(e DeletionEvent[K, V]) + // RefreshCalculator specifies that active entries are eligible for automatic refresh once a duration has + // elapsed after the entry's creation, the most recent replacement of its value, or the most recent entry's reload. + // The semantics of refreshes are specified in Cache.Refresh, + // and are performed by calling Loader.Reload in a separate background goroutine. + // + // Automatic refreshes are performed when the first stale request for an entry occurs. The request + // triggering the refresh will make an asynchronous call to Loader.Reload to get a new value. + // Until refresh is completed, requests will continue to return the old value. + // + // NOTE: all errors returned during refresh will be logged (using Logger) and then swallowed. + RefreshCalculator RefreshCalculator[K, V] + // Executor specifies the executor to use when running asynchronous tasks. The executor is delegated to + // when sending deletion events, when asynchronous computations are performed by + // Cache.Refresh/Cache.BulkRefresh or for refreshes in Cache.Get/Cache.BulkGet, if RefreshCalculator was specified, + // or when performing periodic maintenance. By default, goroutines are used. 
+ // + // The primary intent of this method is to facilitate testing of caches which have been configured + // with OnDeletion or utilize asynchronous computations. A test may instead prefer + // to configure the cache to execute tasks directly on the same goroutine. + // + // Beware that configuring a cache with an executor that discards tasks or never runs them may + // experience non-deterministic behavior. + Executor func(fn func()) + // Clock specifies a nanosecond-precision time source for use in determining when entries should be + // expired or refreshed. By default, time.Now().UnixNano() is used. + // + // The primary intent of this option is to facilitate testing of caches which have been configured + // with ExpiryCalculator or RefreshCalculator. + // + // NOTE: this clock is not used when recording statistics. + Clock Clock + // Logger specifies the Logger implementation that will be used for logging warning and errors. + // + // The cache will use slog.Default() by default. + Logger Logger +} + +func (o *Options[K, V]) getMaximum() uint64 { + if o.MaximumSize > 0 { + return uint64(o.MaximumSize) + } + if o.MaximumWeight > 0 { + return o.MaximumWeight + } + return 0 +} + +func (o *Options[K, V]) hasInitialCapacity() bool { + return o.InitialCapacity > 0 +} + +func (o *Options[K, V]) getInitialCapacity() int { + if o.hasInitialCapacity() { + return o.InitialCapacity + } + return defaultInitialCapacity +} + +func (o *Options[K, V]) getExecutor() func(fn func()) { + if o.Executor == nil { + return defaultExecutor + } + return o.Executor +} + +func (o *Options[K, V]) getWeigher() func(key K, value V) uint32 { + if o.Weigher == nil { + return func(key K, value V) uint32 { + return 1 + } + } + return o.Weigher +} + +func (o *Options[K, V]) getLogger() Logger { + if o.Logger == nil { + return newDefaultLogger() + } + return o.Logger +} + +func (o *Options[K, V]) validate() error { + if o.MaximumSize > 0 && o.MaximumWeight > 0 { + return errors.New("otter: both maximumSize and maximumWeight are set") + } + if o.MaximumSize > 0 && o.Weigher != nil { + return errors.New("otter: both maximumSize and weigher are set") + } + + if o.MaximumWeight > 0 && o.Weigher == nil { + return errors.New("otter: maximumWeight requires weigher") + } + if o.Weigher != nil && o.MaximumWeight <= 0 { + return errors.New("otter: weigher requires maximumWeight") + } + + if o.MaximumSize < 0 { + return errors.New("otter: maximumSize should be positive") + } + if o.InitialCapacity < 0 { + return errors.New("otter: initial capacity should be positive") + } + + return nil +} diff --git a/vendor/github.com/maypok86/otter/v2/persistence.go b/vendor/github.com/maypok86/otter/v2/persistence.go new file mode 100644 index 00000000..b717fc7e --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/persistence.go @@ -0,0 +1,155 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
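The persistence.go file added below provides gob-based helpers for saving and restoring a cache's hottest entries. A minimal round-trip sketch, assuming an existing *otter.Cache[string, string] and an arbitrary, hypothetical file path (neither appears in this patch), could look like this; it is illustrative only and not part of the vendored sources:

```go
package example

import (
	"log"

	"github.com/maypok86/otter/v2"
)

// persistAndRestore is an illustrative sketch only. The cache value type must
// be gob-encodable, and the file path is arbitrary.
func persistAndRestore(cache *otter.Cache[string, string]) *otter.Cache[string, string] {
	// Save the hottest entries; per the doc warning below, this runs under the
	// eviction policy's exclusive lock.
	if err := otter.SaveCacheToFile(cache, "/tmp/otter-cache.gob"); err != nil {
		log.Printf("save cache: %v", err)
		return cache
	}

	// Restore into a fresh cache, e.g. on the next process start.
	restored := otter.Must(&otter.Options[string, string]{MaximumSize: 10_000})
	if err := otter.LoadCacheFromFile(restored, "/tmp/otter-cache.gob"); err != nil {
		log.Printf("load cache: %v", err)
	}
	return restored
}
```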
+ +package otter + +import ( + "encoding/gob" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "time" +) + +// LoadCacheFromFile loads cache data from the given filePath. +// +// See SaveCacheToFile for saving cache data to file. +func LoadCacheFromFile[K comparable, V any](c *Cache[K, V], filePath string) error { + file, err := os.Open(filePath) + if err != nil { + return fmt.Errorf("otter: open file %s: %w", filePath, err) + } + //nolint:errcheck // it's ok + defer file.Close() + + return LoadCacheFrom(c, file) +} + +// LoadCacheFrom loads cache data from the given [io.Reader]. +// +// See SaveCacheToFile for saving cache data to file. +func LoadCacheFrom[K comparable, V any](c *Cache[K, V], r io.Reader) error { + dec := gob.NewDecoder(r) + + var savedMaximum uint64 + if err := dec.Decode(&savedMaximum); err != nil { + return fmt.Errorf("otter: decode maximum: %w", err) + } + + maximum := min(savedMaximum, c.GetMaximum()) + maximum2 := maximum / 4 + maximum1 := 2 * maximum2 + size := uint64(0) + for size < maximum { + var entry Entry[K, V] + if err := dec.Decode(&entry); err != nil { + if errors.Is(err, io.EOF) { + break + } + + return fmt.Errorf("otter: decode entry: %w", err) + } + + nowNano := c.cache.clock.NowNano() + if c.cache.withExpiration && entry.ExpiresAtNano < nowNano { + continue + } + c.Set(entry.Key, entry.Value) + if c.cache.withExpiration && entry.ExpiresAtNano != unreachableExpiresAt { + expiresAfter := max(1, time.Duration(entry.ExpiresAtNano-nowNano)) + c.SetExpiresAfter(entry.Key, expiresAfter) + } + if c.cache.withRefresh && entry.RefreshableAtNano != unreachableRefreshableAt { + refreshableAfter := max(1, time.Duration(entry.RefreshableAtNano-nowNano)) + c.SetRefreshableAfter(entry.Key, refreshableAfter) + } + size += uint64(entry.Weight) + + if size <= maximum2 { + c.GetIfPresent(entry.Key) + c.GetIfPresent(entry.Key) + continue + } + if size <= maximum1 { + c.GetIfPresent(entry.Key) + continue + } + } + + return nil +} + +// SaveCacheToFile atomically saves cache data to the given filePath. +// +// SaveCacheToFile may be called concurrently with other operations on the cache. +// +// The saved data may be loaded with LoadCacheFromFile. +// +// WARNING: Beware that this operation is performed within the eviction policy's exclusive lock. +// While the operation is in progress further eviction maintenance will be halted. +func SaveCacheToFile[K comparable, V any](c *Cache[K, V], filePath string) error { + // Create dir if it doesn't exist. + dir := filepath.Dir(filePath) + if _, err := os.Stat(dir); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("otter: stat %s: %w", dir, err) + } + if err := os.MkdirAll(dir, 0o755); err != nil { + return fmt.Errorf("otter: create dir %s: %w", dir, err) + } + } + + file, err := os.Create(filePath) + if err != nil { + return fmt.Errorf("otter: create file %s: %w", filePath, err) + } + //nolint:errcheck // it's ok + defer file.Close() + + return SaveCacheTo(c, file) +} + +// SaveCacheTo atomically saves cache data to the given [io.Writer]. +// +// SaveCacheToFile may be called concurrently with other operations on the cache. +// +// The saved data may be loaded with LoadCacheFrom. +// +// WARNING: Beware that this operation is performed within the eviction policy's exclusive lock. +// While the operation is in progress further eviction maintenance will be halted. 
+func SaveCacheTo[K comparable, V any](c *Cache[K, V], w io.Writer) error { + enc := gob.NewEncoder(w) + + maximum := c.GetMaximum() + if err := enc.Encode(maximum); err != nil { + return fmt.Errorf("otter: encode maximum: %w", err) + } + + size := uint64(0) + for entry := range c.Hottest() { + if size >= maximum { + break + } + + if err := enc.Encode(entry); err != nil { + return fmt.Errorf("otter: encode entry: %w", err) + } + + size += uint64(entry.Weight) + } + + return nil +} diff --git a/vendor/github.com/maypok86/otter/v2/policy.go b/vendor/github.com/maypok86/otter/v2/policy.go new file mode 100644 index 00000000..b141b8e6 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/policy.go @@ -0,0 +1,542 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "github.com/maypok86/otter/v2/internal/deque" + "github.com/maypok86/otter/v2/internal/generated/node" + "github.com/maypok86/otter/v2/internal/xruntime" +) + +const ( + isExp = false + + // The initial percent of the maximum weighted capacity dedicated to the main space. + percentMain = 0.99 + // percentMainProtected is the percent of the maximum weighted capacity dedicated to the main's protected space. + percentMainProtected = 0.80 + // The difference in hit rates that restarts the climber. + hillClimberRestartThreshold = 0.05 + // The percent of the total size to adapt the window by. + hillClimberStepPercent = 0.0625 + // The rate to decrease the step size to adapt by. + hillClimberStepDecayRate = 0.98 + // admitHashdosThreshold is the minimum popularity for allowing randomized admission. + admitHashdosThreshold = 6 + // The maximum number of entries that can be transferred between queues. + queueTransferThreshold = 1_000 +) + +type policy[K comparable, V any] struct { + sketch *sketch[K] + window *deque.Linked[K, V] + probation *deque.Linked[K, V] + protected *deque.Linked[K, V] + maximum uint64 + weightedSize uint64 + windowMaximum uint64 + windowWeightedSize uint64 + mainProtectedMaximum uint64 + mainProtectedWeightedSize uint64 + stepSize float64 + adjustment int64 + hitsInSample uint64 + missesInSample uint64 + previousSampleHitRate float64 + isWeighted bool + rand func() uint32 +} + +func newPolicy[K comparable, V any](isWeighted bool) *policy[K, V] { + return &policy[K, V]{ + sketch: newSketch[K](), + window: deque.NewLinked[K, V](isExp), + probation: deque.NewLinked[K, V](isExp), + protected: deque.NewLinked[K, V](isExp), + isWeighted: isWeighted, + rand: xruntime.Fastrand, + } +} + +// access updates the eviction policy based on node accesses. +func (p *policy[K, V]) access(n node.Node[K, V]) { + p.sketch.increment(n.Key()) + switch { + case n.InWindow(): + reorder(p.window, n) + case n.InMainProbation(): + p.reorderProbation(n) + case n.InMainProtected(): + reorder(p.protected, n) + } + p.hitsInSample++ +} + +// add adds node to the eviction policy. 
+func (p *policy[K, V]) add(n node.Node[K, V], evictNode func(n node.Node[K, V], nowNanos int64)) { + nodeWeight := uint64(n.Weight()) + + p.weightedSize += nodeWeight + p.windowWeightedSize += nodeWeight + if p.weightedSize >= p.maximum>>1 { + // Lazily initialize when close to the maximum + capacity := p.maximum + if p.isWeighted { + //nolint:gosec // there's no overflow + capacity = uint64(p.window.Len()) + uint64(p.probation.Len()) + uint64(p.protected.Len()) + } + p.sketch.ensureCapacity(capacity) + } + + p.sketch.increment(n.Key()) + p.missesInSample++ + + // ignore out-of-order write operations + if !n.IsAlive() { + return + } + + switch { + case nodeWeight > p.maximum: + evictNode(n, 0) + case nodeWeight > p.windowMaximum: + p.window.PushFront(n) + default: + p.window.PushBack(n) + } +} + +func (p *policy[K, V]) update(n, old node.Node[K, V], evictNode func(n node.Node[K, V], nowNanos int64)) { + nodeWeight := uint64(n.Weight()) + p.updateNode(n, old) + switch { + case n.InWindow(): + p.windowWeightedSize += nodeWeight + switch { + case nodeWeight > p.maximum: + evictNode(n, 0) + case nodeWeight <= p.windowMaximum: + p.access(n) + case p.window.Contains(n): + p.window.MoveToFront(n) + } + case n.InMainProbation(): + if nodeWeight <= p.maximum { + p.access(n) + } else { + evictNode(n, 0) + } + case n.InMainProtected(): + p.mainProtectedWeightedSize += nodeWeight + if nodeWeight <= p.maximum { + p.access(n) + } else { + evictNode(n, 0) + } + } + + p.weightedSize += nodeWeight +} + +func (p *policy[K, V]) updateNode(n, old node.Node[K, V]) { + n.SetQueueType(old.GetQueueType()) + + switch { + case n.InWindow(): + p.window.UpdateNode(n, old) + case n.InMainProbation(): + p.probation.UpdateNode(n, old) + default: + p.protected.UpdateNode(n, old) + } + p.makeDead(old) +} + +// delete deletes node from the eviction policy. +func (p *policy[K, V]) delete(n node.Node[K, V]) { + // add may not have been processed yet + switch { + case n.InWindow(): + p.window.Delete(n) + case n.InMainProbation(): + p.probation.Delete(n) + default: + p.protected.Delete(n) + } + p.makeDead(n) +} + +func (p *policy[K, V]) makeDead(n node.Node[K, V]) { + if !n.IsDead() { + nodeWeight := uint64(n.Weight()) + if n.InWindow() { + p.windowWeightedSize -= nodeWeight + } else if n.InMainProtected() { + p.mainProtectedWeightedSize -= nodeWeight + } + p.weightedSize -= nodeWeight + n.Die() + } +} + +func (p *policy[K, V]) setMaximumSize(maximum uint64) { + if maximum == p.maximum { + return + } + + window := maximum - uint64(percentMain*float64(maximum)) + mainProtected := uint64(percentMainProtected * float64(maximum-window)) + + p.maximum = maximum + p.windowMaximum = window + p.mainProtectedMaximum = mainProtected + + p.hitsInSample = 0 + p.missesInSample = 0 + p.stepSize = -hillClimberStepPercent * float64(maximum) + + if p.sketch != nil && !p.isWeighted && p.weightedSize >= (maximum>>1) { + // Lazily initialize when close to the maximum size + p.sketch.ensureCapacity(maximum) + } +} + +// Promote the node from probation to protected on access. +func (p *policy[K, V]) reorderProbation(n node.Node[K, V]) { + nodeWeight := uint64(n.Weight()) + + if p.probation.NotContains(n) { + // Ignore stale accesses for an entry that is no longer present + return + } else if nodeWeight > p.mainProtectedMaximum { + reorder(p.probation, n) + return + } + + // If the protected space exceeds its maximum, the LRU items are demoted to the probation space. + // This is deferred to the adaption phase at the end of the maintenance cycle. 
+ p.mainProtectedWeightedSize += nodeWeight + p.probation.Delete(n) + p.protected.PushBack(n) + n.MakeMainProtected() +} + +func (p *policy[K, V]) evictNodes(evictNode func(n node.Node[K, V], nowNanos int64)) { + candidate := p.evictFromWindow() + p.evictFromMain(candidate, evictNode) +} + +func (p *policy[K, V]) evictFromWindow() node.Node[K, V] { + var first node.Node[K, V] + n := p.window.Head() + for p.windowWeightedSize > p.windowMaximum { + // The pending operations will adjust the size to reflect the correct weight + if node.Equals(n, nil) { + break + } + + next := n.Next() + nodeWeight := uint64(n.Weight()) + if nodeWeight != 0 { + n.MakeMainProbation() + p.window.Delete(n) + p.probation.PushBack(n) + if first == nil { + first = n + } + + p.windowWeightedSize -= nodeWeight + } + n = next + } + return first +} + +func (p *policy[K, V]) evictFromMain(candidate node.Node[K, V], evictNode func(n node.Node[K, V], nowNanos int64)) { + victimQueue := node.InMainProbationQueue + candidateQueue := node.InMainProbationQueue + victim := p.probation.Head() + for p.weightedSize > p.maximum { + // Search the admission window for additional candidates + if node.Equals(candidate, nil) && candidateQueue == node.InMainProbationQueue { + candidate = p.window.Head() + candidateQueue = node.InWindowQueue + } + + // Try evicting from the protected and window queues + if node.Equals(candidate, nil) && node.Equals(victim, nil) { + if victimQueue == node.InMainProbationQueue { + victim = p.protected.Head() + victimQueue = node.InMainProtectedQueue + continue + } else if victimQueue == node.InMainProtectedQueue { + victim = p.window.Head() + victimQueue = node.InWindowQueue + continue + } + + // The pending operations will adjust the size to reflect the correct weight + break + } + + // Skip over entries with zero weight + if !node.Equals(victim, nil) && victim.Weight() == 0 { + victim = victim.Next() + continue + } else if !node.Equals(candidate, nil) && candidate.Weight() == 0 { + candidate = candidate.Next() + continue + } + + // Evict immediately if only one of the entries is present + if node.Equals(victim, nil) { + previous := candidate.Next() + evict := candidate + candidate = previous + evictNode(evict, 0) + continue + } else if node.Equals(candidate, nil) { + evict := victim + victim = victim.Next() + evictNode(evict, 0) + continue + } + + // Evict immediately if both selected the same entry + if node.Equals(candidate, victim) { + victim = victim.Next() + evictNode(candidate, 0) + candidate = nil + continue + } + + // Evict immediately if an entry was deleted + if !victim.IsAlive() { + evict := victim + victim = victim.Next() + evictNode(evict, 0) + continue + } else if !candidate.IsAlive() { + evict := candidate + candidate = candidate.Next() + evictNode(evict, 0) + continue + } + + // Evict immediately if the candidate's weight exceeds the maximum + if uint64(candidate.Weight()) > p.maximum { + evict := candidate + candidate = candidate.Next() + evictNode(evict, 0) + continue + } + + // Evict the entry with the lowest frequency + if p.admit(candidate.Key(), victim.Key()) { + evict := victim + victim = victim.Next() + evictNode(evict, 0) + candidate = candidate.Next() + } else { + evict := candidate + candidate = candidate.Next() + evictNode(evict, 0) + } + } +} + +func (p *policy[K, V]) admit(candidateKey, victimKey K) bool { + victimFreq := p.sketch.frequency(victimKey) + candidateFreq := p.sketch.frequency(candidateKey) + if candidateFreq > victimFreq { + return true + } + if candidateFreq >= 
admitHashdosThreshold { + // The maximum frequency is 15 and halved to 7 after a reset to age the history. An attack + // exploits that a hot candidate is rejected in favor of a hot victim. The threshold of a warm + // candidate reduces the number of random acceptances to minimize the impact on the hit rate. + return (p.rand() & 127) == 0 + } + return false +} + +func (p *policy[K, V]) climb() { + p.determineAdjustment() + p.demoteFromMainProtected() + amount := p.adjustment + if amount == 0 { + return + } + if amount > 0 { + p.increaseWindow() + } else { + p.decreaseWindow() + } +} + +func (p *policy[K, V]) determineAdjustment() { + if p.sketch.isNotInitialized() { + p.previousSampleHitRate = 0.0 + p.missesInSample = 0 + p.hitsInSample = 0 + return + } + + requestCount := p.hitsInSample + p.missesInSample + if requestCount < p.sketch.sampleSize { + return + } + + hitRate := float64(p.hitsInSample) / float64(requestCount) + hitRateChange := hitRate - p.previousSampleHitRate + amount := p.stepSize + if hitRateChange < 0 { + amount = -p.stepSize + } + var nextStepSize float64 + if abs(hitRateChange) >= hillClimberRestartThreshold { + k := float64(-1) + if amount >= 0 { + k = float64(1) + } + nextStepSize = hillClimberStepPercent * float64(p.maximum) * k + } else { + nextStepSize = hillClimberStepDecayRate * amount + } + p.previousSampleHitRate = hitRate + p.adjustment = int64(amount) + p.stepSize = nextStepSize + p.missesInSample = 0 + p.hitsInSample = 0 +} + +func (p *policy[K, V]) demoteFromMainProtected() { + mainProtectedMaximum := p.mainProtectedMaximum + mainProtectedWeightedSize := p.mainProtectedWeightedSize + if mainProtectedWeightedSize <= mainProtectedMaximum { + return + } + + for i := 0; i < queueTransferThreshold; i++ { + if mainProtectedWeightedSize <= mainProtectedMaximum { + break + } + + demoted := p.protected.PopFront() + if node.Equals(demoted, nil) { + break + } + demoted.MakeMainProbation() + p.probation.PushBack(demoted) + mainProtectedWeightedSize -= uint64(demoted.Weight()) + } + + p.mainProtectedWeightedSize = mainProtectedWeightedSize +} + +func (p *policy[K, V]) increaseWindow() { + if p.mainProtectedMaximum == 0 { + return + } + + quota := p.adjustment + if p.mainProtectedMaximum < uint64(p.adjustment) { + quota = int64(p.mainProtectedMaximum) + } + p.mainProtectedMaximum -= uint64(quota) + p.windowMaximum += uint64(quota) + p.demoteFromMainProtected() + + for i := 0; i < queueTransferThreshold; i++ { + candidate := p.probation.Head() + probation := true + if node.Equals(candidate, nil) || quota < int64(candidate.Weight()) { + candidate = p.protected.Head() + probation = false + } + if node.Equals(candidate, nil) { + break + } + + weight := uint64(candidate.Weight()) + if quota < int64(weight) { + break + } + + quota -= int64(weight) + if probation { + p.probation.Delete(candidate) + } else { + p.mainProtectedWeightedSize -= weight + p.protected.Delete(candidate) + } + p.windowWeightedSize += weight + p.window.PushBack(candidate) + candidate.MakeWindow() + } + + p.mainProtectedMaximum += uint64(quota) + p.windowMaximum -= uint64(quota) + p.adjustment = quota +} + +func (p *policy[K, V]) decreaseWindow() { + if p.windowMaximum <= 1 { + return + } + + quota := -p.adjustment + windowMaximum := max(0, p.windowMaximum-1) + if windowMaximum < uint64(-p.adjustment) { + quota = int64(windowMaximum) + } + p.mainProtectedMaximum += uint64(quota) + p.windowMaximum -= uint64(quota) + + for i := 0; i < queueTransferThreshold; i++ { + candidate := p.window.Head() + if 
node.Equals(candidate, nil) { + break + } + + weight := int64(candidate.Weight()) + if quota < weight { + break + } + + quota -= weight + p.windowWeightedSize -= uint64(weight) + p.window.Delete(candidate) + p.probation.PushBack(candidate) + candidate.MakeMainProbation() + } + + p.mainProtectedMaximum -= uint64(quota) + p.windowMaximum += uint64(quota) + p.adjustment = -quota +} + +func abs(a float64) float64 { + if a < 0 { + return -a + } + return a +} + +func reorder[K comparable, V any](d *deque.Linked[K, V], n node.Node[K, V]) { + if d.Contains(n) { + d.MoveToBack(n) + } +} diff --git a/vendor/github.com/maypok86/otter/v2/refresh_calculator.go b/vendor/github.com/maypok86/otter/v2/refresh_calculator.go new file mode 100644 index 00000000..0228ca59 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/refresh_calculator.go @@ -0,0 +1,115 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "time" +) + +// RefreshCalculator calculates when cache entries will be reloaded. A single refresh time is retained so that the lifetime +// of an entry may be extended or reduced by subsequent evaluations. +type RefreshCalculator[K comparable, V any] interface { + // RefreshAfterCreate returns the duration after which the entry is eligible for an automatic refresh after the + // entry's creation. To indicate no refresh, an entry may be given an excessively long period. + RefreshAfterCreate(entry Entry[K, V]) time.Duration + // RefreshAfterUpdate returns the duration after which the entry is eligible for an automatic refresh after the + // replacement of the entry's value due to an explicit update. + // The entry.RefreshableAfter() may be returned to not modify the refresh time. + RefreshAfterUpdate(entry Entry[K, V], oldValue V) time.Duration + // RefreshAfterReload returns the duration after which the entry is eligible for an automatic refresh after the + // replacement of the entry's value due to a reload. + // The entry.RefreshableAfter() may be returned to not modify the refresh time. + RefreshAfterReload(entry Entry[K, V], oldValue V) time.Duration + // RefreshAfterReloadFailure returns the duration after which the entry is eligible for an automatic refresh after the + // value failed to be reloaded. + // The entry.RefreshableAfter() may be returned to not modify the refresh time. 
+ RefreshAfterReloadFailure(entry Entry[K, V], err error) time.Duration +} + +type varRefreshCreating[K comparable, V any] struct { + f func(entry Entry[K, V]) time.Duration +} + +func (c *varRefreshCreating[K, V]) RefreshAfterCreate(entry Entry[K, V]) time.Duration { + return c.f(entry) +} + +func (c *varRefreshCreating[K, V]) RefreshAfterUpdate(entry Entry[K, V], oldValue V) time.Duration { + return entry.RefreshableAfter() +} + +func (c *varRefreshCreating[K, V]) RefreshAfterReload(entry Entry[K, V], oldValue V) time.Duration { + return entry.RefreshableAfter() +} + +func (c *varRefreshCreating[K, V]) RefreshAfterReloadFailure(entry Entry[K, V], err error) time.Duration { + return entry.RefreshableAfter() +} + +// RefreshCreating returns a [RefreshCalculator] that specifies that the entry should be automatically reloaded +// once the duration has elapsed after the entry's creation. +// The refresh time is not modified when the entry is updated or reloaded. +func RefreshCreating[K comparable, V any](duration time.Duration) RefreshCalculator[K, V] { + return RefreshCreatingFunc(func(entry Entry[K, V]) time.Duration { + return duration + }) +} + +// RefreshCreatingFunc returns a [RefreshCalculator] that specifies that the entry should be automatically reloaded +// once the duration has elapsed after the entry's creation. +// The refresh time is not modified when the entry is updated or reloaded. +func RefreshCreatingFunc[K comparable, V any](f func(entry Entry[K, V]) time.Duration) RefreshCalculator[K, V] { + return &varRefreshCreating[K, V]{ + f: f, + } +} + +type varRefreshWriting[K comparable, V any] struct { + f func(entry Entry[K, V]) time.Duration +} + +func (w *varRefreshWriting[K, V]) RefreshAfterCreate(entry Entry[K, V]) time.Duration { + return w.f(entry) +} + +func (w *varRefreshWriting[K, V]) RefreshAfterUpdate(entry Entry[K, V], oldValue V) time.Duration { + return w.f(entry) +} + +func (w *varRefreshWriting[K, V]) RefreshAfterReload(entry Entry[K, V], oldValue V) time.Duration { + return w.f(entry) +} + +func (w *varRefreshWriting[K, V]) RefreshAfterReloadFailure(entry Entry[K, V], err error) time.Duration { + return entry.RefreshableAfter() +} + +// RefreshWriting returns a [RefreshCalculator] that specifies that the entry should be automatically reloaded +// once the duration has elapsed after the entry's creation or the most recent replacement of its value. +// The refresh time is not modified when the reload fails. +func RefreshWriting[K comparable, V any](duration time.Duration) RefreshCalculator[K, V] { + return RefreshWritingFunc(func(entry Entry[K, V]) time.Duration { + return duration + }) +} + +// RefreshWritingFunc returns a [RefreshCalculator] that specifies that the entry should be automatically reloaded +// once the duration has elapsed after the entry's creation or the most recent replacement of its value. +// The refresh time is not modified when the reload fails. +func RefreshWritingFunc[K comparable, V any](f func(entry Entry[K, V]) time.Duration) RefreshCalculator[K, V] { + return &varRefreshWriting[K, V]{ + f: f, + } +} diff --git a/vendor/github.com/maypok86/otter/v2/singleflight.go b/vendor/github.com/maypok86/otter/v2/singleflight.go new file mode 100644 index 00000000..d6587b2d --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/singleflight.go @@ -0,0 +1,221 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// +// Copyright notice. 
Initial version of the following code was based on +// the following file from the Go Programming Language core repo: +// https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/container/list/list_test.go +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// That can be found at https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:LICENSE + +package otter + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "unsafe" + + "github.com/maypok86/otter/v2/internal/hashmap" +) + +type call[K comparable, V any] struct { + key K + value V + err error + wg sync.WaitGroup + isRefresh bool + isNotFound bool + isFake bool +} + +func newCall[K comparable, V any](key K, isRefresh bool) *call[K, V] { + c := &call[K, V]{ + key: key, + isRefresh: isRefresh, + } + c.wg.Add(1) + return c +} + +func (c *call[K, V]) Key() K { + return c.key +} + +func (c *call[K, V]) Value() V { + return c.value +} + +func (c *call[K, V]) AsPointer() unsafe.Pointer { + //nolint:gosec // it's ok + return unsafe.Pointer(c) +} + +func (c *call[K, V]) cancel() { + if c.isFake { + return + } + c.wg.Done() +} + +func (c *call[K, V]) wait() { + c.wg.Wait() +} + +type mapCallManager[K comparable, V any] struct{} + +func (m *mapCallManager[K, V]) FromPointer(ptr unsafe.Pointer) *call[K, V] { + return (*call[K, V])(ptr) +} + +func (m *mapCallManager[K, V]) IsNil(c *call[K, V]) bool { + return c == nil +} + +type group[K comparable, V any] struct { + calls *hashmap.Map[K, V, *call[K, V]] + initMutex sync.Mutex + isInitialized atomic.Bool +} + +func (g *group[K, V]) init() { + if !g.isInitialized.Load() { + g.initMutex.Lock() + if !g.isInitialized.Load() { + g.calls = hashmap.New[K, V, *call[K, V]](&mapCallManager[K, V]{}) + g.isInitialized.Store(true) + } + g.initMutex.Unlock() + } +} + +func (g *group[K, V]) getCall(key K) *call[K, V] { + return g.calls.Get(key) +} + +func (g *group[K, V]) startCall(key K, isRefresh bool) (c *call[K, V], shouldLoad bool) { + // fast path + if c := g.getCall(key); c != nil { + return c, shouldLoad + } + + return g.calls.Compute(key, func(prevCall *call[K, V]) *call[K, V] { + // double check + if prevCall != nil { + return prevCall + } + shouldLoad = true + return newCall[K, V](key, isRefresh) + }), shouldLoad +} + +func (g *group[K, V]) doCall( + ctx context.Context, + c *call[K, V], + load func(ctx context.Context, key K) (V, error), + afterFinish func(c *call[K, V]), +) (err error) { + defer func() { + if r := recover(); r != nil { + err = newPanicError(r) + } + + c.err = err + c.isNotFound = errors.Is(err, ErrNotFound) + afterFinish(c) + }() + + c.value, err = load(ctx, c.key) + return err +} + +func (g *group[K, V]) doBulkCall( + ctx context.Context, + callsInBulk map[K]*call[K, V], + bulkLoad func(ctx context.Context, keys []K) (map[K]V, error), + afterFinish func(c *call[K, V]), +) (err error) { + defer func() { + if r := recover(); r != nil { + err = newPanicError(r) + } + + if err != nil { + for _, cl := range callsInBulk { + cl.err = err + cl.isNotFound = false + } + } + + for _, cl := range callsInBulk { + afterFinish(cl) + } + }() + + keys := make([]K, 0, len(callsInBulk)) + for k := range callsInBulk { + keys = append(keys, k) + } + + res, err := bulkLoad(ctx, keys) + + var ( + isRefresh bool + found bool + ) + for k, cl := range callsInBulk { + if !found { + isRefresh = cl.isRefresh + found = true + } + v, ok := res[k] + if ok { + cl.value = v + } else { + cl.isNotFound = true + } + } + + for k, v := range res { + if 
_, ok := callsInBulk[k]; ok { + continue + } + callsInBulk[k] = &call[K, V]{ + key: k, + value: v, + isFake: true, + isRefresh: isRefresh, + } + } + + return err +} + +func (g *group[K, V]) deleteCall(c *call[K, V]) (deleted bool) { + // fast path + if got := g.getCall(c.key); got != c { + return false + } + + cl := g.calls.Compute(c.key, func(prevCall *call[K, V]) *call[K, V] { + // double check + if prevCall == c { + // delete + return nil + } + return prevCall + }) + return cl == nil +} + +func (g *group[K, V]) delete(key K) { + if !g.isInitialized.Load() { + return + } + + g.calls.Compute(key, func(prevCall *call[K, V]) *call[K, V] { + return nil + }) +} diff --git a/vendor/github.com/maypok86/otter/v2/sketch.go b/vendor/github.com/maypok86/otter/v2/sketch.go new file mode 100644 index 00000000..35baf0c4 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/sketch.go @@ -0,0 +1,172 @@ +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "math" + "math/bits" + "sync/atomic" + + "github.com/maypok86/otter/v2/internal/xmath" + "github.com/maypok86/otter/v2/internal/xruntime" +) + +const ( + resetMask = 0x7777777777777777 + oneMask = 0x1111111111111111 +) + +// sketch is a probabilistic multiset for estimating the popularity of an element within a time window. The +// maximum frequency of an element is limited to 15 (4-bits) and an aging process periodically +// halves the popularity of all elements. 
+type sketch[K comparable] struct { + table []uint64 + sampleSize uint64 + blockMask uint64 + size uint64 + hasher xruntime.Hasher[K] + isInitialized atomic.Bool +} + +func newSketch[K comparable]() *sketch[K] { + return &sketch[K]{ + hasher: xruntime.NewHasher[K](), + } +} + +func (s *sketch[K]) ensureCapacity(maximumSize uint64) { + if uint64(len(s.table)) >= maximumSize { + return + } + + if !s.isInitialized.Load() { + s.isInitialized.Store(true) + } + newSize := xmath.RoundUpPowerOf264(maximumSize) + if newSize < 8 { + newSize = 8 + } + + s.table = make([]uint64, newSize) + s.sampleSize = 10 + if maximumSize != 0 { + s.sampleSize = 10 * maximumSize + } + s.blockMask = (uint64(len(s.table)) >> 3) - 1 + s.size = 0 + s.hasher = xruntime.NewHasher[K]() +} + +func (s *sketch[K]) isNotInitialized() bool { + return !s.isInitialized.Load() +} + +func (s *sketch[K]) frequency(k K) uint64 { + if s.isNotInitialized() { + return 0 + } + + frequency := uint64(math.MaxUint64) + blockHash := s.hash(k) + counterHash := rehash(blockHash) + block := (blockHash & s.blockMask) << 3 + for i := uint64(0); i < 4; i++ { + h := counterHash >> (i << 3) + index := (h >> 1) & 15 + offset := h & 1 + slot := block + offset + (i << 1) + count := (s.table[slot] >> (index << 2)) & 0xf + frequency = min(frequency, count) + } + + return frequency +} + +func (s *sketch[K]) increment(k K) { + if s.isNotInitialized() { + return + } + + blockHash := s.hash(k) + counterHash := rehash(blockHash) + block := (blockHash & s.blockMask) << 3 + + // Loop unrolling improves throughput by 10m ops/s + h0 := counterHash + h1 := counterHash >> 8 + h2 := counterHash >> 16 + h3 := counterHash >> 24 + + index0 := (h0 >> 1) & 15 + index1 := (h1 >> 1) & 15 + index2 := (h2 >> 1) & 15 + index3 := (h3 >> 1) & 15 + + slot0 := block + (h0 & 1) + slot1 := block + (h1 & 1) + 2 + slot2 := block + (h2 & 1) + 4 + slot3 := block + (h3 & 1) + 6 + + added := s.incrementAt(slot0, index0) + added = s.incrementAt(slot1, index1) || added + added = s.incrementAt(slot2, index2) || added + added = s.incrementAt(slot3, index3) || added + + if added { + s.size++ + if s.size == s.sampleSize { + s.reset() + } + } +} + +func (s *sketch[K]) incrementAt(i, j uint64) bool { + offset := j << 2 + mask := uint64(0xf) << offset + if (s.table[i] & mask) != mask { + s.table[i] += uint64(1) << offset + return true + } + return false +} + +func (s *sketch[K]) reset() { + count := 0 + for i := 0; i < len(s.table); i++ { + count += bits.OnesCount64(s.table[i] & oneMask) + s.table[i] = (s.table[i] >> 1) & resetMask + } + //nolint:gosec // there's no overflow + s.size = (s.size - (uint64(count) >> 2)) >> 1 +} + +func (s *sketch[K]) hash(k K) uint64 { + return spread(s.hasher.Hash(k)) +} + +func spread(h uint64) uint64 { + h ^= h >> 17 + h *= 0xed5ad4bb + h ^= h >> 11 + h *= 0xac4c1b51 + h ^= h >> 15 + return h +} + +func rehash(h uint64) uint64 { + h *= 0x31848bab + h ^= h >> 14 + return h +} diff --git a/vendor/github.com/maypok86/otter/v2/stats/counter.go b/vendor/github.com/maypok86/otter/v2/stats/counter.go new file mode 100644 index 00000000..e711b828 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/stats/counter.go @@ -0,0 +1,102 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stats + +import ( + "math" + "sync/atomic" + "time" + + "github.com/maypok86/otter/v2/internal/xruntime" + "github.com/maypok86/otter/v2/internal/xsync" +) + +// Counter is a goroutine-safe [Recorder] implementation for use by otter.Cache. +type Counter struct { + hits *xsync.Adder + misses *xsync.Adder + _ [xruntime.CacheLineSize - 16]byte + evictions atomic.Uint64 + evictionWeight atomic.Uint64 + _ [xruntime.CacheLineSize - 16]byte + loadSuccesses atomic.Uint64 + loadFailures atomic.Uint64 + totalLoadTime atomic.Uint64 +} + +// NewCounter constructs a [Counter] instance with all counts initialized to zero. +func NewCounter() *Counter { + return &Counter{ + hits: xsync.NewAdder(), + misses: xsync.NewAdder(), + } +} + +// Snapshot returns a snapshot of this recorder's values. Note that this may be an inconsistent view, as it +// may be interleaved with update operations. +// +// NOTE: the values of the metrics are undefined in case of overflow. If you require specific handling, we recommend +// implementing your own [Recorder]. +func (c *Counter) Snapshot() Stats { + totalLoadTime := c.totalLoadTime.Load() + if totalLoadTime > uint64(math.MaxInt64) { + totalLoadTime = uint64(math.MaxInt64) + } + return Stats{ + Hits: c.hits.Value(), + Misses: c.misses.Value(), + Evictions: c.evictions.Load(), + EvictionWeight: c.evictionWeight.Load(), + LoadSuccesses: c.loadSuccesses.Load(), + LoadFailures: c.loadFailures.Load(), + TotalLoadTime: time.Duration(totalLoadTime), + } +} + +// RecordHits records cache hits. This should be called when a cache request returns a cached value. +func (c *Counter) RecordHits(count int) { + //nolint:gosec // there is no overflow + c.hits.Add(uint64(count)) +} + +// RecordMisses records cache misses. This should be called when a cache request returns a value that was not +// found in the cache. +func (c *Counter) RecordMisses(count int) { + //nolint:gosec // there is no overflow + c.misses.Add(uint64(count)) +} + +// RecordEviction records the eviction of an entry from the cache. This should only been called when an entry is +// evicted due to the cache's eviction strategy, and not as a result of manual deletions. +func (c *Counter) RecordEviction(weight uint32) { + c.evictions.Add(1) + c.evictionWeight.Add(uint64(weight)) +} + +// RecordLoadSuccess records the successful load of a new entry. This method should be called when a cache request +// causes an entry to be loaded and the loading completes successfully (either no error or otter.ErrNotFound). +func (c *Counter) RecordLoadSuccess(loadTime time.Duration) { + c.loadSuccesses.Add(1) + //nolint:gosec // there is no overflow + c.totalLoadTime.Add(uint64(loadTime)) +} + +// RecordLoadFailure records the failed load of a new entry. This method should be called when a cache request +// causes an entry to be loaded, but the loading function returns an error that is not otter.ErrNotFound. 
+func (c *Counter) RecordLoadFailure(loadTime time.Duration) { + c.loadFailures.Add(1) + //nolint:gosec // there is no overflow + c.totalLoadTime.Add(uint64(loadTime)) +} diff --git a/vendor/github.com/maypok86/otter/internal/xruntime/runtime.go b/vendor/github.com/maypok86/otter/v2/stats/doc.go similarity index 73% rename from vendor/github.com/maypok86/otter/internal/xruntime/runtime.go rename to vendor/github.com/maypok86/otter/v2/stats/doc.go index 4813c8c9..6cdfb677 100644 --- a/vendor/github.com/maypok86/otter/internal/xruntime/runtime.go +++ b/vendor/github.com/maypok86/otter/v2/stats/doc.go @@ -1,6 +1,4 @@ -//go:build !go1.22 - -// Copyright (c) 2023 Alexey Mayshev. All rights reserved. +// Copyright (c) 2025 Alexey Mayshev and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,12 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -package xruntime - -import ( - _ "unsafe" -) - -//go:noescape -//go:linkname Fastrand runtime.fastrand -func Fastrand() uint32 +// Package stats contains caching statistic utilities. +package stats diff --git a/vendor/github.com/maypok86/otter/v2/stats/recorder.go b/vendor/github.com/maypok86/otter/v2/stats/recorder.go new file mode 100644 index 00000000..de6ee26b --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/stats/recorder.go @@ -0,0 +1,59 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stats + +import "time" + +// Recorder accumulates statistics during the operation of a otter.Cache. +type Recorder interface { + // RecordHits records cache hits. This should be called when a cache request returns a cached value. + RecordHits(count int) + // RecordMisses records cache misses. This should be called when a cache request returns a value that was not + // found in the cache. + RecordMisses(count int) + // RecordEviction records the eviction of an entry from the cache. This should only been called when an entry is + // evicted due to the cache's eviction strategy, and not as a result of manual deletions. + RecordEviction(weight uint32) + // RecordLoadSuccess records the successful load of a new entry. This method should be called when a cache request + // causes an entry to be loaded and the loading completes successfully (either no error or otter.ErrNotFound). + RecordLoadSuccess(loadTime time.Duration) + // RecordLoadFailure records the failed load of a new entry. This method should be called when a cache request + // causes an entry to be loaded, but the loading function returns an error that is not otter.ErrNotFound. + RecordLoadFailure(loadTime time.Duration) +} + +// Snapshoter allows getting a stats snapshot from a recorder that implements it. +type Snapshoter interface { + // Snapshot returns a snapshot of this recorder's values. 
+ Snapshot() Stats +} + +// SnapshotRecorder is the interface that groups the [Snapshoter] and [Recorder] interfaces. +type SnapshotRecorder interface { + Snapshoter + Recorder +} + +// NoopRecorder is a noop stats recorder. It can be useful if recording statistics is not necessary. +type NoopRecorder struct{} + +func (np *NoopRecorder) RecordHits(count int) {} +func (np *NoopRecorder) RecordMisses(count int) {} +func (np *NoopRecorder) RecordEviction(weight uint32) {} +func (np *NoopRecorder) RecordLoadFailure(loadTime time.Duration) {} +func (np *NoopRecorder) RecordLoadSuccess(loadTime time.Duration) {} +func (np *NoopRecorder) Snapshot() Stats { + return Stats{} +} diff --git a/vendor/github.com/maypok86/otter/v2/stats/stats.go b/vendor/github.com/maypok86/otter/v2/stats/stats.go new file mode 100644 index 00000000..7b6c622b --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/stats/stats.go @@ -0,0 +1,153 @@ +// Copyright (c) 2024 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stats + +import ( + "math" + "time" + + "github.com/maypok86/otter/v2/internal/xmath" +) + +// Stats are statistics about the performance of an otter.Cache. +type Stats struct { + // Hits is the number of times otter.Cache lookup methods returned a cached value. + Hits uint64 + // Misses is the number of times otter.Cache lookup methods did not find a cached value. + Misses uint64 + // Evictions is the number of times an entry has been evicted. This count does not include manual + // otter.Cache deletions. + Evictions uint64 + // EvictionWeight is the sum of weights of evicted entries. This total does not include manual + // otter.Cache deletions. + EvictionWeight uint64 + // LoadSuccesses is the number of times otter.Cache lookup methods have successfully loaded a new value. + LoadSuccesses uint64 + // LoadFailures is the number of times otter.Cache lookup methods failed to load a new value, either + // because no value was found or an error was returned while loading. + LoadFailures uint64 + // TotalLoadTime returns the time the cache has spent loading new values. + TotalLoadTime time.Duration +} + +// Requests returns the number of times otter.Cache lookup methods were looking for a cached value. +// +// NOTE: the values of the metrics are undefined in case of overflow. If you require specific handling, we recommend +// implementing your own [Recorder]. +func (s Stats) Requests() uint64 { + return saturatedAdd(s.Hits, s.Misses) +} + +// HitRatio returns the ratio of cache requests which were hits. +// +// NOTE: hitRatio + missRatio =~ 1.0. +func (s Stats) HitRatio() float64 { + requests := s.Requests() + if requests == 0 { + return 1.0 + } + return float64(s.Hits) / float64(requests) +} + +// MissRatio returns the ratio of cache requests which were misses. +// +// NOTE: hitRatio + missRatio =~ 1.0. 
+func (s Stats) MissRatio() float64 { + requests := s.Requests() + if requests == 0 { + return 0.0 + } + return float64(s.Misses) / float64(requests) +} + +// Loads returns the total number of times that otter.Cache lookup methods attempted to load new values. +// +// NOTE: the values of the metrics are undefined in case of overflow. If you require specific handling, we recommend +// implementing your own [Recorder]. +func (s Stats) Loads() uint64 { + return saturatedAdd(s.LoadSuccesses, s.LoadFailures) +} + +// LoadFailureRatio returns the ratio of cache loading attempts which returned errors. +func (s Stats) LoadFailureRatio() float64 { + loads := s.Loads() + if loads == 0 { + return 0.0 + } + return float64(s.LoadFailures) / float64(loads) +} + +// AverageLoadPenalty returns the average time spent loading new values. +func (s Stats) AverageLoadPenalty() time.Duration { + loads := s.Loads() + if loads == 0 { + return 0 + } + if loads > uint64(math.MaxInt64) { + return s.TotalLoadTime / time.Duration(math.MaxInt64) + } + return s.TotalLoadTime / time.Duration(loads) +} + +// Minus returns a new [Stats] representing the difference between this [Stats] and other. +// Negative values, which aren't supported by [Stats] will be rounded up to zero. +func (s Stats) Minus(other Stats) Stats { + return Stats{ + Hits: subtract(s.Hits, other.Hits), + Misses: subtract(s.Misses, other.Misses), + Evictions: subtract(s.Evictions, other.Evictions), + EvictionWeight: subtract(s.EvictionWeight, other.EvictionWeight), + LoadSuccesses: subtract(s.LoadSuccesses, other.LoadSuccesses), + LoadFailures: subtract(s.LoadFailures, other.LoadFailures), + TotalLoadTime: subtract(s.TotalLoadTime, other.TotalLoadTime), + } +} + +// Plus returns a new [Stats] representing the sum of this [Stats] and other. +// +// NOTE: the values of the metrics are undefined in case of overflow (though it is +// guaranteed not to throw an exception). If you require specific handling, we recommend +// implementing your own stats' recorder. +func (s Stats) Plus(other Stats) Stats { + totalLoadTime := xmath.SaturatedAdd(int64(s.TotalLoadTime), int64(other.TotalLoadTime)) + return Stats{ + Hits: saturatedAdd(s.Hits, other.Hits), + Misses: saturatedAdd(s.Misses, other.Misses), + Evictions: saturatedAdd(s.Evictions, other.Evictions), + EvictionWeight: saturatedAdd(s.EvictionWeight, other.EvictionWeight), + LoadSuccesses: saturatedAdd(s.LoadSuccesses, other.LoadSuccesses), + LoadFailures: saturatedAdd(s.LoadFailures, other.LoadFailures), + TotalLoadTime: time.Duration(totalLoadTime), + } +} + +type counterType interface { + ~uint64 | ~int64 +} + +func subtract[T counterType](a, b T) T { + if a < b { + return 0 + } + return a - b +} + +func saturatedAdd(a, b uint64) uint64 { + s := a + b + if s < a || s < b { + return math.MaxUint64 + } + return s +} diff --git a/vendor/github.com/maypok86/otter/v2/task.go b/vendor/github.com/maypok86/otter/v2/task.go new file mode 100644 index 00000000..32fdfe42 --- /dev/null +++ b/vendor/github.com/maypok86/otter/v2/task.go @@ -0,0 +1,48 @@ +// Copyright (c) 2023 Alexey Mayshev and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
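The stats package added above is self-contained, so its recording API and derived metrics can be exercised on their own. A minimal sketch, assuming only the identifiers introduced in these hunks (how a Recorder is attached to an otter.Cache is configured elsewhere and is not part of this patch):

package main

import (
	"fmt"
	"time"

	"github.com/maypok86/otter/v2/stats"
)

func main() {
	// In real use the cache drives these calls; here we record synthetic activity.
	c := stats.NewCounter()
	c.RecordHits(90)
	c.RecordMisses(10)
	c.RecordLoadSuccess(5 * time.Millisecond)
	c.RecordLoadFailure(20 * time.Millisecond)
	c.RecordEviction(1)

	s := c.Snapshot()
	fmt.Println(s.Requests())           // 100 (hits + misses, saturating on overflow)
	fmt.Printf("%.2f\n", s.HitRatio())  // 0.90
	fmt.Println(s.AverageLoadPenalty()) // 12.5ms (25ms total load time over 2 loads)

	// Stats values are plain data, so a delta between two snapshots is just Minus.
	delta := c.Snapshot().Minus(s)
	fmt.Println(delta.Requests()) // 0, nothing was recorded in between
}

NoopRecorder, also added above, is the drop-in alternative when statistics should not be collected at all.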
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otter + +import ( + "github.com/maypok86/otter/v2/internal/generated/node" +) + +// reason represents the reason for writing the item to the cache. +type reason uint8 + +const ( + unknownReason reason = iota + addReason + deleteReason + updateReason +) + +// task is a set of information to update the cache: +// node, reason for write, difference after node weight change, etc. +type task[K comparable, V any] struct { + n node.Node[K, V] + old node.Node[K, V] + writeReason reason + deletionCause DeletionCause +} + +// node returns the node contained in the task. If node was not specified, it returns nil. +func (t *task[K, V]) node() node.Node[K, V] { + return t.n +} + +// oldNode returns the old node contained in the task. If old node was not specified, it returns nil. +func (t *task[K, V]) oldNode() node.Node[K, V] { + return t.old +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go deleted file mode 100644 index 41f395e5..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go +++ /dev/null @@ -1,117 +0,0 @@ -//go:build darwin -// +build darwin - -package cpu - -import ( - "context" - "strconv" - "strings" - - "github.com/shoenig/go-m1cpu" - "github.com/tklauser/go-sysconf" - "golang.org/x/sys/unix" -) - -// sys/resource.h -const ( - CPUser = 0 - cpNice = 1 - cpSys = 2 - cpIntr = 3 - cpIdle = 4 - cpUStates = 5 -) - -// default value. 
from time.h -var ClocksPerSec = float64(128) - -func init() { - clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) - // ignore errors - if err == nil { - ClocksPerSec = float64(clkTck) - } -} - -func Times(percpu bool) ([]TimesStat, error) { - return TimesWithContext(context.Background(), percpu) -} - -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - if percpu { - return perCPUTimes() - } - - return allCPUTimes() -} - -// Returns only one CPUInfoStat on FreeBSD -func Info() ([]InfoStat, error) { - return InfoWithContext(context.Background()) -} - -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - var ret []InfoStat - - c := InfoStat{} - c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string") - family, _ := unix.SysctlUint32("machdep.cpu.family") - c.Family = strconv.FormatUint(uint64(family), 10) - model, _ := unix.SysctlUint32("machdep.cpu.model") - c.Model = strconv.FormatUint(uint64(model), 10) - stepping, _ := unix.SysctlUint32("machdep.cpu.stepping") - c.Stepping = int32(stepping) - features, err := unix.Sysctl("machdep.cpu.features") - if err == nil { - for _, v := range strings.Fields(features) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features") - if err == nil { - for _, v := range strings.Fields(leaf7Features) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures") - if err == nil { - for _, v := range strings.Fields(extfeatures) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - cores, _ := unix.SysctlUint32("machdep.cpu.core_count") - c.Cores = int32(cores) - cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size") - c.CacheSize = int32(cacheSize) - c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") - - if m1cpu.IsAppleSilicon() { - c.Mhz = float64(m1cpu.PCoreHz() / 1_000_000) - } else { - // Use the rated frequency of the CPU. This is a static value and does not - // account for low power or Turbo Boost modes. - cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") - if err == nil { - c.Mhz = float64(cpuFrequency) / 1000000.0 - } - } - - return append(ret, c), nil -} - -func CountsWithContext(ctx context.Context, logical bool) (int, error) { - var cpuArgument string - if logical { - cpuArgument = "hw.logicalcpu" - } else { - cpuArgument = "hw.physicalcpu" - } - - count, err := unix.SysctlUint32(cpuArgument) - if err != nil { - return 0, err - } - - return int(count), nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go deleted file mode 100644 index 1d5f0772..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go +++ /dev/null @@ -1,111 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package cpu - -/* -#include -#include -#include -#include -#include -#include -#include -#if TARGET_OS_MAC -#include -#endif -#include -#include -*/ -import "C" - -import ( - "bytes" - "encoding/binary" - "fmt" - "unsafe" -) - -// these CPU times for darwin is borrowed from influxdb/telegraf. 
- -func perCPUTimes() ([]TimesStat, error) { - var ( - count C.mach_msg_type_number_t - cpuload *C.processor_cpu_load_info_data_t - ncpu C.natural_t - ) - - status := C.host_processor_info(C.host_t(C.mach_host_self()), - C.PROCESSOR_CPU_LOAD_INFO, - &ncpu, - (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_processor_info error=%d", status) - } - - // jump through some cgo casting hoops and ensure we properly free - // the memory that cpuload points to - target := C.vm_map_t(C.mach_task_self_) - address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) - defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) - - // the body of struct processor_cpu_load_info - // aka processor_cpu_load_info_data_t - var cpu_ticks [C.CPU_STATE_MAX]uint32 - - // copy the cpuload array to a []byte buffer - // where we can binary.Read the data - size := int(ncpu) * binary.Size(cpu_ticks) - buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size] - - bbuf := bytes.NewBuffer(buf) - - var ret []TimesStat - - for i := 0; i < int(ncpu); i++ { - err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) - if err != nil { - return nil, err - } - - c := TimesStat{ - CPU: fmt.Sprintf("cpu%d", i), - User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, - System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, - Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, - Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, - } - - ret = append(ret, c) - } - - return ret, nil -} - -func allCPUTimes() ([]TimesStat, error) { - var count C.mach_msg_type_number_t - var cpuload C.host_cpu_load_info_data_t - - count = C.HOST_CPU_LOAD_INFO_COUNT - - status := C.host_statistics(C.host_t(C.mach_host_self()), - C.HOST_CPU_LOAD_INFO, - C.host_info_t(unsafe.Pointer(&cpuload)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_statistics error=%d", status) - } - - c := TimesStat{ - CPU: "cpu-total", - User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, - System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, - Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, - Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, - } - - return []TimesStat{c}, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go deleted file mode 100644 index e067e99f..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package cpu - -import "github.com/shirou/gopsutil/v3/internal/common" - -func perCPUTimes() ([]TimesStat, error) { - return []TimesStat{}, common.ErrNotImplementedError -} - -func allCPUTimes() ([]TimesStat, error) { - return []TimesStat{}, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go deleted file mode 100644 index e10612fd..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go +++ /dev/null @@ -1,229 +0,0 @@ -//go:build windows -// +build windows - -package cpu - -import ( - "context" - "fmt" - "unsafe" - - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/yusufpapurcu/wmi" - "golang.org/x/sys/windows" -) - -var ( - procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") -) - -type win32_Processor struct { 
- Family uint16 - Manufacturer string - Name string - NumberOfLogicalProcessors uint32 - NumberOfCores uint32 - ProcessorID *string - Stepping *string - MaxClockSpeed uint32 -} - -// SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION -// defined in windows api doc with the following -// https://docs.microsoft.com/en-us/windows/desktop/api/winternl/nf-winternl-ntquerysysteminformation#system_processor_performance_information -// additional fields documented here -// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/ex/sysinfo/processor_performance.htm -type win32_SystemProcessorPerformanceInformation struct { - IdleTime int64 // idle time in 100ns (this is not a filetime). - KernelTime int64 // kernel time in 100ns. kernel time includes idle time. (this is not a filetime). - UserTime int64 // usertime in 100ns (this is not a filetime). - DpcTime int64 // dpc time in 100ns (this is not a filetime). - InterruptTime int64 // interrupt time in 100ns - InterruptCount uint32 -} - -const ( - ClocksPerSec = 10000000.0 - - // systemProcessorPerformanceInformationClass information class to query with NTQuerySystemInformation - // https://processhacker.sourceforge.io/doc/ntexapi_8h.html#ad5d815b48e8f4da1ef2eb7a2f18a54e0 - win32_SystemProcessorPerformanceInformationClass = 8 - - // size of systemProcessorPerformanceInfoSize in memory - win32_SystemProcessorPerformanceInfoSize = uint32(unsafe.Sizeof(win32_SystemProcessorPerformanceInformation{})) -) - -// Times returns times stat per cpu and combined for all CPUs -func Times(percpu bool) ([]TimesStat, error) { - return TimesWithContext(context.Background(), percpu) -} - -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - if percpu { - return perCPUTimes() - } - - var ret []TimesStat - var lpIdleTime common.FILETIME - var lpKernelTime common.FILETIME - var lpUserTime common.FILETIME - r, _, _ := common.ProcGetSystemTimes.Call( - uintptr(unsafe.Pointer(&lpIdleTime)), - uintptr(unsafe.Pointer(&lpKernelTime)), - uintptr(unsafe.Pointer(&lpUserTime))) - if r == 0 { - return ret, windows.GetLastError() - } - - LOT := float64(0.0000001) - HIT := (LOT * 4294967296.0) - idle := ((HIT * float64(lpIdleTime.DwHighDateTime)) + (LOT * float64(lpIdleTime.DwLowDateTime))) - user := ((HIT * float64(lpUserTime.DwHighDateTime)) + (LOT * float64(lpUserTime.DwLowDateTime))) - kernel := ((HIT * float64(lpKernelTime.DwHighDateTime)) + (LOT * float64(lpKernelTime.DwLowDateTime))) - system := (kernel - idle) - - ret = append(ret, TimesStat{ - CPU: "cpu-total", - Idle: float64(idle), - User: float64(user), - System: float64(system), - }) - return ret, nil -} - -func Info() ([]InfoStat, error) { - return InfoWithContext(context.Background()) -} - -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - var ret []InfoStat - var dst []win32_Processor - q := wmi.CreateQuery(&dst, "") - if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil { - return ret, err - } - - var procID string - for i, l := range dst { - procID = "" - if l.ProcessorID != nil { - procID = *l.ProcessorID - } - - cpu := InfoStat{ - CPU: int32(i), - Family: fmt.Sprintf("%d", l.Family), - VendorID: l.Manufacturer, - ModelName: l.Name, - Cores: int32(l.NumberOfLogicalProcessors), - PhysicalID: procID, - Mhz: float64(l.MaxClockSpeed), - Flags: []string{}, - } - ret = append(ret, cpu) - } - - return ret, nil -} - -// perCPUTimes returns times stat per cpu, per core and overall for all CPUs -func perCPUTimes() ([]TimesStat, error) { - var ret []TimesStat - stats, err 
:= perfInfo() - if err != nil { - return nil, err - } - for core, v := range stats { - c := TimesStat{ - CPU: fmt.Sprintf("cpu%d", core), - User: float64(v.UserTime) / ClocksPerSec, - System: float64(v.KernelTime-v.IdleTime) / ClocksPerSec, - Idle: float64(v.IdleTime) / ClocksPerSec, - Irq: float64(v.InterruptTime) / ClocksPerSec, - } - ret = append(ret, c) - } - return ret, nil -} - -// makes call to Windows API function to retrieve performance information for each core -func perfInfo() ([]win32_SystemProcessorPerformanceInformation, error) { - // Make maxResults large for safety. - // We can't invoke the api call with a results array that's too small. - // If we have more than 2056 cores on a single host, then it's probably the future. - maxBuffer := 2056 - // buffer for results from the windows proc - resultBuffer := make([]win32_SystemProcessorPerformanceInformation, maxBuffer) - // size of the buffer in memory - bufferSize := uintptr(win32_SystemProcessorPerformanceInfoSize) * uintptr(maxBuffer) - // size of the returned response - var retSize uint32 - - // Invoke windows api proc. - // The returned err from the windows dll proc will always be non-nil even when successful. - // See https://godoc.org/golang.org/x/sys/windows#LazyProc.Call for more information - retCode, _, err := common.ProcNtQuerySystemInformation.Call( - win32_SystemProcessorPerformanceInformationClass, // System Information Class -> SystemProcessorPerformanceInformation - uintptr(unsafe.Pointer(&resultBuffer[0])), // pointer to first element in result buffer - bufferSize, // size of the buffer in memory - uintptr(unsafe.Pointer(&retSize)), // pointer to the size of the returned results the windows proc will set this - ) - - // check return code for errors - if retCode != 0 { - return nil, fmt.Errorf("call to NtQuerySystemInformation returned %d. err: %s", retCode, err.Error()) - } - - // calculate the number of returned elements based on the returned size - numReturnedElements := retSize / win32_SystemProcessorPerformanceInfoSize - - // trim results to the number of returned elements - resultBuffer = resultBuffer[:numReturnedElements] - - return resultBuffer, nil -} - -// SystemInfo is an equivalent representation of SYSTEM_INFO in the Windows API. 
-// https://msdn.microsoft.com/en-us/library/ms724958%28VS.85%29.aspx?f=255&MSPPError=-2147217396 -// https://github.com/elastic/go-windows/blob/bb1581babc04d5cb29a2bfa7a9ac6781c730c8dd/kernel32.go#L43 -type systemInfo struct { - wProcessorArchitecture uint16 - wReserved uint16 - dwPageSize uint32 - lpMinimumApplicationAddress uintptr - lpMaximumApplicationAddress uintptr - dwActiveProcessorMask uintptr - dwNumberOfProcessors uint32 - dwProcessorType uint32 - dwAllocationGranularity uint32 - wProcessorLevel uint16 - wProcessorRevision uint16 -} - -func CountsWithContext(ctx context.Context, logical bool) (int, error) { - if logical { - // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 - ret := windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS) - if ret != 0 { - return int(ret), nil - } - var systemInfo systemInfo - _, _, err := procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) - if systemInfo.dwNumberOfProcessors == 0 { - return 0, err - } - return int(systemInfo.dwNumberOfProcessors), nil - } - // physical cores https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L499 - // for the time being, try with unreliable and slow WMI call… - var dst []win32_Processor - q := wmi.CreateQuery(&dst, "") - if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil { - return 0, err - } - var count uint32 - for _, d := range dst { - count += d.NumberOfCores - } - return int(count), nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_cgo.go deleted file mode 100644 index ffdc7b78..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_cgo.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package host - -// #cgo LDFLAGS: -framework IOKit -// #include "smc_darwin.h" -import "C" -import "context" - -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - temperatureKeys := []string{ - C.AMBIENT_AIR_0, - C.AMBIENT_AIR_1, - C.CPU_0_DIODE, - C.CPU_0_HEATSINK, - C.CPU_0_PROXIMITY, - C.ENCLOSURE_BASE_0, - C.ENCLOSURE_BASE_1, - C.ENCLOSURE_BASE_2, - C.ENCLOSURE_BASE_3, - C.GPU_0_DIODE, - C.GPU_0_HEATSINK, - C.GPU_0_PROXIMITY, - C.HARD_DRIVE_BAY, - C.MEMORY_SLOT_0, - C.MEMORY_SLOTS_PROXIMITY, - C.NORTHBRIDGE, - C.NORTHBRIDGE_DIODE, - C.NORTHBRIDGE_PROXIMITY, - C.THUNDERBOLT_0, - C.THUNDERBOLT_1, - C.WIRELESS_MODULE, - } - var temperatures []TemperatureStat - - C.gopsutil_v3_open_smc() - defer C.gopsutil_v3_close_smc() - - for _, key := range temperatureKeys { - temperatures = append(temperatures, TemperatureStat{ - SensorKey: key, - Temperature: float64(C.gopsutil_v3_get_temperature(C.CString(key))), - }) - } - return temperatures, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_nocgo.go deleted file mode 100644 index 6285ba94..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_nocgo.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package host - -import ( - "context" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - return []TemperatureStat{}, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go 
b/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go deleted file mode 100644 index 150ccf00..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows,!aix - -package host - -import ( - "context" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func HostIDWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func numProcs(ctx context.Context) (uint64, error) { - return 0, common.ErrNotImplementedError -} - -func BootTimeWithContext(ctx context.Context) (uint64, error) { - return 0, common.ErrNotImplementedError -} - -func UptimeWithContext(ctx context.Context) (uint64, error) { - return 0, common.ErrNotImplementedError -} - -func UsersWithContext(ctx context.Context) ([]UserStat, error) { - return []UserStat{}, common.ErrNotImplementedError -} - -func VirtualizationWithContext(ctx context.Context) (string, string, error) { - return "", "", common.ErrNotImplementedError -} - -func KernelVersionWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { - return "", "", "", common.ErrNotImplementedError -} - -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - return []TemperatureStat{}, common.ErrNotImplementedError -} - -func KernelArch() (string, error) { - return "", common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c b/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c deleted file mode 100644 index 0197d95b..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.c +++ /dev/null @@ -1,169 +0,0 @@ -#include -#include -#include "smc_darwin.h" - -#define IOSERVICE_SMC "AppleSMC" -#define IOSERVICE_MODEL "IOPlatformExpertDevice" - -#define DATA_TYPE_SP78 "sp78" - -typedef enum { - kSMCUserClientOpen = 0, - kSMCUserClientClose = 1, - kSMCHandleYPCEvent = 2, - kSMCReadKey = 5, - kSMCWriteKey = 6, - kSMCGetKeyCount = 7, - kSMCGetKeyFromIndex = 8, - kSMCGetKeyInfo = 9, -} selector_t; - -typedef struct { - unsigned char major; - unsigned char minor; - unsigned char build; - unsigned char reserved; - unsigned short release; -} SMCVersion; - -typedef struct { - uint16_t version; - uint16_t length; - uint32_t cpuPLimit; - uint32_t gpuPLimit; - uint32_t memPLimit; -} SMCPLimitData; - -typedef struct { - IOByteCount data_size; - uint32_t data_type; - uint8_t data_attributes; -} SMCKeyInfoData; - -typedef struct { - uint32_t key; - SMCVersion vers; - SMCPLimitData p_limit_data; - SMCKeyInfoData key_info; - uint8_t result; - uint8_t status; - uint8_t data8; - uint32_t data32; - uint8_t bytes[32]; -} SMCParamStruct; - -typedef enum { - kSMCSuccess = 0, - kSMCError = 1, - kSMCKeyNotFound = 0x84, -} kSMC_t; - -typedef struct { - uint8_t data[32]; - uint32_t data_type; - uint32_t data_size; - kSMC_t kSMC; -} smc_return_t; - -static const int SMC_KEY_SIZE = 4; // number of characters in an SMC key. -static io_connect_t conn; // our connection to the SMC. 
- -kern_return_t gopsutil_v3_open_smc(void) { - kern_return_t result; - io_service_t service; - - service = IOServiceGetMatchingService(0, IOServiceMatching(IOSERVICE_SMC)); - if (service == 0) { - // Note: IOServiceMatching documents 0 on failure - printf("ERROR: %s NOT FOUND\n", IOSERVICE_SMC); - return kIOReturnError; - } - - result = IOServiceOpen(service, mach_task_self(), 0, &conn); - IOObjectRelease(service); - - return result; -} - -kern_return_t gopsutil_v3_close_smc(void) { return IOServiceClose(conn); } - -static uint32_t to_uint32(char *key) { - uint32_t ans = 0; - uint32_t shift = 24; - - if (strlen(key) != SMC_KEY_SIZE) { - return 0; - } - - for (int i = 0; i < SMC_KEY_SIZE; i++) { - ans += key[i] << shift; - shift -= 8; - } - - return ans; -} - -static kern_return_t call_smc(SMCParamStruct *input, SMCParamStruct *output) { - kern_return_t result; - size_t input_cnt = sizeof(SMCParamStruct); - size_t output_cnt = sizeof(SMCParamStruct); - - result = IOConnectCallStructMethod(conn, kSMCHandleYPCEvent, input, input_cnt, - output, &output_cnt); - - if (result != kIOReturnSuccess) { - result = err_get_code(result); - } - return result; -} - -static kern_return_t read_smc(char *key, smc_return_t *result_smc) { - kern_return_t result; - SMCParamStruct input; - SMCParamStruct output; - - memset(&input, 0, sizeof(SMCParamStruct)); - memset(&output, 0, sizeof(SMCParamStruct)); - memset(result_smc, 0, sizeof(smc_return_t)); - - input.key = to_uint32(key); - input.data8 = kSMCGetKeyInfo; - - result = call_smc(&input, &output); - result_smc->kSMC = output.result; - - if (result != kIOReturnSuccess || output.result != kSMCSuccess) { - return result; - } - - result_smc->data_size = output.key_info.data_size; - result_smc->data_type = output.key_info.data_type; - - input.key_info.data_size = output.key_info.data_size; - input.data8 = kSMCReadKey; - - result = call_smc(&input, &output); - result_smc->kSMC = output.result; - - if (result != kIOReturnSuccess || output.result != kSMCSuccess) { - return result; - } - - memcpy(result_smc->data, output.bytes, sizeof(output.bytes)); - - return result; -} - -double gopsutil_v3_get_temperature(char *key) { - kern_return_t result; - smc_return_t result_smc; - - result = read_smc(key, &result_smc); - - if (!(result == kIOReturnSuccess) && result_smc.data_size == 2 && - result_smc.data_type == to_uint32(DATA_TYPE_SP78)) { - return 0.0; - } - - return (double)result_smc.data[0]; -} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h b/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h deleted file mode 100644 index e3013abd..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/host/smc_darwin.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef __SMC_H__ -#define __SMC_H__ 1 - -#include - -#define AMBIENT_AIR_0 "TA0P" -#define AMBIENT_AIR_1 "TA1P" -#define CPU_0_DIODE "TC0D" -#define CPU_0_HEATSINK "TC0H" -#define CPU_0_PROXIMITY "TC0P" -#define ENCLOSURE_BASE_0 "TB0T" -#define ENCLOSURE_BASE_1 "TB1T" -#define ENCLOSURE_BASE_2 "TB2T" -#define ENCLOSURE_BASE_3 "TB3T" -#define GPU_0_DIODE "TG0D" -#define GPU_0_HEATSINK "TG0H" -#define GPU_0_PROXIMITY "TG0P" -#define HARD_DRIVE_BAY "TH0P" -#define MEMORY_SLOT_0 "TM0S" -#define MEMORY_SLOTS_PROXIMITY "TM0P" -#define NORTHBRIDGE "TN0H" -#define NORTHBRIDGE_DIODE "TN0D" -#define NORTHBRIDGE_PROXIMITY "TN0P" -#define THUNDERBOLT_0 "TI0P" -#define THUNDERBOLT_1 "TI1P" -#define WIRELESS_MODULE "TW0P" - -kern_return_t gopsutil_v3_open_smc(void); -kern_return_t gopsutil_v3_close_smc(void); -double 
gopsutil_v3_get_temperature(char *); - -#endif // __SMC_H__ diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go deleted file mode 100644 index 5e8d43db..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go +++ /dev/null @@ -1,637 +0,0 @@ -package common - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package binary implements simple translation between numbers and byte -// sequences and encoding and decoding of varints. -// -// Numbers are translated by reading and writing fixed-size values. -// A fixed-size value is either a fixed-size arithmetic -// type (int8, uint8, int16, float32, complex64, ...) -// or an array or struct containing only fixed-size values. -// -// The varint functions encode and decode single integer values using -// a variable-length encoding; smaller values require fewer bytes. -// For a specification, see -// http://code.google.com/apis/protocolbuffers/docs/encoding.html. -// -// This package favors simplicity over efficiency. Clients that require -// high-performance serialization, especially for large data structures, -// should look at more advanced solutions such as the encoding/gob -// package or protocol buffers. - -import ( - "errors" - "io" - "math" - "reflect" -) - -// A ByteOrder specifies how to convert byte sequences into -// 16-, 32-, or 64-bit unsigned integers. -type ByteOrder interface { - Uint16([]byte) uint16 - Uint32([]byte) uint32 - Uint64([]byte) uint64 - PutUint16([]byte, uint16) - PutUint32([]byte, uint32) - PutUint64([]byte, uint64) - String() string -} - -// LittleEndian is the little-endian implementation of ByteOrder. -var LittleEndian littleEndian - -// BigEndian is the big-endian implementation of ByteOrder. 
-var BigEndian bigEndian - -type littleEndian struct{} - -func (littleEndian) Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 } - -func (littleEndian) PutUint16(b []byte, v uint16) { - b[0] = byte(v) - b[1] = byte(v >> 8) -} - -func (littleEndian) Uint32(b []byte) uint32 { - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func (littleEndian) PutUint32(b []byte, v uint32) { - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) -} - -func (littleEndian) Uint64(b []byte) uint64 { - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func (littleEndian) PutUint64(b []byte, v uint64) { - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) - b[4] = byte(v >> 32) - b[5] = byte(v >> 40) - b[6] = byte(v >> 48) - b[7] = byte(v >> 56) -} - -func (littleEndian) String() string { return "LittleEndian" } - -func (littleEndian) GoString() string { return "binary.LittleEndian" } - -type bigEndian struct{} - -func (bigEndian) Uint16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 } - -func (bigEndian) PutUint16(b []byte, v uint16) { - b[0] = byte(v >> 8) - b[1] = byte(v) -} - -func (bigEndian) Uint32(b []byte) uint32 { - return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 -} - -func (bigEndian) PutUint32(b []byte, v uint32) { - b[0] = byte(v >> 24) - b[1] = byte(v >> 16) - b[2] = byte(v >> 8) - b[3] = byte(v) -} - -func (bigEndian) Uint64(b []byte) uint64 { - return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | - uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 -} - -func (bigEndian) PutUint64(b []byte, v uint64) { - b[0] = byte(v >> 56) - b[1] = byte(v >> 48) - b[2] = byte(v >> 40) - b[3] = byte(v >> 32) - b[4] = byte(v >> 24) - b[5] = byte(v >> 16) - b[6] = byte(v >> 8) - b[7] = byte(v) -} - -func (bigEndian) String() string { return "BigEndian" } - -func (bigEndian) GoString() string { return "binary.BigEndian" } - -// Read reads structured binary data from r into data. -// Data must be a pointer to a fixed-size value or a slice -// of fixed-size values. -// Bytes read from r are decoded using the specified byte order -// and written to successive fields of the data. -// When reading into structs, the field data for fields with -// blank (_) field names is skipped; i.e., blank field names -// may be used for padding. -// When reading into a struct, all non-blank fields must be exported. -func Read(r io.Reader, order ByteOrder, data interface{}) error { - // Fast path for basic types and slices. - if n := intDataSize(data); n != 0 { - var b [8]byte - var bs []byte - if n > len(b) { - bs = make([]byte, n) - } else { - bs = b[:n] - } - if _, err := io.ReadFull(r, bs); err != nil { - return err - } - switch data := data.(type) { - case *int8: - *data = int8(b[0]) - case *uint8: - *data = b[0] - case *int16: - *data = int16(order.Uint16(bs)) - case *uint16: - *data = order.Uint16(bs) - case *int32: - *data = int32(order.Uint32(bs)) - case *uint32: - *data = order.Uint32(bs) - case *int64: - *data = int64(order.Uint64(bs)) - case *uint64: - *data = order.Uint64(bs) - case []int8: - for i, x := range bs { // Easier to loop over the input for 8-bit values. 
- data[i] = int8(x) - } - case []uint8: - copy(data, bs) - case []int16: - for i := range data { - data[i] = int16(order.Uint16(bs[2*i:])) - } - case []uint16: - for i := range data { - data[i] = order.Uint16(bs[2*i:]) - } - case []int32: - for i := range data { - data[i] = int32(order.Uint32(bs[4*i:])) - } - case []uint32: - for i := range data { - data[i] = order.Uint32(bs[4*i:]) - } - case []int64: - for i := range data { - data[i] = int64(order.Uint64(bs[8*i:])) - } - case []uint64: - for i := range data { - data[i] = order.Uint64(bs[8*i:]) - } - } - return nil - } - - // Fallback to reflect-based decoding. - v := reflect.ValueOf(data) - size := -1 - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - size = dataSize(v) - case reflect.Slice: - size = dataSize(v) - } - if size < 0 { - return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String()) - } - d := &decoder{order: order, buf: make([]byte, size)} - if _, err := io.ReadFull(r, d.buf); err != nil { - return err - } - d.value(v) - return nil -} - -// Write writes the binary representation of data into w. -// Data must be a fixed-size value or a slice of fixed-size -// values, or a pointer to such data. -// Bytes written to w are encoded using the specified byte order -// and read from successive fields of the data. -// When writing structs, zero values are written for fields -// with blank (_) field names. -func Write(w io.Writer, order ByteOrder, data interface{}) error { - // Fast path for basic types and slices. - if n := intDataSize(data); n != 0 { - var b [8]byte - var bs []byte - if n > len(b) { - bs = make([]byte, n) - } else { - bs = b[:n] - } - switch v := data.(type) { - case *int8: - bs = b[:1] - b[0] = byte(*v) - case int8: - bs = b[:1] - b[0] = byte(v) - case []int8: - for i, x := range v { - bs[i] = byte(x) - } - case *uint8: - bs = b[:1] - b[0] = *v - case uint8: - bs = b[:1] - b[0] = byte(v) - case []uint8: - bs = v - case *int16: - bs = b[:2] - order.PutUint16(bs, uint16(*v)) - case int16: - bs = b[:2] - order.PutUint16(bs, uint16(v)) - case []int16: - for i, x := range v { - order.PutUint16(bs[2*i:], uint16(x)) - } - case *uint16: - bs = b[:2] - order.PutUint16(bs, *v) - case uint16: - bs = b[:2] - order.PutUint16(bs, v) - case []uint16: - for i, x := range v { - order.PutUint16(bs[2*i:], x) - } - case *int32: - bs = b[:4] - order.PutUint32(bs, uint32(*v)) - case int32: - bs = b[:4] - order.PutUint32(bs, uint32(v)) - case []int32: - for i, x := range v { - order.PutUint32(bs[4*i:], uint32(x)) - } - case *uint32: - bs = b[:4] - order.PutUint32(bs, *v) - case uint32: - bs = b[:4] - order.PutUint32(bs, v) - case []uint32: - for i, x := range v { - order.PutUint32(bs[4*i:], x) - } - case *int64: - bs = b[:8] - order.PutUint64(bs, uint64(*v)) - case int64: - bs = b[:8] - order.PutUint64(bs, uint64(v)) - case []int64: - for i, x := range v { - order.PutUint64(bs[8*i:], uint64(x)) - } - case *uint64: - bs = b[:8] - order.PutUint64(bs, *v) - case uint64: - bs = b[:8] - order.PutUint64(bs, v) - case []uint64: - for i, x := range v { - order.PutUint64(bs[8*i:], x) - } - } - _, err := w.Write(bs) - return err - } - - // Fallback to reflect-based encoding. 
- v := reflect.Indirect(reflect.ValueOf(data)) - size := dataSize(v) - if size < 0 { - return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String()) - } - buf := make([]byte, size) - e := &encoder{order: order, buf: buf} - e.value(v) - _, err := w.Write(buf) - return err -} - -// Size returns how many bytes Write would generate to encode the value v, which -// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data. -// If v is neither of these, Size returns -1. -func Size(v interface{}) int { - return dataSize(reflect.Indirect(reflect.ValueOf(v))) -} - -// dataSize returns the number of bytes the actual data represented by v occupies in memory. -// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice -// it returns the length of the slice times the element size and does not count the memory -// occupied by the header. If the type of v is not acceptable, dataSize returns -1. -func dataSize(v reflect.Value) int { - if v.Kind() == reflect.Slice { - if s := sizeof(v.Type().Elem()); s >= 0 { - return s * v.Len() - } - return -1 - } - return sizeof(v.Type()) -} - -// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable. -func sizeof(t reflect.Type) int { - switch t.Kind() { - case reflect.Array: - if s := sizeof(t.Elem()); s >= 0 { - return s * t.Len() - } - - case reflect.Struct: - sum := 0 - for i, n := 0, t.NumField(); i < n; i++ { - s := sizeof(t.Field(i).Type) - if s < 0 { - return -1 - } - sum += s - } - return sum - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.Ptr: - return int(t.Size()) - } - - return -1 -} - -type coder struct { - order ByteOrder - buf []byte -} - -type ( - decoder coder - encoder coder -) - -func (d *decoder) uint8() uint8 { - x := d.buf[0] - d.buf = d.buf[1:] - return x -} - -func (e *encoder) uint8(x uint8) { - e.buf[0] = x - e.buf = e.buf[1:] -} - -func (d *decoder) uint16() uint16 { - x := d.order.Uint16(d.buf[0:2]) - d.buf = d.buf[2:] - return x -} - -func (e *encoder) uint16(x uint16) { - e.order.PutUint16(e.buf[0:2], x) - e.buf = e.buf[2:] -} - -func (d *decoder) uint32() uint32 { - x := d.order.Uint32(d.buf[0:4]) - d.buf = d.buf[4:] - return x -} - -func (e *encoder) uint32(x uint32) { - e.order.PutUint32(e.buf[0:4], x) - e.buf = e.buf[4:] -} - -func (d *decoder) uint64() uint64 { - x := d.order.Uint64(d.buf[0:8]) - d.buf = d.buf[8:] - return x -} - -func (e *encoder) uint64(x uint64) { - e.order.PutUint64(e.buf[0:8], x) - e.buf = e.buf[8:] -} - -func (d *decoder) int8() int8 { return int8(d.uint8()) } - -func (e *encoder) int8(x int8) { e.uint8(uint8(x)) } - -func (d *decoder) int16() int16 { return int16(d.uint16()) } - -func (e *encoder) int16(x int16) { e.uint16(uint16(x)) } - -func (d *decoder) int32() int32 { return int32(d.uint32()) } - -func (e *encoder) int32(x int32) { e.uint32(uint32(x)) } - -func (d *decoder) int64() int64 { return int64(d.uint64()) } - -func (e *encoder) int64(x int64) { e.uint64(uint64(x)) } - -func (d *decoder) value(v reflect.Value) { - switch v.Kind() { - case reflect.Array: - l := v.Len() - for i := 0; i < l; i++ { - d.value(v.Index(i)) - } - - case reflect.Struct: - t := v.Type() - l := v.NumField() - for i := 0; i < l; i++ { - // Note: Calling v.CanSet() below is an optimization. 
- // It would be sufficient to check the field name, - // but creating the StructField info for each field is - // costly (run "go test -bench=ReadStruct" and compare - // results when making changes to this code). - if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { - d.value(v) - } else { - d.skip(v) - } - } - - case reflect.Slice: - l := v.Len() - for i := 0; i < l; i++ { - d.value(v.Index(i)) - } - - case reflect.Int8: - v.SetInt(int64(d.int8())) - case reflect.Int16: - v.SetInt(int64(d.int16())) - case reflect.Int32: - v.SetInt(int64(d.int32())) - case reflect.Int64: - v.SetInt(d.int64()) - - case reflect.Uint8: - v.SetUint(uint64(d.uint8())) - case reflect.Uint16: - v.SetUint(uint64(d.uint16())) - case reflect.Uint32: - v.SetUint(uint64(d.uint32())) - case reflect.Uint64: - v.SetUint(d.uint64()) - - case reflect.Float32: - v.SetFloat(float64(math.Float32frombits(d.uint32()))) - case reflect.Float64: - v.SetFloat(math.Float64frombits(d.uint64())) - - case reflect.Complex64: - v.SetComplex(complex( - float64(math.Float32frombits(d.uint32())), - float64(math.Float32frombits(d.uint32())), - )) - case reflect.Complex128: - v.SetComplex(complex( - math.Float64frombits(d.uint64()), - math.Float64frombits(d.uint64()), - )) - } -} - -func (e *encoder) value(v reflect.Value) { - switch v.Kind() { - case reflect.Array: - l := v.Len() - for i := 0; i < l; i++ { - e.value(v.Index(i)) - } - - case reflect.Struct: - t := v.Type() - l := v.NumField() - for i := 0; i < l; i++ { - // see comment for corresponding code in decoder.value() - if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { - e.value(v) - } else { - e.skip(v) - } - } - - case reflect.Slice: - l := v.Len() - for i := 0; i < l; i++ { - e.value(v.Index(i)) - } - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch v.Type().Kind() { - case reflect.Int8: - e.int8(int8(v.Int())) - case reflect.Int16: - e.int16(int16(v.Int())) - case reflect.Int32: - e.int32(int32(v.Int())) - case reflect.Int64: - e.int64(v.Int()) - } - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch v.Type().Kind() { - case reflect.Uint8: - e.uint8(uint8(v.Uint())) - case reflect.Uint16: - e.uint16(uint16(v.Uint())) - case reflect.Uint32: - e.uint32(uint32(v.Uint())) - case reflect.Uint64: - e.uint64(v.Uint()) - } - - case reflect.Float32, reflect.Float64: - switch v.Type().Kind() { - case reflect.Float32: - e.uint32(math.Float32bits(float32(v.Float()))) - case reflect.Float64: - e.uint64(math.Float64bits(v.Float())) - } - - case reflect.Complex64, reflect.Complex128: - switch v.Type().Kind() { - case reflect.Complex64: - x := v.Complex() - e.uint32(math.Float32bits(float32(real(x)))) - e.uint32(math.Float32bits(float32(imag(x)))) - case reflect.Complex128: - x := v.Complex() - e.uint64(math.Float64bits(real(x))) - e.uint64(math.Float64bits(imag(x))) - } - } -} - -func (d *decoder) skip(v reflect.Value) { - d.buf = d.buf[dataSize(v):] -} - -func (e *encoder) skip(v reflect.Value) { - n := dataSize(v) - for i := range e.buf[0:n] { - e.buf[i] = 0 - } - e.buf = e.buf[n:] -} - -// intDataSize returns the size of the data required to represent the data when encoded. -// It returns zero if the type cannot be implemented by the fast path in Read or Write. 
-func intDataSize(data interface{}) int { - switch data := data.(type) { - case int8, *int8, *uint8: - return 1 - case []int8: - return len(data) - case []uint8: - return len(data) - case int16, *int16, *uint16: - return 2 - case []int16: - return 2 * len(data) - case []uint16: - return 2 * len(data) - case int32, *int32, *uint32: - return 4 - case []int32: - return 4 * len(data) - case []uint32: - return 4 * len(data) - case int64, *int64, *uint64: - return 8 - case []int64: - return 8 * len(data) - case []uint64: - return 8 * len(data) - } - return 0 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go deleted file mode 100644 index f1a78459..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build darwin -// +build darwin - -package common - -import ( - "context" - "os" - "os/exec" - "strings" - "unsafe" - - "golang.org/x/sys/unix" -) - -func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { - cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - -func CallSyscall(mib []int32) ([]byte, uint64, error) { - miblen := uint64(len(mib)) - - // get required buffer size - length := uint64(0) - _, _, err := unix.Syscall6( - 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 - uintptr(unsafe.Pointer(&mib[0])), - uintptr(miblen), - 0, - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if err != 0 { - var b []byte - return b, length, err - } - if length == 0 { - var b []byte - return b, length, err - } - // get proc info itself - buf := make([]byte, length) - _, _, err = unix.Syscall6( - 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 - uintptr(unsafe.Pointer(&mib[0])), - uintptr(miblen), - uintptr(unsafe.Pointer(&buf[0])), - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if err != 0 { - return buf, length, err - } - - return buf, length, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go deleted file mode 100644 index a4aaadaf..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go +++ /dev/null @@ -1,30 +0,0 @@ -package common - -import "fmt" - -type Warnings struct { - List []error - Verbose bool -} - -func (w *Warnings) Add(err error) { - w.List = append(w.List, err) -} - -func (w *Warnings) Reference() error { - if len(w.List) > 0 { - return w - } - return nil -} - -func (w *Warnings) Error() string { - if w.Verbose { - str := "" - for i, e := range w.List { - str += fmt.Sprintf("\tError %d: %s\n", i, e.Error()) - } - return str - } - return fmt.Sprintf("Number of warnings: %v", len(w.List)) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go deleted file mode 100644 index a05a0fab..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build darwin -// +build darwin - -package mem - -import ( - "context" - "fmt" 
- "unsafe" - - "golang.org/x/sys/unix" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func getHwMemsize() (uint64, error) { - total, err := unix.SysctlUint64("hw.memsize") - if err != nil { - return 0, err - } - return total, nil -} - -// xsw_usage in sys/sysctl.h -type swapUsage struct { - Total uint64 - Avail uint64 - Used uint64 - Pagesize int32 - Encrypted bool -} - -// SwapMemory returns swapinfo. -func SwapMemory() (*SwapMemoryStat, error) { - return SwapMemoryWithContext(context.Background()) -} - -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go - var ret *SwapMemoryStat - - value, err := unix.SysctlRaw("vm.swapusage") - if err != nil { - return ret, err - } - if len(value) != 32 { - return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value)) - } - swap := (*swapUsage)(unsafe.Pointer(&value[0])) - - u := float64(0) - if swap.Total != 0 { - u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0 - } - - ret = &SwapMemoryStat{ - Total: swap.Total, - Used: swap.Used, - Free: swap.Avail, - UsedPercent: u, - } - - return ret, nil -} - -func SwapDevices() ([]*SwapDevice, error) { - return SwapDevicesWithContext(context.Background()) -} - -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { - return nil, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go deleted file mode 100644 index e5da7dcd..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package mem - -/* -#include -#include -*/ -import "C" - -import ( - "context" - "fmt" - "unsafe" -) - -// VirtualMemory returns VirtualmemoryStat. 
-func VirtualMemory() (*VirtualMemoryStat, error) { - return VirtualMemoryWithContext(context.Background()) -} - -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT) - var vmstat C.vm_statistics_data_t - - status := C.host_statistics(C.host_t(C.mach_host_self()), - C.HOST_VM_INFO, - C.host_info_t(unsafe.Pointer(&vmstat)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_statistics error=%d", status) - } - - pageSize := uint64(C.vm_kernel_page_size) - total, err := getHwMemsize() - if err != nil { - return nil, err - } - totalCount := C.natural_t(total / pageSize) - - availableCount := vmstat.inactive_count + vmstat.free_count - usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) - - usedCount := totalCount - availableCount - - return &VirtualMemoryStat{ - Total: total, - Available: pageSize * uint64(availableCount), - Used: pageSize * uint64(usedCount), - UsedPercent: usedPercent, - Free: pageSize * uint64(vmstat.free_count), - Active: pageSize * uint64(vmstat.active_count), - Inactive: pageSize * uint64(vmstat.inactive_count), - Wired: pageSize * uint64(vmstat.wire_count), - }, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go deleted file mode 100644 index c9393168..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package mem - -import ( - "context" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -// Runs vm_stat and returns Free and inactive pages -func getVMStat(vms *VirtualMemoryStat) error { - out, err := invoke.Command("vm_stat") - if err != nil { - return err - } - return parseVMStat(string(out), vms) -} - -func parseVMStat(out string, vms *VirtualMemoryStat) error { - var err error - - lines := strings.Split(out, "\n") - pagesize := uint64(unix.Getpagesize()) - for _, line := range lines { - fields := strings.Split(line, ":") - if len(fields) < 2 { - continue - } - key := strings.TrimSpace(fields[0]) - value := strings.Trim(fields[1], " .") - switch key { - case "Pages free": - free, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Free = free * pagesize - case "Pages inactive": - inactive, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Inactive = inactive * pagesize - case "Pages active": - active, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Active = active * pagesize - case "Pages wired down": - wired, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Wired = wired * pagesize - } - } - return err -} - -// VirtualMemory returns VirtualmemoryStat. 
-func VirtualMemory() (*VirtualMemoryStat, error) { - return VirtualMemoryWithContext(context.Background()) -} - -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - ret := &VirtualMemoryStat{} - - total, err := getHwMemsize() - if err != nil { - return nil, err - } - err = getVMStat(ret) - if err != nil { - return nil, err - } - - ret.Available = ret.Free + ret.Inactive - ret.Total = total - - ret.Used = ret.Total - ret.Available - ret.UsedPercent = 100 * float64(ret.Used) / float64(ret.Total) - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go deleted file mode 100644 index e136be1b..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris -// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows,!solaris - -package net - -import ( - "context" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { - return []IOCountersStat{}, common.ErrNotImplementedError -} - -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { - return []FilterStat{}, common.ErrNotImplementedError -} - -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { - return nil, common.ErrNotImplementedError -} - -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { - return []ProtoCountersStat{}, common.ErrNotImplementedError -} - -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - -func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) -} - -func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) -} - -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) -} - -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) -} - -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go deleted file mode 100644 index bd5c9587..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !go1.16 -// +build !go1.16 - -package net - -import ( - "os" -) - -func readDir(f *os.File, max int) ([]os.FileInfo, error) { - return f.Readdir(max) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go deleted file mode 100644 index a45072e9..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package net - -import ( - "os" -) - -func readDir(f *os.File, max int) ([]os.DirEntry, error) { - return f.ReadDir(max) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go deleted file mode 100644 index 263829ff..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build darwin || freebsd || openbsd -// +build darwin freebsd openbsd - -package process - -import ( - "bytes" - "context" - "encoding/binary" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" -) - -type MemoryInfoExStat struct{} - -type MemoryMapsStat struct{} - -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { - return nil, 
common.ErrNotImplementedError -} - -func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { - return nil, common.ErrNotImplementedError -} - -func parseKinfoProc(buf []byte) (KinfoProc, error) { - var k KinfoProc - br := bytes.NewReader(buf) - err := common.Read(br, binary.LittleEndian, &k) - return k, err -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go deleted file mode 100644 index 176661cb..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go +++ /dev/null @@ -1,325 +0,0 @@ -//go:build darwin -// +build darwin - -package process - -import ( - "context" - "fmt" - "path/filepath" - "strconv" - "strings" - - "github.com/tklauser/go-sysconf" - "golang.org/x/sys/unix" - - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" -) - -// copied from sys/sysctl.h -const ( - CTLKern = 1 // "high kernel": proc, limits - KernProc = 14 // struct: process entries - KernProcPID = 1 // by process id - KernProcProc = 8 // only return procs - KernProcAll = 0 // everything - KernProcPathname = 12 // path to executable -) - -var clockTicks = 100 // default value - -func init() { - clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) - // ignore errors - if err == nil { - clockTicks = int(clkTck) - } -} - -type _Ctype_struct___0 struct { - Pad uint64 -} - -func pidsWithContext(ctx context.Context) ([]int32, error) { - var ret []int32 - - kprocs, err := unix.SysctlKinfoProcSlice("kern.proc.all") - if err != nil { - return ret, err - } - - for _, proc := range kprocs { - ret = append(ret, int32(proc.Proc.P_pid)) - } - - return ret, nil -} - -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - - return k.Eproc.Ppid, nil -} - -func (p *Process) NameWithContext(ctx context.Context) (string, error) { - k, err := p.getKProc() - if err != nil { - return "", err - } - - name := common.ByteToString(k.Proc.P_comm[:]) - - if len(name) >= 15 { - cmdName, err := p.cmdNameWithContext(ctx) - if err != nil { - return "", err - } - if len(cmdName) > 0 { - extendedName := filepath.Base(cmdName) - if strings.HasPrefix(extendedName, p.name) { - name = extendedName - } - } - } - - return name, nil -} - -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - - return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil -} - 
-func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - r, err := callPsWithContext(ctx, "state", p.Pid, false, false) - if err != nil { - return []string{""}, err - } - status := convertStatusChar(r[0][0][0:1]) - return []string{status}, err -} - -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { - // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details - pid := p.Pid - out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) - if err != nil { - return false, err - } - return strings.IndexByte(string(out), '+') != -1, nil -} - -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - k, err := p.getKProc() - if err != nil { - return nil, err - } - - // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html - userEffectiveUID := int32(k.Eproc.Ucred.Uid) - - return []int32{userEffectiveUID}, nil -} - -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - k, err := p.getKProc() - if err != nil { - return nil, err - } - - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_svgid)) - - return gids, nil -} - -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError - // k, err := p.getKProc() - // if err != nil { - // return nil, err - // } - - // groups := make([]int32, k.Eproc.Ucred.Ngroups) - // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ { - // groups[i] = int32(k.Eproc.Ucred.Groups[i]) - // } - - // return groups, nil -} - -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError - /* - k, err := p.getKProc() - if err != nil { - return "", err - } - - ttyNr := uint64(k.Eproc.Tdev) - termmap, err := getTerminalMap() - if err != nil { - return "", err - } - - return termmap[ttyNr], nil - */ -} - -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - return int32(k.Proc.P_nice), nil -} - -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { - return nil, common.ErrNotImplementedError -} - -func convertCPUTimes(s string) (ret float64, err error) { - var t int - var _tmp string - if strings.Contains(s, ":") { - _t := strings.Split(s, ":") - switch len(_t) { - case 3: - hour, err := strconv.Atoi(_t[0]) - if err != nil { - return ret, err - } - t += hour * 60 * 60 * clockTicks - - mins, err := strconv.Atoi(_t[1]) - if err != nil { - return ret, err - } - t += mins * 60 * clockTicks - _tmp = _t[2] - case 2: - mins, err := strconv.Atoi(_t[0]) - if err != nil { - return ret, err - } - t += mins * 60 * clockTicks - _tmp = _t[1] - case 1, 0: - _tmp = s - default: - return ret, fmt.Errorf("wrong cpu time string") - } - } else { - _tmp = s - } - - _t := strings.Split(_tmp, ".") - if err != nil { - return ret, err - } - h, err := strconv.Atoi(_t[0]) - t += h * clockTicks - h, err = strconv.Atoi(_t[1]) - t += h - return float64(t) / float64(clockTicks), nil -} - -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) - if err != nil { - return nil, err - } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) - if err != nil { - return nil, err - } - 
ret = append(ret, np) - } - return ret, nil -} - -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { - return net.ConnectionsPidWithContext(ctx, "all", p.Pid) -} - -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) -} - -func ProcessesWithContext(ctx context.Context) ([]*Process, error) { - out := []*Process{} - - pids, err := PidsWithContext(ctx) - if err != nil { - return out, err - } - - for _, pid := range pids { - p, err := NewProcessWithContext(ctx, pid) - if err != nil { - continue - } - out = append(out, p) - } - - return out, nil -} - -// Returns a proc as defined here: -// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html -func (p *Process) getKProc() (*unix.KinfoProc, error) { - return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid)) -} - -// call ps command. -// Return value deletes Header line(you must not input wrong arg). -// And splited by Space. Caller have responsibility to manage. -// If passed arg pid is 0, get information from all process. -func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) { - var cmd []string - if pid == 0 { // will get from all processes. - cmd = []string{"-ax", "-o", arg} - } else if threadOption { - cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))} - } else { - cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))} - } - if nameOption { - cmd = append(cmd, "-c") - } - out, err := invoke.CommandWithContext(ctx, "ps", cmd...) - if err != nil { - return [][]string{}, err - } - lines := strings.Split(string(out), "\n") - - var ret [][]string - for _, l := range lines[1:] { - var lr []string - if nameOption { - lr = append(lr, l) - } else { - for _, r := range strings.Split(l, " ") { - if r == "" { - continue - } - lr = append(lr, strings.TrimSpace(r)) - } - } - if len(lr) != 0 { - ret = append(ret, lr) - } - } - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go deleted file mode 100644 index 858f08e7..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go +++ /dev/null @@ -1,222 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package process - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -import "C" - -import ( - "bytes" - "context" - "fmt" - "strings" - "syscall" - "unsafe" - - "github.com/shirou/gopsutil/v3/cpu" -) - -var ( - argMax int - timescaleToNanoSeconds float64 -) - -func init() { - argMax = getArgMax() - timescaleToNanoSeconds = getTimeScaleToNanoSeconds() -} - -func getArgMax() int { - var ( - mib = [...]C.int{C.CTL_KERN, C.KERN_ARGMAX} - argmax C.int - size C.size_t = C.ulong(unsafe.Sizeof(argmax)) - ) - retval := C.sysctl(&mib[0], 2, unsafe.Pointer(&argmax), &size, C.NULL, 0) - if retval == 0 { - return int(argmax) - } - return 0 -} - -func getTimeScaleToNanoSeconds() float64 { - var timeBaseInfo C.struct_mach_timebase_info - - C.mach_timebase_info(&timeBaseInfo) - - return float64(timeBaseInfo.numer) / float64(timeBaseInfo.denom) -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - var c C.char // need a var for unsafe.Sizeof need a var - const bufsize = C.PROC_PIDPATHINFO_MAXSIZE * unsafe.Sizeof(c) - buffer := 
(*C.char)(C.malloc(C.size_t(bufsize))) - defer C.free(unsafe.Pointer(buffer)) - - ret, err := C.proc_pidpath(C.int(p.Pid), unsafe.Pointer(buffer), C.uint32_t(bufsize)) - if err != nil { - return "", err - } - if ret <= 0 { - return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) - } - - return C.GoString(buffer), nil -} - -// CwdWithContext retrieves the Current Working Directory for the given process. -// It uses the proc_pidinfo from libproc and will only work for processes the -// EUID can access. Otherwise "operation not permitted" will be returned as the -// error. -// Note: This might also work for other *BSD OSs. -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - const vpiSize = C.sizeof_struct_proc_vnodepathinfo - vpi := (*C.struct_proc_vnodepathinfo)(C.malloc(vpiSize)) - defer C.free(unsafe.Pointer(vpi)) - ret, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDVNODEPATHINFO, 0, unsafe.Pointer(vpi), vpiSize) - if err != nil { - // fmt.Printf("ret: %d %T\n", ret, err) - if err == syscall.EPERM { - return "", ErrorNotPermitted - } - return "", err - } - if ret <= 0 { - return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) - } - if ret != C.sizeof_struct_proc_vnodepathinfo { - return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) - } - return C.GoString(&vpi.pvi_cdir.vip_path[0]), err -} - -func procArgs(pid int32) ([]byte, int, error) { - var ( - mib = [...]C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} - size C.size_t = C.ulong(argMax) - nargs C.int - result []byte - ) - procargs := (*C.char)(C.malloc(C.ulong(argMax))) - defer C.free(unsafe.Pointer(procargs)) - retval, err := C.sysctl(&mib[0], 3, unsafe.Pointer(procargs), &size, C.NULL, 0) - if retval == 0 { - C.memcpy(unsafe.Pointer(&nargs), unsafe.Pointer(procargs), C.sizeof_int) - result = C.GoBytes(unsafe.Pointer(procargs), C.int(size)) - // fmt.Printf("size: %d %d\n%s\n", size, nargs, hex.Dump(result)) - return result, int(nargs), nil - } - return nil, 0, err -} - -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - return p.cmdlineSliceWithContext(ctx, true) -} - -func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) { - pargs, nargs, err := procArgs(p.Pid) - if err != nil { - return nil, err - } - // The first bytes hold the nargs int, skip it. - args := bytes.Split((pargs)[C.sizeof_int:], []byte{0}) - var argStr string - // The first element is the actual binary/command path. - // command := args[0] - var argSlice []string - // var envSlice []string - // All other, non-zero elements are arguments. The first "nargs" elements - // are the arguments. Everything else in the slice is then the environment - // of the process. 
- for _, arg := range args[1:] { - argStr = string(arg[:]) - if len(argStr) > 0 { - if nargs > 0 { - argSlice = append(argSlice, argStr) - nargs-- - continue - } - break - // envSlice = append(envSlice, argStr) - } - } - return argSlice, err -} - -// cmdNameWithContext returns the command name (including spaces) without any arguments -func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { - r, err := p.cmdlineSliceWithContext(ctx, false) - if err != nil { - return "", err - } - - if len(r) == 0 { - return "", nil - } - - return r[0], err -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - r, err := p.CmdlineSliceWithContext(ctx) - if err != nil { - return "", err - } - return strings.Join(r, " "), err -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return 0, err - } - - return int32(ti.pti_threadnum), nil -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return nil, err - } - - ret := &cpu.TimesStat{ - CPU: "cpu", - User: float64(ti.pti_total_user) * timescaleToNanoSeconds / 1e9, - System: float64(ti.pti_total_system) * timescaleToNanoSeconds / 1e9, - } - return ret, nil -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(ti.pti_resident_size), - VMS: uint64(ti.pti_virtual_size), - Swap: uint64(ti.pti_pageins), - } - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go deleted file mode 100644 index d903474f..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go +++ /dev/null @@ -1,134 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package process - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" -) - -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") - if err != nil { - return "", fmt.Errorf("bad call to lsof: %s", err) - } - txtFound := 0 - lines := strings.Split(string(out), "\n") - fallback := "" - for i := 1; i < len(lines); i++ { - if lines[i] == "ftxt" { - txtFound++ - if txtFound == 1 { - fallback = lines[i-1][1:] - } - if txtFound == 2 { - return lines[i-1][1:], nil - } - } - } - if fallback != "" { - return fallback, nil - } - return "", fmt.Errorf("missing txt data returned by lsof") -} - -func (p *Process) CmdlineWithContext(ctx 
context.Context) (string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, false) - if err != nil { - return "", err - } - return strings.Join(r[0], " "), err -} - -func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, true) - if err != nil { - return "", err - } - if len(r) > 0 && len(r[0]) > 0 { - return r[0][0], err - } - - return "", err -} - -// CmdlineSliceWithContext returns the command line arguments of the process as a slice with each -// element being an argument. Because of current deficiencies in the way that the command -// line arguments are found, single arguments that have spaces in the will actually be -// reported as two separate items. In order to do something better CGO would be needed -// to use the native darwin functions. -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, false) - if err != nil { - return nil, err - } - return r[0], err -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true, false) - if err != nil { - return 0, err - } - return int32(len(r)), nil -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false, false) - if err != nil { - return nil, err - } - - utime, err := convertCPUTimes(r[0][0]) - if err != nil { - return nil, err - } - stime, err := convertCPUTimes(r[0][1]) - if err != nil { - return nil, err - } - - ret := &cpu.TimesStat{ - CPU: "cpu", - User: utime, - System: stime, - } - return ret, nil -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false, false) - if err != nil { - return nil, err - } - rss, err := strconv.Atoi(r[0][0]) - if err != nil { - return nil, err - } - vms, err := strconv.Atoi(r[0][1]) - if err != nil { - return nil, err - } - pagein, err := strconv.Atoi(r[0][2]) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(rss) * 1024, - VMS: uint64(vms) * 1024, - Swap: uint64(pagein), - } - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go deleted file mode 100644 index 1a5d0c4b..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go +++ /dev/null @@ -1,203 +0,0 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9 -// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris,!plan9 - -package process - -import ( - "context" - "syscall" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" -) - -type Signal = syscall.Signal - -type MemoryMapsStat struct { - Path string `json:"path"` - Rss uint64 `json:"rss"` - Size uint64 `json:"size"` - Pss uint64 `json:"pss"` - SharedClean uint64 `json:"sharedClean"` - SharedDirty uint64 `json:"sharedDirty"` - PrivateClean uint64 `json:"privateClean"` - PrivateDirty uint64 `json:"privateDirty"` - Referenced uint64 `json:"referenced"` - Anonymous uint64 `json:"anonymous"` - Swap uint64 `json:"swap"` -} - -type MemoryInfoExStat struct{} - -func pidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError 
-} - -func ProcessesWithContext(ctx context.Context) ([]*Process, error) { - return nil, common.ErrNotImplementedError -} - -func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NameWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - return []string{""}, common.ErrNotImplementedError -} - -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) 
{ - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) SendSignalWithContext(ctx context.Context, sig Signal) error { - return common.ErrNotImplementedError -} - -func (p *Process) SuspendWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) ResumeWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) TerminateWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) KillWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { - return nil, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go deleted file mode 100644 index 560e627d..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go +++ /dev/null @@ -1,192 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go - -package process - -const ( - CTLKern = 1 - KernProc = 14 - KernProcPID = 1 - KernProcProc = 8 - KernProcPathname = 12 - KernProcArgs = 7 -) - -const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 -) - -const ( - sizeOfKinfoVmentry = 0x488 - sizeOfKinfoProc = 0x440 -) - -const ( - SIDL = 1 - SRUN = 2 - SSLEEP = 3 - SSTOP = 4 - SZOMB = 5 - SWAIT = 6 - SLOCK = 7 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 -) - -type Timespec struct { - Sec int64 - Nsec int64 -} - -type Timeval struct { - Sec int64 - Usec int64 -} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int64 - Ixrss int64 - Idrss int64 - Isrss int64 - Minflt int64 - Majflt int64 - Nswap int64 - Inblock int64 - Oublock int64 - Msgsnd int64 - Msgrcv int64 - Nsignals int64 - Nvcsw int64 - Nivcsw int64 -} - -type Rlimit struct { - Cur int64 - Max int64 -} - -type KinfoProc struct { - Structsize int32 - Layout int32 - Args int64 /* pargs */ - Paddr int64 /* proc */ - Addr int64 /* user */ - Tracep int64 /* vnode */ - Textvp int64 /* vnode */ - Fd int64 /* filedesc */ - Vmspace int64 /* vmspace */ - Wchan int64 - Pid int32 - Ppid int32 - Pgid int32 - Tpgid int32 - Sid int32 - Tsid int32 - Jobc int16 - Spare_short1 
int16 - Tdev uint32 - Siglist [16]byte /* sigset */ - Sigmask [16]byte /* sigset */ - Sigignore [16]byte /* sigset */ - Sigcatch [16]byte /* sigset */ - Uid uint32 - Ruid uint32 - Svuid uint32 - Rgid uint32 - Svgid uint32 - Ngroups int16 - Spare_short2 int16 - Groups [16]uint32 - Size uint64 - Rssize int64 - Swrss int64 - Tsize int64 - Dsize int64 - Ssize int64 - Xstat uint16 - Acflag uint16 - Pctcpu uint32 - Estcpu uint32 - Slptime uint32 - Swtime uint32 - Cow uint32 - Runtime uint64 - Start Timeval - Childtime Timeval - Flag int64 - Kiflag int64 - Traceflag int32 - Stat int8 - Nice int8 - Lock int8 - Rqindex int8 - Oncpu uint8 - Lastcpu uint8 - Tdname [17]int8 - Wmesg [9]int8 - Login [18]int8 - Lockname [9]int8 - Comm [20]int8 - Emul [17]int8 - Loginclass [18]int8 - Sparestrings [50]int8 - Spareints [7]int32 - Flag2 int32 - Fibnum int32 - Cr_flags uint32 - Jid int32 - Numthreads int32 - Tid int32 - Pri Priority - Rusage Rusage - Rusage_ch Rusage - Pcb int64 /* pcb */ - Kstack int64 - Udata int64 - Tdaddr int64 /* thread */ - Spareptrs [6]int64 - Sparelongs [12]int64 - Sflag int64 - Tdflags int64 -} - -type Priority struct { - Class uint8 - Level uint8 - Native uint8 - User uint8 -} - -type KinfoVmentry struct { - Structsize int32 - Type int32 - Start uint64 - End uint64 - Offset uint64 - Vn_fileid uint64 - Vn_fsid uint32 - Flags int32 - Resident int32 - Private_resident int32 - Protection int32 - Ref_count int32 - Shadow_count int32 - Vn_type int32 - Vn_size uint64 - Vn_rdev uint32 - Vn_mode uint16 - Status uint16 - X_kve_ispare [12]int32 - Path [1024]int8 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go deleted file mode 100644 index bc4bc062..00000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go +++ /dev/null @@ -1,203 +0,0 @@ -//go:build plan9 -// +build plan9 - -package process - -import ( - "context" - "syscall" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" -) - -type Signal = syscall.Note - -type MemoryMapsStat struct { - Path string `json:"path"` - Rss uint64 `json:"rss"` - Size uint64 `json:"size"` - Pss uint64 `json:"pss"` - SharedClean uint64 `json:"sharedClean"` - SharedDirty uint64 `json:"sharedDirty"` - PrivateClean uint64 `json:"privateClean"` - PrivateDirty uint64 `json:"privateDirty"` - Referenced uint64 `json:"referenced"` - Anonymous uint64 `json:"anonymous"` - Swap uint64 `json:"swap"` -} - -type MemoryInfoExStat struct{} - -func pidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func ProcessesWithContext(ctx context.Context) ([]*Process, error) { - return nil, common.ErrNotImplementedError -} - -func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NameWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func 
(p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - return []string{""}, common.ErrNotImplementedError -} - -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return nil, 
common.ErrNotImplementedError -} - -func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) SendSignalWithContext(ctx context.Context, sig Signal) error { - return common.ErrNotImplementedError -} - -func (p *Process) SuspendWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) ResumeWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) TerminateWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) KillWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { - return nil, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/LICENSE b/vendor/github.com/shirou/gopsutil/v4/LICENSE similarity index 100% rename from vendor/github.com/shirou/gopsutil/v3/LICENSE rename to vendor/github.com/shirou/gopsutil/v4/LICENSE diff --git a/vendor/github.com/shirou/gopsutil/v3/common/env.go b/vendor/github.com/shirou/gopsutil/v4/common/env.go similarity index 51% rename from vendor/github.com/shirou/gopsutil/v3/common/env.go rename to vendor/github.com/shirou/gopsutil/v4/common/env.go index 4b5f4980..47e471c4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/common/env.go +++ b/vendor/github.com/shirou/gopsutil/v4/common/env.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common type EnvKeyType string @@ -11,13 +12,14 @@ type EnvKeyType string var EnvKey = EnvKeyType("env") const ( - HostProcEnvKey EnvKeyType = "HOST_PROC" - HostSysEnvKey EnvKeyType = "HOST_SYS" - HostEtcEnvKey EnvKeyType = "HOST_ETC" - HostVarEnvKey EnvKeyType = "HOST_VAR" - HostRunEnvKey EnvKeyType = "HOST_RUN" - HostDevEnvKey EnvKeyType = "HOST_DEV" - HostRootEnvKey EnvKeyType = "HOST_ROOT" + HostProcEnvKey EnvKeyType = "HOST_PROC" + HostSysEnvKey EnvKeyType = "HOST_SYS" + HostEtcEnvKey EnvKeyType = "HOST_ETC" + HostVarEnvKey EnvKeyType = "HOST_VAR" + HostRunEnvKey EnvKeyType = "HOST_RUN" + HostDevEnvKey EnvKeyType = "HOST_DEV" + HostRootEnvKey EnvKeyType = "HOST_ROOT" + HostProcMountinfo EnvKeyType = "HOST_PROC_MOUNTINFO" ) type EnvMap map[EnvKeyType]string diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go index 83bc23d4..9bc3dfb5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go @@ -1,8 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( "context" "encoding/json" + "errors" "fmt" "math" "runtime" @@ -11,7 +13,7 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // TimesStat contains the amounts of time the CPU has spent performing different @@ -194,7 +196,7 @@ func percentUsedFromLastCallWithContext(ctx context.Context, percpu bool) ([]flo } if lastTimes == nil { - return nil, fmt.Errorf("error getting times for cpu percent. lastTimes was nil") + return nil, errors.New("error getting times for cpu percent. 
lastTimes was nil") } return calculateAllBusy(lastTimes, cpuTimes) } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go index 1439d1d7..bc766bd4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go index 9c1e70b1..559dc5fe 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go index f3a3186a..981e32e5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package cpu @@ -8,17 +8,17 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { var ret []TimesStat if percpu { - per_out, err := invoke.CommandWithContext(ctx, "sar", "-u", "-P", "ALL", "10", "1") + perOut, err := invoke.CommandWithContext(ctx, "sar", "-u", "-P", "ALL", "10", "1") if err != nil { return nil, err } - lines := strings.Split(string(per_out), "\n") + lines := strings.Split(string(perOut), "\n") if len(lines) < 6 { return []TimesStat{}, common.ErrNotImplementedError } @@ -105,14 +105,15 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { ret := InfoStat{} for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "Number Of Processors:") { + switch { + case strings.HasPrefix(line, "Number Of Processors:"): p := strings.Fields(line) if len(p) > 3 { if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { ret.Cores = int32(t) } } - } else if strings.HasPrefix(line, "Processor Clock Speed:") { + case strings.HasPrefix(line, "Processor Clock Speed:"): p := strings.Fields(line) if len(p) > 4 { if t, err := strconv.ParseFloat(p[3], 64); err == nil { @@ -128,13 +129,12 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { } } } - break - } else if strings.HasPrefix(line, "System Model:") { + case strings.HasPrefix(line, "System Model:"): p := strings.Split(string(line), ":") if p != nil { ret.VendorID = strings.TrimSpace(p[1]) } - } else if strings.HasPrefix(line, "Processor Type:") { + case strings.HasPrefix(line, "Processor Type:"): p := strings.Split(string(line), ":") if p != nil { c := strings.Split(string(p[1]), "_") @@ -148,7 +148,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { return 
[]InfoStat{ret}, nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(ctx context.Context, _ bool) (int, error) { info, err := InfoWithContext(ctx) if err == nil { return int(info[0].Cores), nil diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go new file mode 100644 index 00000000..c61a470f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package cpu + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "unsafe" + + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// sys/resource.h +const ( + CPUser = 0 + cpNice = 1 + cpSys = 2 + cpIntr = 3 + cpIdle = 4 + cpUStates = 5 +) + +// mach/machine.h +const ( + cpuStateUser = 0 + cpuStateSystem = 1 + cpuStateIdle = 2 + cpuStateNice = 3 + cpuStateMax = 4 +) + +// mach/processor_info.h +const ( + processorCpuLoadInfo = 2 //nolint:revive //FIXME +) + +type hostCpuLoadInfoData struct { //nolint:revive //FIXME + cpuTicks [cpuStateMax]uint32 +} + +// default value. from time.h +var ClocksPerSec = float64(128) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { + lib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + defer lib.Close() + + if percpu { + return perCPUTimes(lib) + } + + return allCPUTimes(lib) +} + +// Returns only one CPUInfoStat on FreeBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(_ context.Context) ([]InfoStat, error) { + var ret []InfoStat + + c := InfoStat{} + c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string") + family, _ := unix.SysctlUint32("machdep.cpu.family") + c.Family = strconv.FormatUint(uint64(family), 10) + model, _ := unix.SysctlUint32("machdep.cpu.model") + c.Model = strconv.FormatUint(uint64(model), 10) + stepping, _ := unix.SysctlUint32("machdep.cpu.stepping") + c.Stepping = int32(stepping) + features, err := unix.Sysctl("machdep.cpu.features") + if err == nil { + for _, v := range strings.Fields(features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features") + if err == nil { + for _, v := range strings.Fields(leaf7Features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures") + if err == nil { + for _, v := range strings.Fields(extfeatures) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + cores, _ := unix.SysctlUint32("machdep.cpu.core_count") + c.Cores = int32(cores) + cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size") + c.CacheSize = int32(cacheSize) + c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") + + v, err := getFrequency() + if err == nil { + c.Mhz = v + } + + return append(ret, c), nil +} + +func CountsWithContext(_ context.Context, logical bool) (int, error) { + var cpuArgument string + if logical { + cpuArgument = "hw.logicalcpu" + } else { + cpuArgument = "hw.physicalcpu" + } + + count, err := unix.SysctlUint32(cpuArgument) + if err != nil { + return 0, err + } + 
+ return int(count), nil +} + +func perCPUTimes(machLib *common.Library) ([]TimesStat, error) { + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + machTaskSelf := common.GetFunc[common.MachTaskSelfFunc](machLib, common.MachTaskSelfSym) + hostProcessorInfo := common.GetFunc[common.HostProcessorInfoFunc](machLib, common.HostProcessorInfoSym) + vmDeallocate := common.GetFunc[common.VMDeallocateFunc](machLib, common.VMDeallocateSym) + + var count, ncpu uint32 + var cpuload *hostCpuLoadInfoData + + status := hostProcessorInfo(machHostSelf(), processorCpuLoadInfo, &ncpu, uintptr(unsafe.Pointer(&cpuload)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_processor_info error=%d", status) + } + + if cpuload == nil { + return nil, errors.New("host_processor_info returned nil cpuload") + } + + defer vmDeallocate(machTaskSelf(), uintptr(unsafe.Pointer(cpuload)), uintptr(ncpu)) + + ret := []TimesStat{} + loads := unsafe.Slice(cpuload, ncpu) + + for i := 0; i < int(ncpu); i++ { + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(loads[i].cpuTicks[cpuStateUser]) / ClocksPerSec, + System: float64(loads[i].cpuTicks[cpuStateSystem]) / ClocksPerSec, + Nice: float64(loads[i].cpuTicks[cpuStateNice]) / ClocksPerSec, + Idle: float64(loads[i].cpuTicks[cpuStateIdle]) / ClocksPerSec, + } + + ret = append(ret, c) + } + + return ret, nil +} + +func allCPUTimes(machLib *common.Library) ([]TimesStat, error) { + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) + + var cpuload hostCpuLoadInfoData + count := uint32(cpuStateMax) + + status := hostStatistics(machHostSelf(), common.HOST_CPU_LOAD_INFO, + uintptr(unsafe.Pointer(&cpuload)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + c := TimesStat{ + CPU: "cpu-total", + User: float64(cpuload.cpuTicks[cpuStateUser]) / ClocksPerSec, + System: float64(cpuload.cpuTicks[cpuStateSystem]) / ClocksPerSec, + Nice: float64(cpuload.cpuTicks[cpuStateNice]) / ClocksPerSec, + Idle: float64(cpuload.cpuTicks[cpuStateIdle]) / ClocksPerSec, + } + + return []TimesStat{c}, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go new file mode 100644 index 00000000..8e69d7cb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && arm64 + +package cpu + +import ( + "encoding/binary" + "fmt" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// https://github.com/shoenig/go-m1cpu/blob/v0.1.6/cpu.go +func getFrequency() (float64, error) { + ioKit, err := common.NewLibrary(common.IOKit) + if err != nil { + return 0, err + } + defer ioKit.Close() + + coreFoundation, err := common.NewLibrary(common.CoreFoundation) + if err != nil { + return 0, err + } + defer coreFoundation.Close() + + ioServiceMatching := common.GetFunc[common.IOServiceMatchingFunc](ioKit, common.IOServiceMatchingSym) + ioServiceGetMatchingServices := common.GetFunc[common.IOServiceGetMatchingServicesFunc](ioKit, common.IOServiceGetMatchingServicesSym) + ioIteratorNext := common.GetFunc[common.IOIteratorNextFunc](ioKit, common.IOIteratorNextSym) + ioRegistryEntryGetName := common.GetFunc[common.IORegistryEntryGetNameFunc](ioKit, 
common.IORegistryEntryGetNameSym) + ioRegistryEntryCreateCFProperty := common.GetFunc[common.IORegistryEntryCreateCFPropertyFunc](ioKit, common.IORegistryEntryCreateCFPropertySym) + ioObjectRelease := common.GetFunc[common.IOObjectReleaseFunc](ioKit, common.IOObjectReleaseSym) + + cfStringCreateWithCString := common.GetFunc[common.CFStringCreateWithCStringFunc](coreFoundation, common.CFStringCreateWithCStringSym) + cfDataGetLength := common.GetFunc[common.CFDataGetLengthFunc](coreFoundation, common.CFDataGetLengthSym) + cfDataGetBytePtr := common.GetFunc[common.CFDataGetBytePtrFunc](coreFoundation, common.CFDataGetBytePtrSym) + cfRelease := common.GetFunc[common.CFReleaseFunc](coreFoundation, common.CFReleaseSym) + + matching := ioServiceMatching("AppleARMIODevice") + + var iterator uint32 + if status := ioServiceGetMatchingServices(common.KIOMainPortDefault, uintptr(matching), &iterator); status != common.KERN_SUCCESS { + return 0.0, fmt.Errorf("IOServiceGetMatchingServices error=%d", status) + } + defer ioObjectRelease(iterator) + + pCorekey := cfStringCreateWithCString(common.KCFAllocatorDefault, "voltage-states5-sram", common.KCFStringEncodingUTF8) + defer cfRelease(uintptr(pCorekey)) + + var pCoreHz uint32 + for { + service := ioIteratorNext(iterator) + if service <= 0 { + break + } + + buf := common.NewCStr(512) + ioRegistryEntryGetName(service, buf) + + if buf.GoString() == "pmgr" { + pCoreRef := ioRegistryEntryCreateCFProperty(service, uintptr(pCorekey), common.KCFAllocatorDefault, common.KNilOptions) + length := cfDataGetLength(uintptr(pCoreRef)) + data := cfDataGetBytePtr(uintptr(pCoreRef)) + + // composite uint32 from the byte array + buf := unsafe.Slice((*byte)(data), length) + + // combine the bytes into a uint32 value + b := buf[length-8 : length-4] + pCoreHz = binary.LittleEndian.Uint32(b) + ioObjectRelease(service) + break + } + + ioObjectRelease(service) + } + + return float64(pCoreHz / 1_000_000), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go new file mode 100644 index 00000000..b9e52aba --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && !arm64 + +package cpu + +import "golang.org/x/sys/unix" + +func getFrequency() (float64, error) { + // Use the rated frequency of the CPU. This is a static value and does not + // account for low power or Turbo Boost modes. 
+ cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") + return float64(cpuFrequency) / 1000000.0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go index fef53e5d..48f2804d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,9 +11,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -49,7 +51,7 @@ func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { if percpu { buf, err := unix.SysctlRaw("kern.cp_times") if err != nil { @@ -90,7 +92,7 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { const dmesgBoot = "/var/run/dmesg.boot" c, err := parseDmesgBoot(dmesgBoot) @@ -126,7 +128,11 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { func parseDmesgBoot(fileName string) (InfoStat, error) { c := InfoStat{} - lines, _ := common.ReadLines(fileName) + lines, err := common.ReadLines(fileName) + if err != nil { + return c, fmt.Errorf("could not read %s: %w", fileName, err) + } + for _, line := range lines { if matches := cpuEnd.FindStringSubmatch(line); matches != nil { break @@ -134,7 +140,7 @@ func parseDmesgBoot(fileName string) (InfoStat, error) { c.VendorID = matches[1] t, err := strconv.ParseInt(matches[2], 10, 32) if err != nil { - return c, fmt.Errorf("unable to parse DragonflyBSD CPU stepping information from %q: %v", line, err) + return c, fmt.Errorf("unable to parse DragonflyBSD CPU stepping information from %q: %w", line, err) } c.Stepping = int32(t) } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { @@ -151,6 +157,6 @@ func parseDmesgBoot(fileName string) (InfoStat, error) { return c, nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go index 57e14528..25ececa6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go index 
089f603c..245c1ec9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !dragonfly && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows,!dragonfly,!plan9,!aix package cpu @@ -7,7 +7,7 @@ import ( "context" "runtime" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go similarity index 89% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go index d3f47353..3e0aeb26 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,9 +11,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -50,7 +52,7 @@ func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { if percpu { buf, err := unix.SysctlRaw("kern.cp_times") if err != nil { @@ -91,7 +93,7 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { const dmesgBoot = "/var/run/dmesg.boot" c, num, err := parseDmesgBoot(dmesgBoot) @@ -124,7 +126,11 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { func parseDmesgBoot(fileName string) (InfoStat, int, error) { c := InfoStat{} - lines, _ := common.ReadLines(fileName) + lines, err := common.ReadLines(fileName) + if err != nil { + return c, 0, fmt.Errorf("could not read %s: %w", fileName, err) + } + cpuNum := 1 // default cpu num is 1 for _, line := range lines { if matches := cpuEnd.FindStringSubmatch(line); matches != nil { @@ -135,7 +141,7 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { c.Model = matches[4] t, err := strconv.ParseInt(matches[5], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %w", line, err) } c.Stepping = int32(t) } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { @@ -149,12 +155,12 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { } else if matches := cpuCores.FindStringSubmatch(line); matches != nil { t, err := strconv.ParseInt(matches[1], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %w", line, err) } cpuNum = int(t) t2, err := strconv.ParseInt(matches[2], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to 
parse FreeBSD CPU cores from %q: %w", line, err) } c.Cores = int32(t2) } @@ -163,6 +169,6 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { return c, cpuNum, nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go index 8b7f4c32..e4799bcf 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go index 57e14528..25ececa6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go index 8b7f4c32..e4799bcf 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go index 57e14528..25ececa6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go index da467e2d..0897dfa3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package cpu @@ -13,7 +13,7 @@ import ( "github.com/tklauser/go-sysconf" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ClocksPerSec = float64(100) @@ -81,6 +81,13 @@ var armModelToModelName = map[uint64]string{ 0xd4c: "Cortex-X1C", 0xd4d: "Cortex-A715", 0xd4e: "Cortex-X3", + 0xd4f: "Neoverse-V2", + 0xd81: "Cortex-A720", + 0xd82: "Cortex-X4", + 0xd84: "Neoverse-V3", + 0xd85: 
"Cortex-X925", + 0xd87: "Cortex-A725", + 0xd8e: "Neoverse-N3", } func init() { @@ -98,6 +105,7 @@ func Times(percpu bool) ([]TimesStat, error) { func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { filename := common.HostProcWithContext(ctx, "stat") lines := []string{} + var err error if percpu { statlines, err := common.ReadLines(filename) if err != nil || len(statlines) < 2 { @@ -110,7 +118,10 @@ func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { lines = append(lines, line) } } else { - lines, _ = common.ReadLinesOffsetN(filename, 0, 1) + lines, err = common.ReadLinesOffsetN(filename, 0, 1) + if err != nil || len(lines) == 0 { + return []TimesStat{}, nil + } } ret := make([]TimesStat, 0, len(lines)) @@ -135,7 +146,7 @@ func finishCPUInfo(ctx context.Context, c *InfoStat) { var err error var value float64 - if len(c.CoreID) == 0 { + if c.CoreID == "" { lines, err = common.ReadLines(sysCPUPath(ctx, c.CPU, "topology/core_id")) if err == nil { c.CoreID = lines[0] @@ -157,7 +168,7 @@ func finishCPUInfo(ctx context.Context, c *InfoStat) { } c.Mhz = value / 1000.0 // value is in kHz if c.Mhz > 9999 { - c.Mhz = c.Mhz / 1000.0 // value in Hz + c.Mhz /= 1000.0 // value in Hz } } @@ -174,7 +185,10 @@ func Info() ([]InfoStat, error) { func InfoWithContext(ctx context.Context) ([]InfoStat, error) { filename := common.HostProcWithContext(ctx, "cpuinfo") - lines, _ := common.ReadLines(filename) + lines, err := common.ReadLines(filename) + if err != nil { + return nil, fmt.Errorf("could not read %s: %w", filename, err) + } var ret []InfoStat var processorName string @@ -271,6 +285,10 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { val = strings.Split(value, ".")[0] } + if strings.EqualFold(val, "unknown") { + continue + } + t, err := strconv.ParseInt(val, 10, 64) if err != nil { return ret, err @@ -395,7 +413,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { for _, line := range lines { line = strings.ToLower(line) if strings.HasPrefix(line, "processor") { - _, err = strconv.Atoi(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:])) + _, err = strconv.ParseInt(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:]), 10, 32) if err == nil { ret++ } @@ -464,11 +482,11 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { } fields[0] = strings.TrimSpace(fields[0]) if fields[0] == "physical id" || fields[0] == "cpu cores" { - val, err := strconv.Atoi(strings.TrimSpace(fields[1])) + val, err := strconv.ParseInt(strings.TrimSpace(fields[1]), 10, 32) if err != nil { continue } - currentInfo[fields[0]] = val + currentInfo[fields[0]] = int(val) } } ret := 0 diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go index 1f66be34..9e23edb6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package cpu @@ -9,9 +9,10 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) const ( @@ -35,7 +36,8 @@ func Times(percpu bool) ([]TimesStat, error) { return 
TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) { +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { + ret := make([]TimesStat, 0) if !percpu { mib := []int32{ctlKern, kernCpTime} buf, _, err := common.CallSyscall(mib) @@ -43,20 +45,20 @@ func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err er return ret, err } times := (*cpuTimes)(unsafe.Pointer(&buf[0])) - stat := TimesStat{ + ret = append(ret, TimesStat{ CPU: "cpu-total", User: float64(times.User), Nice: float64(times.Nice), System: float64(times.Sys), Idle: float64(times.Idle), Irq: float64(times.Intr), - } - return []TimesStat{stat}, nil + }) + return ret, nil } ncpu, err := unix.SysctlUint32("hw.ncpu") if err != nil { - return + return ret, err } var i uint32 @@ -86,7 +88,7 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { var ret []InfoStat var err error @@ -114,6 +116,6 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { return append(ret, c), nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go index 57e14528..25ececa6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go new file mode 100644 index 00000000..e4799bcf --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go index 57e14528..25ececa6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go similarity index 86% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go index fe332903..9b37d296 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package cpu @@ -9,9 +9,10 @@ import ( "runtime" "unsafe" - 
"github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) const ( @@ -53,7 +54,8 @@ func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) { +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { + ret := make([]TimesStat, 0) if !percpu { mib := []int32{ctlKern, kernCpTime} buf, _, err := common.CallSyscall(mib) @@ -61,20 +63,20 @@ func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err er return ret, err } times := (*cpuTimes)(unsafe.Pointer(&buf[0])) - stat := TimesStat{ + ret = append(ret, TimesStat{ CPU: "cpu-total", User: float64(times.User) / ClocksPerSec, Nice: float64(times.Nice) / ClocksPerSec, System: float64(times.Sys) / ClocksPerSec, Idle: float64(times.Idle) / ClocksPerSec, Irq: float64(times.Intr) / ClocksPerSec, - } - return []TimesStat{stat}, nil + }) + return ret, nil } ncpu, err := unix.SysctlUint32("hw.ncpu") if err != nil { - return + return ret, err } var i uint32 @@ -107,7 +109,7 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { var ret []InfoStat var err error @@ -132,6 +134,6 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { return append(ret, c), nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go index 5e878399..40a6f43e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go index d659058c..464156d5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go index 5e878399..40a6f43e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go similarity 
index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go index d659058c..464156d5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go index d659058c..464156d5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go index a2e99d8c..02ad3f74 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package cpu @@ -9,14 +9,15 @@ import ( "runtime" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { +func TimesWithContext(ctx context.Context, _ bool) ([]TimesStat, error) { // BUG: percpu flag is not supported yet. 
root := os.Getenv("HOST_ROOT") c, err := stats.ReadCPUType(ctx, stats.WithRootDir(root)) @@ -41,10 +42,10 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { return []InfoStat{}, common.ErrNotImplementedError } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go index 4231ad16..9494e3c3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -41,7 +42,7 @@ var kstatSplit = regexp.MustCompile(`[:\s]+`) func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-p", "cpu_stat:*:*:/^idle$|^user$|^kernel$|^iowait$|^swap$/") if err != nil { - return nil, fmt.Errorf("cannot execute kstat: %s", err) + return nil, fmt.Errorf("cannot execute kstat: %w", err) } cpu := make(map[float64]float64) idle := make(map[float64]float64) @@ -56,31 +57,31 @@ func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { } cpuNumber, err := strconv.ParseFloat(fields[1], 64) if err != nil { - return nil, fmt.Errorf("cannot parse cpu number: %s", err) + return nil, fmt.Errorf("cannot parse cpu number: %w", err) } cpu[cpuNumber] = cpuNumber switch fields[3] { case "idle": idle[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { - return nil, fmt.Errorf("cannot parse idle: %s", err) + return nil, fmt.Errorf("cannot parse idle: %w", err) } case "user": user[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { - return nil, fmt.Errorf("cannot parse user: %s", err) + return nil, fmt.Errorf("cannot parse user: %w", err) } case "kernel": kern[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { - return nil, fmt.Errorf("cannot parse kernel: %s", err) + return nil, fmt.Errorf("cannot parse kernel: %w", err) } case "iowait": iowt[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { - return nil, fmt.Errorf("cannot parse iowait: %s", err) + return nil, fmt.Errorf("cannot parse iowait: %w", err) } - //not sure how this translates, don't report, add to kernel, something else? + // not sure how this translates, don't report, add to kernel, something else? 
/*case "swap": swap[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { @@ -120,27 +121,27 @@ func Info() ([]InfoStat, error) { func InfoWithContext(ctx context.Context) ([]InfoStat, error) { psrInfoOut, err := invoke.CommandWithContext(ctx, "psrinfo", "-p", "-v") if err != nil { - return nil, fmt.Errorf("cannot execute psrinfo: %s", err) + return nil, fmt.Errorf("cannot execute psrinfo: %w", err) } procs, err := parseProcessorInfo(string(psrInfoOut)) if err != nil { - return nil, fmt.Errorf("error parsing psrinfo output: %s", err) + return nil, fmt.Errorf("error parsing psrinfo output: %w", err) } isaInfoOut, err := invoke.CommandWithContext(ctx, "isainfo", "-b", "-v") if err != nil { - return nil, fmt.Errorf("cannot execute isainfo: %s", err) + return nil, fmt.Errorf("cannot execute isainfo: %w", err) } flags, err := parseISAInfo(string(isaInfoOut)) if err != nil { - return nil, fmt.Errorf("error parsing isainfo output: %s", err) + return nil, fmt.Errorf("error parsing isainfo output: %w", err) } result := make([]InfoStat, 0, len(flags)) - for _, proc := range procs { - procWithFlags := proc + for i := range procs { + procWithFlags := procs[i] procWithFlags.Flags = flags result = append(result, procWithFlags) } @@ -148,7 +149,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { return result, nil } -var flagsMatch = regexp.MustCompile(`[\w\.]+`) +var flagsMatch = regexp.MustCompile(`[\w.]+`) func parseISAInfo(cmdOutput string) ([]string, error) { words := flagsMatch.FindAllString(cmdOutput, -1) @@ -158,10 +159,7 @@ func parseISAInfo(cmdOutput string) ([]string, error) { return nil, errors.New("attempted to parse invalid isainfo output") } - flags := make([]string, len(words)-4) - for i, val := range words[4:] { - flags[i] = val - } + flags := words[4:] sort.Strings(flags) return flags, nil @@ -193,7 +191,7 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { if physicalCPU[psrStepOffset] != "" { stepParsed, err := strconv.ParseInt(physicalCPU[psrStepOffset], 10, 32) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for step as 32-bit integer: %s", physicalCPU[9], err) + return nil, fmt.Errorf("cannot parse value %q for step as 32-bit integer: %w", physicalCPU[9], err) } step = int32(stepParsed) } @@ -201,7 +199,7 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { if physicalCPU[psrClockOffset] != "" { clockParsed, err := strconv.ParseInt(physicalCPU[psrClockOffset], 10, 64) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for clock as 32-bit integer: %s", physicalCPU[10], err) + return nil, fmt.Errorf("cannot parse value %q for clock as 32-bit integer: %w", physicalCPU[10], err) } clock = float64(clockParsed) } @@ -213,7 +211,7 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { case physicalCPU[psrNumCoresOffset] != "": numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresOffset], 10, 32) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[1], err) + return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %w", physicalCPU[1], err) } for i := 0; i < int(numCores); i++ { @@ -234,12 +232,12 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { case physicalCPU[psrNumCoresHTOffset] != "": numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresHTOffset], 10, 32) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[3], err) + 
return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %w", physicalCPU[3], err) } numHT, err = strconv.ParseInt(physicalCPU[psrNumHTOffset], 10, 32) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for hyperthread count as 32-bit integer: %s", physicalCPU[4], err) + return nil, fmt.Errorf("cannot parse value %q for hyperthread count as 32-bit integer: %w", physicalCPU[4], err) } for i := 0; i < int(numCores); i++ { @@ -264,6 +262,6 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { return result, nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go new file mode 100644 index 00000000..a6000a3c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package cpu + +import ( + "context" + "errors" + "fmt" + "math/bits" + "path/filepath" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +var ( + procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetLogicalProcessorInformationEx = common.Modkernel32.NewProc("GetLogicalProcessorInformationEx") + procGetSystemFirmwareTable = common.Modkernel32.NewProc("GetSystemFirmwareTable") + procCallNtPowerInformation = common.ModPowrProf.NewProc("CallNtPowerInformation") +) + +type win32_Processor struct { //nolint:revive //FIXME + Family uint16 + Manufacturer string + Name string + NumberOfLogicalProcessors uint32 + NumberOfCores uint32 + ProcessorID *string + Stepping *string + MaxClockSpeed uint32 +} + +// SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION +// defined in windows api doc with the following +// https://docs.microsoft.com/en-us/windows/desktop/api/winternl/nf-winternl-ntquerysysteminformation#system_processor_performance_information +// additional fields documented here +// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/ex/sysinfo/processor_performance.htm +type win32_SystemProcessorPerformanceInformation struct { //nolint:revive //FIXME + IdleTime int64 // idle time in 100ns (this is not a filetime). + KernelTime int64 // kernel time in 100ns. kernel time includes idle time. (this is not a filetime). + UserTime int64 // usertime in 100ns (this is not a filetime). + DpcTime int64 // dpc time in 100ns (this is not a filetime). 
+ InterruptTime int64 // interrupt time in 100ns + InterruptCount uint64 // ULONG needs to be uint64 +} + +// https://learn.microsoft.com/en-us/windows/win32/power/processor-power-information-str +type processorPowerInformation struct { + number uint32 // http://download.microsoft.com/download/a/d/f/adf1347d-08dc-41a4-9084-623b1194d4b2/MoreThan64proc.docx + maxMhz uint32 + currentMhz uint32 + mhzLimit uint32 + maxIdleState uint32 + currentIdleState uint32 +} + +const ( + ClocksPerSec = 10000000.0 + + // systemProcessorPerformanceInformationClass information class to query with NTQuerySystemInformation + // https://processhacker.sourceforge.io/doc/ntexapi_8h.html#ad5d815b48e8f4da1ef2eb7a2f18a54e0 + win32_SystemProcessorPerformanceInformationClass = 8 //nolint:revive //FIXME + + // size of systemProcessorPerformanceInfoSize in memory + win32_SystemProcessorPerformanceInfoSize = uint32(unsafe.Sizeof(win32_SystemProcessorPerformanceInformation{})) //nolint:revive //FIXME + + firmwareTableProviderSignatureRSMB = 0x52534d42 // "RSMB" https://gitlab.winehq.org/dreamer/wine/-/blame/wine-7.0-rc6/dlls/ntdll/unix/system.c#L230 + smBiosHeaderSize = 8 // SMBIOS header size + smbiosEndOfTable = 127 // Minimum length for processor structure + smbiosTypeProcessor = 4 // SMBIOS Type 4: Processor Information + smbiosProcessorMinLength = 0x18 // Minimum length for processor structure + + centralProcessorRegistryKey = `HARDWARE\DESCRIPTION\System\CentralProcessor` +) + +type relationship uint32 + +// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformationex +const ( + relationProcessorCore = relationship(0) + relationProcessorPackage = relationship(3) +) + +const ( + kAffinitySize = unsafe.Sizeof(int(0)) + // https://learn.microsoft.com/en-us/windows-hardware/drivers/kernel/interrupt-affinity-and-priority + maxLogicalProcessorsPerGroup = uint32(unsafe.Sizeof(kAffinitySize * 8)) + // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ne-wdm-power_information_level + processorInformation = 11 +) + +// Times returns times stat per cpu and combined for all CPUs +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + return perCPUTimes() + } + + var ret []TimesStat + var lpIdleTime common.FILETIME + var lpKernelTime common.FILETIME + var lpUserTime common.FILETIME + // GetSystemTimes returns 0 for error, in which case we check err, + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + r, _, err := common.ProcGetSystemTimes.Call( + uintptr(unsafe.Pointer(&lpIdleTime)), + uintptr(unsafe.Pointer(&lpKernelTime)), + uintptr(unsafe.Pointer(&lpUserTime))) + if r == 0 { + return nil, err + } + + LOT := float64(0.0000001) + HIT := (LOT * 4294967296.0) + idle := ((HIT * float64(lpIdleTime.DwHighDateTime)) + (LOT * float64(lpIdleTime.DwLowDateTime))) + user := ((HIT * float64(lpUserTime.DwHighDateTime)) + (LOT * float64(lpUserTime.DwLowDateTime))) + kernel := ((HIT * float64(lpKernelTime.DwHighDateTime)) + (LOT * float64(lpKernelTime.DwLowDateTime))) + system := (kernel - idle) + + ret = append(ret, TimesStat{ + CPU: "cpu-total", + Idle: float64(idle), + User: float64(user), + System: float64(system), + }) + return ret, nil +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +// this function iterates over each set bit in the package affinity mask, each bit 
represent a logical processor in a group (assuming you are iteriang over a package mask) +// the function is used also to compute the global logical processor number +// https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups +// see https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-group_affinity +// and https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-processor_relationship +// and https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-system_logical_processor_information_ex +func forEachSetBit64(mask uint64, fn func(bit int)) { + m := mask + for m != 0 { + b := bits.TrailingZeros64(m) + fn(b) + m &= m - 1 + } +} + +func getProcessorPowerInformation(ctx context.Context) ([]processorPowerInformation, error) { + numLP, countErr := CountsWithContext(ctx, true) + if countErr != nil { + return nil, fmt.Errorf("failed to get logical processor count: %w", countErr) + } + if numLP <= 0 { + return nil, fmt.Errorf("invalid logical processor count: %d", numLP) + } + + ppiSize := uintptr(numLP) * unsafe.Sizeof(processorPowerInformation{}) + buf := make([]byte, ppiSize) + ppi, _, err := procCallNtPowerInformation.Call( + uintptr(processorInformation), + 0, + 0, + uintptr(unsafe.Pointer(&buf[0])), + uintptr(ppiSize), + ) + if ppi != 0 { + return nil, fmt.Errorf("CallNtPowerInformation failed with code %d: %w", ppi, err) + } + ppis := unsafe.Slice((*processorPowerInformation)(unsafe.Pointer(&buf[0])), numLP) + return ppis, nil +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + processorPackages, err := getSystemLogicalProcessorInformationEx(relationProcessorPackage) + if err != nil { + return ret, fmt.Errorf("failed to get processor package information: %w", err) + } + + ppis, powerInformationErr := getProcessorPowerInformation(ctx) + if powerInformationErr != nil { + return ret, fmt.Errorf("failed to get processor power information: %w", powerInformationErr) + } + + family, processorId, smBIOSErr := getSMBIOSProcessorInfo() + if smBIOSErr != nil { + return ret, smBIOSErr + } + + for i, pkg := range processorPackages { + logicalCount := 0 + maxMhz := 0 + model := "" + vendorId := "" + // iterate over each set bit in the package affinity mask + for _, ga := range pkg.processor.groupMask { + g := int(ga.group) + forEachSetBit64(uint64(ga.mask), func(bit int) { + // the global logical processor label + globalLpl := g*int(maxLogicalProcessorsPerGroup) + bit + if globalLpl >= 0 && globalLpl < len(ppis) { + logicalCount++ + m := int(ppis[globalLpl].maxMhz) + if m > maxMhz { + maxMhz = m + } + } + + registryKeyPath := filepath.Join(centralProcessorRegistryKey, strconv.Itoa(globalLpl)) + key, err := registry.OpenKey(registry.LOCAL_MACHINE, registryKeyPath, registry.QUERY_VALUE|registry.READ) + if err == nil { + model = getRegistryStringValueIfUnset(key, "ProcessorNameString", model) + vendorId = getRegistryStringValueIfUnset(key, "VendorIdentifier", vendorId) + _ = key.Close() + } + }) + } + ret = append(ret, InfoStat{ + CPU: int32(i), + Family: strconv.FormatUint(uint64(family), 10), + VendorID: vendorId, + ModelName: model, + Cores: int32(logicalCount), + PhysicalID: processorId, + Mhz: float64(maxMhz), + Flags: []string{}, + }) + } + + return ret, nil +} + +// perCPUTimes returns times stat per cpu, per core and overall for all CPUs +func perCPUTimes() ([]TimesStat, error) { + var ret []TimesStat + stats, err := perfInfo() + if err != nil { + return nil, err + } + for core, v := range stats { + c := 
TimesStat{ + CPU: fmt.Sprintf("cpu%d", core), + User: float64(v.UserTime) / ClocksPerSec, + System: float64(v.KernelTime-v.IdleTime) / ClocksPerSec, + Idle: float64(v.IdleTime) / ClocksPerSec, + Irq: float64(v.InterruptTime) / ClocksPerSec, + } + ret = append(ret, c) + } + return ret, nil +} + +// makes call to Windows API function to retrieve performance information for each core +func perfInfo() ([]win32_SystemProcessorPerformanceInformation, error) { + // Make maxResults large for safety. + // We can't invoke the api call with a results array that's too small. + // If we have more than 2056 cores on a single host, then it's probably the future. + maxBuffer := 2056 + // buffer for results from the windows proc + resultBuffer := make([]win32_SystemProcessorPerformanceInformation, maxBuffer) + // size of the buffer in memory + bufferSize := uintptr(win32_SystemProcessorPerformanceInfoSize) * uintptr(maxBuffer) + // size of the returned response + var retSize uint32 + + // Invoke windows api proc. + // The returned err from the windows dll proc will always be non-nil even when successful. + // See https://godoc.org/golang.org/x/sys/windows#LazyProc.Call for more information + retCode, _, err := common.ProcNtQuerySystemInformation.Call( + win32_SystemProcessorPerformanceInformationClass, // System Information Class -> SystemProcessorPerformanceInformation + uintptr(unsafe.Pointer(&resultBuffer[0])), // pointer to first element in result buffer + bufferSize, // size of the buffer in memory + uintptr(unsafe.Pointer(&retSize)), // pointer to the size of the returned results the windows proc will set this + ) + + // check return code for errors + if retCode != 0 { + return nil, fmt.Errorf("call to NtQuerySystemInformation returned %d. err: %s", retCode, err.Error()) + } + + // calculate the number of returned elements based on the returned size + numReturnedElements := retSize / win32_SystemProcessorPerformanceInfoSize + + // trim results to the number of returned elements + resultBuffer = resultBuffer[:numReturnedElements] + + return resultBuffer, nil +} + +// SystemInfo is an equivalent representation of SYSTEM_INFO in the Windows API. +// https://msdn.microsoft.com/en-us/library/ms724958%28VS.85%29.aspx?f=255&MSPPError=-2147217396 +// https://github.com/elastic/go-windows/blob/bb1581babc04d5cb29a2bfa7a9ac6781c730c8dd/kernel32.go#L43 +type systemInfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +type groupAffinity struct { + mask uintptr // https://learn.microsoft.com/en-us/windows-hardware/drivers/kernel/interrupt-affinity-and-priority#about-kaffinity + group uint16 + reserved [3]uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-processor_relationship +type processorRelationship struct { + flags byte + efficientClass byte + reserved [20]byte + groupCount uint16 + groupMask [1]groupAffinity +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-system_logical_processor_information_ex +type systemLogicalProcessorInformationEx struct { + relationship uint32 + size uint32 + processor processorRelationship +} + +// getSMBIOSProcessorInfo reads the SMBIOS Type 4 (Processor Information) structure and returns the Processor Family and ProcessorId fields. 
+// If not found, returns 0 and an empty string. +func getSMBIOSProcessorInfo() (family uint8, processorId string, err error) { + // https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemfirmwaretable + size, _, err := procGetSystemFirmwareTable.Call( + uintptr(firmwareTableProviderSignatureRSMB), + 0, + 0, + 0, + ) + if size == 0 { + return 0, "", fmt.Errorf("failed to get SMBIOS table size: %w", err) + } + buf := make([]byte, size) + ret, _, err := procGetSystemFirmwareTable.Call( + uintptr(firmwareTableProviderSignatureRSMB), + 0, + uintptr(unsafe.Pointer(&buf[0])), + uintptr(size), + ) + if ret == 0 { + return 0, "", fmt.Errorf("failed to read SMBIOS table: %w", err) + } + // https://wiki.osdev.org/System_Management_BIOS + i := smBiosHeaderSize // skip SMBIOS header (first 8 bytes) + maxIterations := len(buf) * 2 + iterations := 0 + for i < len(buf) && iterations < maxIterations { + iterations++ + if i+4 > len(buf) { + break + } + typ := buf[i] + length := buf[i+1] + if typ == smbiosEndOfTable { + break + } + if typ == smbiosTypeProcessor && length >= smbiosProcessorMinLength && i+int(length) <= len(buf) { + // Ensure we have enough bytes for procIdBytes + if i+16 > len(buf) { + break + } + // Get the processor family from byte at offset 6 + family = buf[i+6] + // Extract processor ID bytes (8 bytes total) from offsets 8-15 + procIdBytes := buf[i+8 : i+16] + // Convert first 4 bytes to 32-bit EAX register value (little endian) + eax := uint32(procIdBytes[0]) | uint32(procIdBytes[1])<<8 | uint32(procIdBytes[2])<<16 | uint32(procIdBytes[3])<<24 + // Convert last 4 bytes to 32-bit EDX register value (little endian) + edx := uint32(procIdBytes[4]) | uint32(procIdBytes[5])<<8 | uint32(procIdBytes[6])<<16 | uint32(procIdBytes[7])<<24 + // Format processor ID as 16 character hex string (EDX+EAX) + procId := fmt.Sprintf("%08X%08X", edx, eax) + return family, procId, nil + } + // skip to next structure + j := i + int(length) + innerIterations := 0 + maxInner := len(buf) // failsafe for inner loop + for j+1 < len(buf) && innerIterations < maxInner { + innerIterations++ + if buf[j] == 0 && buf[j+1] == 0 { + j += 2 + break + } + j++ + } + if innerIterations >= maxInner { + break // malformed buffer, avoid infinite loop + } + i = j + } + return 0, "", fmt.Errorf("SMBIOS processor information not found: %w", syscall.ERROR_NOT_FOUND) +} + +func getSystemLogicalProcessorInformationEx(relationship relationship) ([]systemLogicalProcessorInformationEx, error) { + var length uint32 + // First call to determine the required buffer size + _, _, err := procGetLogicalProcessorInformationEx.Call(uintptr(relationship), 0, uintptr(unsafe.Pointer(&length))) + if err != nil && !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { + return nil, fmt.Errorf("failed to get buffer size: %w", err) + } + + // Allocate the buffer + buffer := make([]byte, length) + + // Second call to retrieve the processor information + _, _, err = procGetLogicalProcessorInformationEx.Call(uintptr(relationship), uintptr(unsafe.Pointer(&buffer[0])), uintptr(unsafe.Pointer(&length))) + if err != nil && !errors.Is(err, windows.NTE_OP_OK) { + return nil, fmt.Errorf("failed to get logical processor information: %w", err) + } + + // Convert the byte slice into a slice of systemLogicalProcessorInformationEx structs + offset := uintptr(0) + var infos []systemLogicalProcessorInformationEx + for offset < uintptr(length) { + info := 
(*systemLogicalProcessorInformationEx)(unsafe.Pointer(uintptr(unsafe.Pointer(&buffer[0])) + offset)) + infos = append(infos, *info) + offset += uintptr(info.size) + } + + return infos, nil +} + +func getPhysicalCoreCount() (int, error) { + infos, err := getSystemLogicalProcessorInformationEx(relationProcessorCore) + return len(infos), err +} + +func getRegistryStringValueIfUnset(key registry.Key, keyName, value string) string { + if value != "" { + return value + } + val, _, err := key.GetStringValue(keyName) + if err == nil { + return strings.TrimSpace(val) + } + return "" +} + +func CountsWithContext(_ context.Context, logical bool) (int, error) { + if logical { + // Get logical processor count https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 + ret := windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS) + if ret != 0 { + return int(ret), nil + } + + var sInfo systemInfo + _, _, err := procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&sInfo))) + if sInfo.dwNumberOfProcessors == 0 { + return 0, err + } + return int(sInfo.dwNumberOfProcessors), nil + } + + // Get physical core count https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L499 + return getPhysicalCoreCount() +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host.go b/vendor/github.com/shirou/gopsutil/v4/host/host.go similarity index 86% rename from vendor/github.com/shirou/gopsutil/v3/host/host.go rename to vendor/github.com/shirou/gopsutil/v4/host/host.go index ee948636..f85e5d7e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host.go @@ -1,14 +1,16 @@ +// SPDX-License-Identifier: BSD-3-Clause package host import ( "context" "encoding/json" "errors" + "fmt" "os" "runtime" "time" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) type Warnings = common.Warnings @@ -40,13 +42,6 @@ type UserStat struct { Started int `json:"started"` } -type TemperatureStat struct { - SensorKey string `json:"sensorKey"` - Temperature float64 `json:"temperature"` - High float64 `json:"sensorHigh"` - Critical float64 `json:"sensorCritical"` -} - func (h InfoStat) String() string { s, _ := json.Marshal(h) return string(s) @@ -57,11 +52,6 @@ func (u UserStat) String() string { return string(s) } -func (t TemperatureStat) String() string { - s, _ := json.Marshal(t) - return string(s) -} - var enableBootTimeCache bool // EnableBootTimeCache change cache behavior of BootTime. If true, cache BootTime value. Default is false. 
@@ -81,47 +71,47 @@ func InfoWithContext(ctx context.Context) (*InfoStat, error) { ret.Hostname, err = os.Hostname() if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting hostname: %w", err) } ret.Platform, ret.PlatformFamily, ret.PlatformVersion, err = PlatformInformationWithContext(ctx) if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting platform information: %w", err) } ret.KernelVersion, err = KernelVersionWithContext(ctx) if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting kernel version: %w", err) } ret.KernelArch, err = KernelArch() if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting kernel architecture: %w", err) } ret.VirtualizationSystem, ret.VirtualizationRole, err = VirtualizationWithContext(ctx) if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting virtualization information: %w", err) } ret.BootTime, err = BootTimeWithContext(ctx) if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting boot time: %w", err) } ret.Uptime, err = UptimeWithContext(ctx) if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting uptime: %w", err) } ret.Procs, err = numProcs(ctx) if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting number of procs: %w", err) } ret.HostID, err = HostIDWithContext(ctx) if err != nil && !errors.Is(err, common.ErrNotImplementedError) { - return nil, err + return nil, fmt.Errorf("getting host ID: %w", err) } return ret, nil @@ -157,10 +147,6 @@ func KernelVersion() (string, error) { return KernelVersionWithContext(context.Background()) } -func SensorsTemperatures() ([]TemperatureStat, error) { - return SensorsTemperaturesWithContext(context.Background()) -} - func timeSince(ts uint64) uint64 { return uint64(time.Now().Unix()) - ts } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_aix.go b/vendor/github.com/shirou/gopsutil/v4/host/host_aix.go similarity index 59% rename from vendor/github.com/shirou/gopsutil/v3/host/host_aix.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_aix.go index d06899ea..2e8fb553 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package host @@ -9,14 +9,12 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // from https://www.ibm.com/docs/en/aix/7.2?topic=files-utmph-file const ( - user_PROCESS = 7 - - hostTemperatureScale = 1000.0 // Not part of the linked file, but kept just in case it becomes relevant + user_PROCESS = 7 //nolint:revive //FIXME ) func HostIDWithContext(ctx context.Context) (string, error) { @@ -26,10 +24,10 @@ func HostIDWithContext(ctx context.Context) (string, error) { } // The command always returns an extra newline, so we make use of Split() to get only the first line - return strings.Split(string(out[:]), "\n")[0], nil + return strings.Split(string(out), "\n")[0], nil } -func numProcs(ctx context.Context) (uint64, error) { +func numProcs(_ context.Context) 
(uint64, error) { return 0, common.ErrNotImplementedError } @@ -40,76 +38,86 @@ func BootTimeWithContext(ctx context.Context) (btime uint64, err error) { } if ut <= 0 { - return 0, errors.New("Uptime was not set, so cannot calculate boot time from it.") + return 0, errors.New("uptime was not set, so cannot calculate boot time from it") } - ut = ut * 60 + ut *= 60 return timeSince(ut), nil } -// This function takes multiple formats of output frmo the uptime -// command and converts the data into minutes. +// Parses result from uptime into minutes // Some examples of uptime output that this command handles: // 11:54AM up 13 mins, 1 user, load average: 2.78, 2.62, 1.79 // 12:41PM up 1 hr, 1 user, load average: 2.47, 2.85, 2.83 // 07:43PM up 5 hrs, 1 user, load average: 3.27, 2.91, 2.72 // 11:18:23 up 83 days, 18:29, 4 users, load average: 0.16, 0.03, 0.01 +// 08:47PM up 2 days, 20 hrs, 1 user, load average: 2.47, 2.17, 2.17 +// 01:16AM up 4 days, 29 mins, 1 user, load average: 2.29, 2.31, 2.21 func UptimeWithContext(ctx context.Context) (uint64, error) { out, err := invoke.CommandWithContext(ctx, "uptime") if err != nil { return 0, err } - // Convert our uptime to a series of fields we can extract - ut := strings.Fields(string(out[:])) + return parseUptime(string(out)), nil +} + +func parseUptime(uptime string) uint64 { + ut := strings.Fields(uptime) + var days, hours, mins uint64 + var err error - // Convert the second field value to integer - var days uint64 = 0 - var hours uint64 = 0 - var minutes uint64 = 0 - if ut[3] == "days," { + switch ut[3] { + case "day,", "days,": days, err = strconv.ParseUint(ut[2], 10, 64) if err != nil { - return 0, err + return 0 } - // Split field 4 into hours and minutes - hm := strings.Split(ut[4], ":") - hours, err = strconv.ParseUint(hm[0], 10, 64) - if err != nil { - return 0, err - } - minutes, err = strconv.ParseUint(strings.Replace(hm[1], ",", "", -1), 10, 64) - if err != nil { - return 0, err + // day provided along with a single hour or hours + // ie: up 2 days, 20 hrs, + if ut[5] == "hr," || ut[5] == "hrs," { + hours, err = strconv.ParseUint(ut[4], 10, 64) + if err != nil { + return 0 + } } - } else if ut[3] == "hr," || ut[3] == "hrs," { - hours, err = strconv.ParseUint(ut[2], 10, 64) - if err != nil { - return 0, err + + // mins provided along with a single min or mins + // ie: up 4 days, 29 mins, + if ut[5] == "min," || ut[5] == "mins," { + mins, err = strconv.ParseUint(ut[4], 10, 64) + if err != nil { + return 0 + } } - } else if ut[3] == "mins," { - minutes, err = strconv.ParseUint(ut[2], 10, 64) - if err != nil { - return 0, err + + // alternatively day provided with hh:mm + // ie: up 83 days, 18:29 + if strings.Contains(ut[4], ":") { + hm := strings.Split(ut[4], ":") + hours, err = strconv.ParseUint(hm[0], 10, 64) + if err != nil { + return 0 + } + mins, err = strconv.ParseUint(strings.Trim(hm[1], ","), 10, 64) + if err != nil { + return 0 + } } - } else if _, err := strconv.ParseInt(ut[3], 10, 64); err == nil && strings.Contains(ut[2], ":") { - // Split field 2 into hours and minutes - hm := strings.Split(ut[2], ":") - hours, err = strconv.ParseUint(hm[0], 10, 64) + case "hr,", "hrs,": + hours, err = strconv.ParseUint(ut[2], 10, 64) if err != nil { - return 0, err + return 0 } - minutes, err = strconv.ParseUint(strings.Replace(hm[1], ",", "", -1), 10, 64) + case "min,", "mins,": + mins, err = strconv.ParseUint(ut[2], 10, 64) if err != nil { - return 0, err + return 0 } } - // Stack them all together as minutes - total_time := (days * 24 * 60) 
+ (hours * 60) + minutes - - return total_time, nil + return (days * 24 * 60) + (hours * 60) + mins } // This is a weak implementation due to the limitations on retrieving this data in AIX @@ -152,23 +160,23 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } // Much of this function could be static. However, to be future proofed, I've made it call the OS for the information in all instances. -func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { +func PlatformInformationWithContext(ctx context.Context) (platform, family, version string, err error) { // Set the platform (which should always, and only be, "AIX") from `uname -s` out, err := invoke.CommandWithContext(ctx, "uname", "-s") if err != nil { return "", "", "", err } - platform = strings.TrimRight(string(out[:]), "\n") + platform = strings.TrimRight(string(out), "\n") // Set the family - family = strings.TrimRight(string(out[:]), "\n") + family = strings.TrimRight(string(out), "\n") // Set the version out, err = invoke.CommandWithContext(ctx, "oslevel") if err != nil { return "", "", "", err } - version = strings.TrimRight(string(out[:]), "\n") + version = strings.TrimRight(string(out), "\n") return platform, family, version, nil } @@ -178,7 +186,7 @@ func KernelVersionWithContext(ctx context.Context) (version string, err error) { if err != nil { return "", err } - version = strings.TrimRight(string(out[:]), "\n") + version = strings.TrimRight(string(out), "\n") return version, nil } @@ -188,15 +196,11 @@ func KernelArch() (arch string, err error) { if err != nil { return "", err } - arch = strings.TrimRight(string(out[:]), "\n") + arch = strings.TrimRight(string(out), "\n") return arch, nil } -func VirtualizationWithContext(ctx context.Context) (string, string, error) { +func VirtualizationWithContext(_ context.Context) (string, string, error) { return "", "", common.ErrNotImplementedError } - -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - return nil, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_aix_ppc64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_aix_ppc64.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/host/host_aix_ppc64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_aix_ppc64.go index de9674b7..6e5d802d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_aix_ppc64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_aix_ppc64.go @@ -1,5 +1,4 @@ //go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo // Guessed at from the following document: // https://www.ibm.com/docs/sl/ibm-mq/9.2?topic=platforms-standard-data-types-aix-linux-windows diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go b/vendor/github.com/shirou/gopsutil/v4/host/host_bsd.go similarity index 86% rename from vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_bsd.go index f9a29614..4d27ed62 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd || openbsd || netbsd -// +build darwin freebsd openbsd netbsd package host @@ -13,7 +13,7 @@ import ( // cachedBootTime must be accessed via atomic.Load/StoreUint64 var cachedBootTime uint64 -func BootTimeWithContext(ctx context.Context) (uint64, error) { +func 
BootTimeWithContext(_ context.Context) (uint64, error) { if enableBootTimeCache { t := atomic.LoadUint64(&cachedBootTime) if t != 0 { diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go b/vendor/github.com/shirou/gopsutil/v4/host/host_darwin.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_darwin.go index 873ed4ae..2764ce99 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package host @@ -15,12 +15,12 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/process" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/process" ) // from utmpx.h -const user_PROCESS = 7 +const user_PROCESS = 7 //nolint:revive //FIXME func HostIDWithContext(ctx context.Context) (string, error) { out, err := invoke.CommandWithContext(ctx, "ioreg", "-rd1", "-c", "IOPlatformExpertDevice") @@ -49,7 +49,7 @@ func numProcs(ctx context.Context) (uint64, error) { return uint64(len(procs)), nil } -func UsersWithContext(ctx context.Context) ([]UserStat, error) { +func UsersWithContext(_ context.Context) ([]UserStat, error) { utmpfile := "/var/run/utmpx" var ret []UserStat @@ -67,8 +67,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { // Skip macOS utmpx header part buf = buf[604:] - u := Utmpx{} - entrySize := int(unsafe.Sizeof(u)) + entrySize := int(unsafe.Sizeof(Utmpx{})) count := len(buf) / entrySize for i := 0; i < count; i++ { @@ -123,11 +122,11 @@ func PlatformInformationWithContext(ctx context.Context) (string, string, string return platform, family, pver, nil } -func VirtualizationWithContext(ctx context.Context) (string, string, error) { +func VirtualizationWithContext(_ context.Context) (string, string, error) { return "", "", common.ErrNotImplementedError } -func KernelVersionWithContext(ctx context.Context) (string, error) { +func KernelVersionWithContext(_ context.Context) (string, error) { version, err := unix.Sysctl("kern.osrelease") return strings.ToLower(version), err } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_darwin_amd64.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/host/host_darwin_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_darwin_amd64.go index 8caeed2e..1efc353a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_darwin_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_darwin.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_darwin_arm64.go similarity index 89% rename from vendor/github.com/shirou/gopsutil/v3/host/host_darwin_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_darwin_arm64.go index 293bd4df..512e5697 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_darwin_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && arm64 -// +build darwin,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs host/types_darwin.go diff --git a/vendor/github.com/shirou/gopsutil/v4/host/host_fallback.go b/vendor/github.com/shirou/gopsutil/v4/host/host_fallback.go new file mode 100644 index 00000000..ed2a4ceb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_fallback.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !aix + +package host + +import ( + "context" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func HostIDWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func numProcs(_ context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func BootTimeWithContext(_ context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func UptimeWithContext(_ context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func UsersWithContext(_ context.Context) ([]UserStat, error) { + return []UserStat{}, common.ErrNotImplementedError +} + +func VirtualizationWithContext(_ context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func KernelVersionWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func PlatformInformationWithContext(_ context.Context) (string, string, string, error) { + return "", "", "", common.ErrNotImplementedError +} + +func KernelArch() (string, error) { + return "", common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd.go similarity index 81% rename from vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_freebsd.go index 9a5382d3..882e0bcd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package host @@ -13,9 +13,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/process" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/process" ) const ( @@ -24,7 +25,7 @@ const ( UTHostSize = 16 ) -func HostIDWithContext(ctx context.Context) (string, error) { +func HostIDWithContext(_ context.Context) (string, error) { uuid, err := unix.Sysctl("kern.hostuuid") if err != nil { return "", err @@ -40,7 +41,7 @@ func numProcs(ctx context.Context) (uint64, error) { return uint64(len(procs)), nil } -func UsersWithContext(ctx context.Context) ([]UserStat, error) { +func UsersWithContext(_ context.Context) ([]UserStat, error) { utmpfile := "/var/run/utx.active" if !common.PathExists(utmpfile) { utmpfile = "/var/run/utmp" // before 9.0 @@ -84,7 +85,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { return ret, nil } -func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { +func PlatformInformationWithContext(_ context.Context) (string, string, string, error) { platform, err := unix.Sysctl("kern.ostype") if err != nil { return "", "", "", err @@ -98,7 +99,7 @@ func PlatformInformationWithContext(ctx context.Context) (string, string, string return strings.ToLower(platform), "", strings.ToLower(version), nil } -func VirtualizationWithContext(ctx 
context.Context) (string, string, error) { +func VirtualizationWithContext(_ context.Context) (string, string, error) { return "", "", common.ErrNotImplementedError } @@ -116,8 +117,7 @@ func getUsersFromUtmp(utmpfile string) ([]UserStat, error) { return ret, err } - u := Utmp{} - entrySize := int(unsafe.Sizeof(u)) + entrySize := int(unsafe.Sizeof(Utmp{})) count := len(buf) / entrySize for i := 0; i < count; i++ { @@ -141,10 +141,6 @@ func getUsersFromUtmp(utmpfile string) ([]UserStat, error) { return ret, nil } -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - return []TemperatureStat{}, common.ErrNotImplementedError -} - func KernelVersionWithContext(ctx context.Context) (string, error) { _, _, version, err := PlatformInformationWithContext(ctx) return version, err diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_386.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_386.go index 88453d2a..0d31eb11 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_amd64.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_amd64.go index 8af74b0f..603a0ba5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_arm.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_arm.go index f7d6ede5..5021f5e1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_arm64.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_arm64.go index 41bec3c1..4fe188b3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_freebsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd && arm64 -// +build freebsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs host/types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux.go similarity index 59% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux.go index 5d4c1a90..f0fe076f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package host @@ -10,14 +10,12 @@ import ( "fmt" "io" "os" - "path/filepath" "regexp" - "strconv" "strings" "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) type lsbStruct struct { @@ -29,9 +27,7 @@ type lsbStruct struct { // from utmp.h const ( - user_PROCESS = 7 - - hostTemperatureScale = 1000.0 + user_PROCESS = 7 //nolint:revive //FIXME ) func HostIDWithContext(ctx context.Context) (string, error) { @@ -74,7 +70,7 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { return common.BootTimeWithContext(ctx, enableBootTimeCache) } -func UptimeWithContext(ctx context.Context) (uint64, error) { +func UptimeWithContext(_ context.Context) (uint64, error) { sysinfo := &unix.Sysinfo_t{} if err := unix.Sysinfo(sysinfo); err != nil { return 0, err @@ -174,51 +170,53 @@ func getlsbStruct(ctx context.Context) (*lsbStruct, error) { return ret, nil } -func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { +func PlatformInformationWithContext(ctx context.Context) (platform, family, version string, err error) { lsb, err := getlsbStruct(ctx) if err != nil { lsb = &lsbStruct{} } - if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "oracle-release")) { + switch { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "oracle-release")): platform = "oracle" contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "oracle-release")) if err == nil { version = getRedhatishVersion(contents) } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "enterprise-release")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "enterprise-release")): platform = "oracle" contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "enterprise-release")) if err == nil { version = getRedhatishVersion(contents) } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "slackware-version")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "slackware-version")): platform = "slackware" contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "slackware-version")) if err == nil { version = getSlackwareVersion(contents) } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "debian_version")) { - if lsb.ID == "Ubuntu" { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "debian_version")): + switch lsb.ID { + case "Ubuntu": platform = "ubuntu" version = lsb.Release - } else if lsb.ID == "LinuxMint" { + case "LinuxMint": platform = "linuxmint" version = lsb.Release - } else if lsb.ID == "Kylin" { + case "Kylin": platform = "Kylin" version = lsb.Release - } else if lsb.ID == `"Cumulus Linux"` { + case `"Cumulus Linux"`: platform = "cumuluslinux" version = lsb.Release - } else if lsb.ID == "uos" { + case "uos": platform = "uos" version = lsb.Release - } else if lsb.ID 
== "Deepin" { + case "Deepin": platform = "Deepin" version = lsb.Release - } else { + default: if common.PathExistsWithContents("/usr/bin/raspi-config") { platform = "raspbian" } else { @@ -229,65 +227,65 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil version = contents[0] } } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "neokylin-release")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "neokylin-release")): contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "neokylin-release")) if err == nil { version = getRedhatishVersion(contents) platform = getRedhatishPlatform(contents) } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "redhat-release")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "redhat-release")): contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "redhat-release")) if err == nil { version = getRedhatishVersion(contents) platform = getRedhatishPlatform(contents) } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "system-release")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "system-release")): contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "system-release")) if err == nil { version = getRedhatishVersion(contents) platform = getRedhatishPlatform(contents) } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "gentoo-release")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "gentoo-release")): platform = "gentoo" contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "gentoo-release")) if err == nil { version = getRedhatishVersion(contents) } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "SuSE-release")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "SuSE-release")): contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "SuSE-release")) if err == nil { version = getSuseVersion(contents) platform = getSusePlatform(contents) } - // TODO: slackware detecion - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "arch-release")) { + // TODO: slackware detection + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "arch-release")): platform = "arch" version = lsb.Release - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "alpine-release")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "alpine-release")): platform = "alpine" contents, err := common.ReadLines(common.HostEtcWithContext(ctx, "alpine-release")) if err == nil && len(contents) > 0 && contents[0] != "" { version = contents[0] } - } else if common.PathExistsWithContents(common.HostEtcWithContext(ctx, "os-release")) { + case common.PathExistsWithContents(common.HostEtcWithContext(ctx, "os-release")): p, v, err := common.GetOSReleaseWithContext(ctx) if err == nil { platform = p version = v } - } else if lsb.ID == "RedHat" { + case lsb.ID == "RedHat": platform = "redhat" version = lsb.Release - } else if lsb.ID == "Amazon" { + case lsb.ID == "Amazon": platform = "amazon" version = lsb.Release - } else if lsb.ID == "ScientificSL" { + case lsb.ID == "ScientificSL": platform = "scientific" version = lsb.Release - } else if lsb.ID == "XenServer" { + case lsb.ID == "XenServer": platform = "xenserver" version = lsb.Release - } else if lsb.ID != "" { + case lsb.ID != "": platform = strings.ToLower(lsb.ID) version = 
lsb.Release } @@ -319,12 +317,14 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil family = "solus" case "neokylin": family = "neokylin" + case "anolis": + family = "anolis" } return platform, family, version, nil } -func KernelVersionWithContext(ctx context.Context) (version string, err error) { +func KernelVersionWithContext(_ context.Context) (version string, err error) { var utsname unix.Utsname err = unix.Uname(&utsname) if err != nil { @@ -392,147 +392,3 @@ func getSusePlatform(contents []string) string { func VirtualizationWithContext(ctx context.Context) (string, string, error) { return common.VirtualizationWithContext(ctx) } - -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - var err error - - var files []string - - temperatures := make([]TemperatureStat, 0) - - // Only the temp*_input file provides current temperature - // value in millidegree Celsius as reported by the temperature to the device: - // https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface - if files, err = filepath.Glob(common.HostSysWithContext(ctx, "/class/hwmon/hwmon*/temp*_input")); err != nil { - return temperatures, err - } - - if len(files) == 0 { - // CentOS has an intermediate /device directory: - // https://github.com/giampaolo/psutil/issues/971 - if files, err = filepath.Glob(common.HostSysWithContext(ctx, "/class/hwmon/hwmon*/device/temp*_input")); err != nil { - return temperatures, err - } - } - - var warns Warnings - - if len(files) == 0 { // handle distributions without hwmon, like raspbian #391, parse legacy thermal_zone files - files, err = filepath.Glob(common.HostSysWithContext(ctx, "/class/thermal/thermal_zone*/")) - if err != nil { - return temperatures, err - } - for _, file := range files { - // Get the name of the temperature you are reading - name, err := os.ReadFile(filepath.Join(file, "type")) - if err != nil { - warns.Add(err) - continue - } - // Get the temperature reading - current, err := os.ReadFile(filepath.Join(file, "temp")) - if err != nil { - warns.Add(err) - continue - } - temperature, err := strconv.ParseInt(strings.TrimSpace(string(current)), 10, 64) - if err != nil { - warns.Add(err) - continue - } - - temperatures = append(temperatures, TemperatureStat{ - SensorKey: strings.TrimSpace(string(name)), - Temperature: float64(temperature) / 1000.0, - }) - } - return temperatures, warns.Reference() - } - - temperatures = make([]TemperatureStat, 0, len(files)) - - // example directory - // device/ temp1_crit_alarm temp2_crit_alarm temp3_crit_alarm temp4_crit_alarm temp5_crit_alarm temp6_crit_alarm temp7_crit_alarm - // name temp1_input temp2_input temp3_input temp4_input temp5_input temp6_input temp7_input - // power/ temp1_label temp2_label temp3_label temp4_label temp5_label temp6_label temp7_label - // subsystem/ temp1_max temp2_max temp3_max temp4_max temp5_max temp6_max temp7_max - // temp1_crit temp2_crit temp3_crit temp4_crit temp5_crit temp6_crit temp7_crit uevent - for _, file := range files { - var raw []byte - - var temperature float64 - - // Get the base directory location - directory := filepath.Dir(file) - - // Get the base filename prefix like temp1 - basename := strings.Split(filepath.Base(file), "_")[0] - - // Get the base path like /temp1 - basepath := filepath.Join(directory, basename) - - // Get the label of the temperature you are reading - label := "" - - if raw, _ = os.ReadFile(basepath + "_label"); len(raw) != 0 { - // Format the label from "Core 0" to "core_0" - label = 
strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(raw))), " "), "_") - } - - // Get the name of the temperature you are reading - if raw, err = os.ReadFile(filepath.Join(directory, "name")); err != nil { - warns.Add(err) - continue - } - - name := strings.TrimSpace(string(raw)) - - if label != "" { - name = name + "_" + label - } - - // Get the temperature reading - if raw, err = os.ReadFile(file); err != nil { - warns.Add(err) - continue - } - - if temperature, err = strconv.ParseFloat(strings.TrimSpace(string(raw)), 64); err != nil { - warns.Add(err) - continue - } - - // Add discovered temperature sensor to the list - temperatures = append(temperatures, TemperatureStat{ - SensorKey: name, - Temperature: temperature / hostTemperatureScale, - High: optionalValueReadFromFile(basepath+"_max") / hostTemperatureScale, - Critical: optionalValueReadFromFile(basepath+"_crit") / hostTemperatureScale, - }) - } - - return temperatures, warns.Reference() -} - -func optionalValueReadFromFile(filename string) float64 { - var raw []byte - - var err error - - var value float64 - - // Check if file exists - if _, err := os.Stat(filename); os.IsNotExist(err) { - return 0 - } - - if raw, err = os.ReadFile(filename); err != nil { - return 0 - } - - if value, err = strconv.ParseFloat(strings.TrimSpace(string(raw)), 64); err != nil { - return 0 - } - - return value -} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_386.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_386.go similarity index 60% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_386.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_386.go index 46e0c5d5..b7e9301f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // ATTENTION - FILE MANUAL FIXED AFTER CGO. 
// Fixed line: Tv _Ctype_struct_timeval -> Tv UtTv // Created by cgo -godefs, MANUAL FIXED @@ -15,15 +16,15 @@ const ( ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_short int16 //nolint:revive //FIXME + _C_int int32 //nolint:revive //FIXME + _C_long int32 //nolint:revive //FIXME + _C_long_long int64 //nolint:revive //FIXME ) type utmp struct { Type int16 - Pad_cgo_0 [2]byte + Pad_cgo_0 [2]byte //nolint:revive //FIXME Pid int32 Line [32]int8 ID [4]int8 @@ -32,11 +33,11 @@ type utmp struct { Exit exit_status Session int32 Tv UtTv - Addr_v6 [4]int32 - X__unused [20]int8 + Addr_v6 [4]int32 //nolint:revive //FIXME + X__unused [20]int8 //nolint:revive //FIXME } -type exit_status struct { +type exit_status struct { //nolint:revive //FIXME Termination int16 Exit int16 } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_amd64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_amd64.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_amd64.go index 1e574482..480e72d3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_arm.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_arm.go index 7abbbb8a..1b7ee978 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go | sed "s/uint8/int8/g" diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_arm64.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_arm64.go index cd0b4ddc..0e6fc8b7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs types_linux.go package host diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_loong64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_loong64.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_loong64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_loong64.go index edf1be59..c4c8390f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_loong64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_loong64.go @@ -1,8 +1,8 @@ +// SPDX-License-Identifier: BSD-3-Clause // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs host/types_linux.go //go:build linux && loong64 -// +build linux,loong64 package host diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips.go index 50207e5b..8aa049ca 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips64.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips64.go index 50207e5b..8aa049ca 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64le.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips64le.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64le.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips64le.go index 50207e5b..8aa049ca 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mips64le.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_mips64le.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mipsle.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_mipsle.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_mipsle.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_mipsle.go index 50207e5b..8aa049ca 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_mipsle.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_mipsle.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_ppc64.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_ppc64.go index 5b324eff..23f5cb9d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_ppc64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux && ppc64 -// +build linux,ppc64 // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64le.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_ppc64le.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64le.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_ppc64le.go index 51f5bee1..e81f1239 100644 --- 
a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_ppc64le.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_ppc64le.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux && ppc64le -// +build linux,ppc64le // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_riscv64.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_riscv64.go index bb03a0b3..080fdb8d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_riscv64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_s390x.go b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_s390x.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/host/host_linux_s390x.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_linux_s390x.go index 6ea432a6..738af601 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_s390x.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_linux_s390x.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux && s390x -// +build linux,s390x // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_linux.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/host/host_netbsd.go similarity index 56% rename from vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_netbsd.go index 488f1dfc..942f6871 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package host @@ -7,19 +7,20 @@ import ( "context" "strings" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) -func HostIDWithContext(ctx context.Context) (string, error) { +func HostIDWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } -func numProcs(ctx context.Context) (uint64, error) { +func numProcs(_ context.Context) (uint64, error) { return 0, common.ErrNotImplementedError } -func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { +func PlatformInformationWithContext(_ context.Context) (string, string, string, error) { platform := "" family := "" version := "" @@ -36,19 +37,15 @@ func PlatformInformationWithContext(ctx context.Context) (string, string, string return platform, family, version, nil } -func VirtualizationWithContext(ctx context.Context) (string, string, error) { +func VirtualizationWithContext(_ context.Context) (string, string, error) { return "", "", common.ErrNotImplementedError } -func UsersWithContext(ctx context.Context) ([]UserStat, error) { +func UsersWithContext(_ context.Context) ([]UserStat, error) { var ret []UserStat return ret, common.ErrNotImplementedError } -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - return []TemperatureStat{}, common.ErrNotImplementedError -} - func 
KernelVersionWithContext(ctx context.Context) (string, error) { _, _, version, err := PlatformInformationWithContext(ctx) return version, err diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_openbsd.go index 325015c2..f80236b5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package host @@ -12,9 +12,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/process" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/process" ) const ( @@ -23,7 +24,7 @@ const ( UTHostSize = 16 ) -func HostIDWithContext(ctx context.Context) (string, error) { +func HostIDWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } @@ -35,7 +36,7 @@ func numProcs(ctx context.Context) (uint64, error) { return uint64(len(procs)), nil } -func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { +func PlatformInformationWithContext(_ context.Context) (string, string, string, error) { platform := "" family := "" version := "" @@ -52,11 +53,11 @@ func PlatformInformationWithContext(ctx context.Context) (string, string, string return platform, family, version, nil } -func VirtualizationWithContext(ctx context.Context) (string, string, error) { +func VirtualizationWithContext(_ context.Context) (string, string, error) { return "", "", common.ErrNotImplementedError } -func UsersWithContext(ctx context.Context) ([]UserStat, error) { +func UsersWithContext(_ context.Context) ([]UserStat, error) { var ret []UserStat utmpfile := "/var/run/utmp" file, err := os.Open(utmpfile) @@ -70,8 +71,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { return ret, err } - u := Utmp{} - entrySize := int(unsafe.Sizeof(u)) + entrySize := int(unsafe.Sizeof(Utmp{})) count := len(buf) / entrySize for i := 0; i < count; i++ { @@ -95,10 +95,6 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { return ret, nil } -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - return []TemperatureStat{}, common.ErrNotImplementedError -} - func KernelVersionWithContext(ctx context.Context) (string, error) { _, _, version, err := PlatformInformationWithContext(ctx) return version, err diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_386.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_386.go index b299d7ae..df820a4a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs host/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_amd64.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_amd64.go index 2d23b9b7..b1d674e8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_arm.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_arm.go index f0ac57d0..e5f1590f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs host/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_arm64.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_arm64.go index 20fb42dd..d8c1061f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs host/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_riscv64.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_riscv64.go index 7a123b64..584004b2 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs host/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go b/vendor/github.com/shirou/gopsutil/v4/host/host_posix.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/host/host_posix.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_posix.go index e7e0d837..91ab6aee 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_posix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd || netbsd || darwin || solaris -// +build linux freebsd openbsd netbsd darwin solaris package host diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go b/vendor/github.com/shirou/gopsutil/v4/host/host_solaris.go similarity index 70% rename from vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_solaris.go index fef67f83..77cd1ab1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_solaris.go @@ -1,18 +1,20 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build solaris + package host import ( "bufio" "bytes" "context" - "encoding/csv" + "errors" "fmt" - "io" "os" "regexp" "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func HostIDWithContext(ctx context.Context) (string, error) { @@ -30,14 +32,13 @@ func HostIDWithContext(ctx context.Context) (string, error) { line := sc.Text() // If we're in the global zone, rely on the hostname. - if line == "global" { - hostname, err := os.Hostname() - if err == nil { - return hostname, nil - } - } else { + if line != "global" { return strings.TrimSpace(line), nil } + hostname, err := os.Hostname() + if err == nil { + return hostname, nil + } } } } @@ -58,7 +59,7 @@ func HostIDWithContext(ctx context.Context) (string, error) { } // Count number of processes based on the number of entries in /proc -func numProcs(ctx context.Context) (uint64, error) { +func numProcs(_ context.Context) (uint64, error) { dirs, err := os.ReadDir("/proc") if err != nil { return 0, err @@ -66,7 +67,7 @@ func numProcs(ctx context.Context) (uint64, error) { return uint64(len(dirs)), nil } -var kstatMatch = regexp.MustCompile(`([^\s]+)[\s]+([^\s]*)`) +var kstatMatch = regexp.MustCompile(`(\S+)\s+(\S*)`) func BootTimeWithContext(ctx context.Context) (uint64, error) { out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "unix:0:system_misc:boot_time") @@ -83,55 +84,18 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { } func UptimeWithContext(ctx context.Context) (uint64, error) { - bootTime, err := BootTime() + bootTime, err := BootTimeWithContext(ctx) if err != nil { return 0, err } return timeSince(bootTime), nil } -func UsersWithContext(ctx context.Context) ([]UserStat, error) { +func UsersWithContext(_ context.Context) ([]UserStat, error) { return []UserStat{}, common.ErrNotImplementedError } -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - var ret []TemperatureStat - - out, err := invoke.CommandWithContext(ctx, "ipmitool", "-c", "sdr", "list") - if err != nil { - return ret, err - } - - r := csv.NewReader(strings.NewReader(string(out))) - // Output may contain errors, e.g. 
"bmc_send_cmd: Permission denied", don't expect a consistent number of records - r.FieldsPerRecord = -1 - for { - record, err := r.Read() - if err == io.EOF { - break - } - if err != nil { - return ret, err - } - // CPU1 Temp,40,degrees C,ok - if len(record) < 3 || record[1] == "" || record[2] != "degrees C" { - continue - } - v, err := strconv.ParseFloat(record[1], 64) - if err != nil { - return ret, err - } - ts := TemperatureStat{ - SensorKey: strings.TrimSuffix(record[0], " Temp"), - Temperature: v, - } - ret = append(ret, ts) - } - - return ret, nil -} - -func VirtualizationWithContext(ctx context.Context) (string, string, error) { +func VirtualizationWithContext(_ context.Context) (string, string, error) { return "", "", common.ErrNotImplementedError } @@ -175,7 +139,7 @@ func parseUnameOutput(ctx context.Context) (string, string, string, error) { fields := strings.Fields(string(out)) if len(fields) < 3 { - return "", "", "", fmt.Errorf("malformed `uname` output") + return "", "", "", errors.New("malformed `uname` output") } return fields[0], fields[1], fields[2], nil diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go b/vendor/github.com/shirou/gopsutil/v4/host/host_windows.go similarity index 76% rename from vendor/github.com/shirou/gopsutil/v3/host/host_windows.go rename to vendor/github.com/shirou/gopsutil/v4/host/host_windows.go index b83ad6db..99eed3fd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/host/host_windows.go @@ -1,12 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package host import ( "context" "fmt" - "math" "strconv" "strings" "sync/atomic" @@ -14,10 +13,10 @@ import ( "time" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/process" - "github.com/yusufpapurcu/wmi" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/process" ) var ( @@ -57,14 +56,7 @@ type systemInfo struct { wProcessorRevision uint16 } -type msAcpi_ThermalZoneTemperature struct { - Active bool - CriticalTripPoint uint32 - CurrentTemperature uint32 - InstanceName string -} - -func HostIDWithContext(ctx context.Context) (string, error) { +func HostIDWithContext(_ context.Context) (string, error) { // there has been reports of issues on 32bit using golang.org/x/sys/windows/registry, see https://github.com/shirou/gopsutil/pull/312#issuecomment-277422612 // for rationale of using windows.RegOpenKeyEx/RegQueryValueEx instead of registry.OpenKey/GetStringValue var h windows.Handle @@ -88,7 +80,7 @@ func HostIDWithContext(ctx context.Context) (string, error) { hostID := windows.UTF16ToString(regBuf[:]) hostIDLen := len(hostID) if hostIDLen != uuidLen { - return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + return "", fmt.Errorf("HostID incorrect: %q", hostID) } return strings.ToLower(hostID), nil @@ -102,7 +94,7 @@ func numProcs(ctx context.Context) (uint64, error) { return uint64(len(procs)), nil } -func UptimeWithContext(ctx context.Context) (uint64, error) { +func UptimeWithContext(_ context.Context) (uint64, error) { up, err := uptimeMillis() if err != nil { return 0, err @@ -126,7 +118,7 @@ func uptimeMillis() (uint64, error) { // cachedBootTime must be accessed via atomic.Load/StoreUint64 var cachedBootTime uint64 -func BootTimeWithContext(ctx context.Context) (uint64, error) { +func BootTimeWithContext(_ context.Context) (uint64, error) { if enableBootTimeCache { t := 
atomic.LoadUint64(&cachedBootTime) if t != 0 { @@ -144,7 +136,15 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { return t, nil } -func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { +func PlatformInformationWithContext(_ context.Context) (platform, family, version string, err error) { + platform, family, _, displayVersion, err := platformInformation() + if err != nil { + return "", "", "", err + } + return platform, family, displayVersion, nil +} + +func platformInformation() (platform, family, version, displayVersion string, err error) { // GetVersionEx lies on Windows 8.1 and returns as Windows 8 if we don't declare compatibility in manifest // RtlGetVersion bypasses this lying layer and returns the true Windows version // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/nf-wdm-rtlgetversion @@ -153,36 +153,36 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil osInfo.dwOSVersionInfoSize = uint32(unsafe.Sizeof(osInfo)) ret, _, err := procRtlGetVersion.Call(uintptr(unsafe.Pointer(&osInfo))) if ret != 0 { - return + return platform, family, version, displayVersion, err } // Platform var h windows.Handle // like HostIDWithContext(), we query the registry using the raw windows.RegOpenKeyEx/RegQueryValueEx err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`), 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &h) if err != nil { - return + return platform, family, version, displayVersion, err } defer windows.RegCloseKey(h) var bufLen uint32 var valType uint32 err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`ProductName`), nil, &valType, nil, &bufLen) if err != nil { - return + return platform, family, version, displayVersion, err } regBuf := make([]uint16, bufLen/2+1) err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`ProductName`), nil, &valType, (*byte)(unsafe.Pointer(&regBuf[0])), &bufLen) if err != nil { - return + return platform, family, version, displayVersion, err } - platform = windows.UTF16ToString(regBuf[:]) + platform = windows.UTF16ToString(regBuf) if strings.Contains(platform, "Windows 10") { // check build number to determine whether it's actually Windows 11 err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CurrentBuildNumber`), nil, &valType, nil, &bufLen) if err == nil { regBuf = make([]uint16, bufLen/2+1) err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CurrentBuildNumber`), nil, &valType, (*byte)(unsafe.Pointer(&regBuf[0])), &bufLen) if err == nil { - buildNumberStr := windows.UTF16ToString(regBuf[:]) - if buildNumber, err := strconv.Atoi(buildNumberStr); err == nil && buildNumber >= 22000 { + buildNumberStr := windows.UTF16ToString(regBuf) + if buildNumber, err := strconv.ParseInt(buildNumberStr, 10, 32); err == nil && buildNumber >= 22000 { platform = strings.Replace(platform, "Windows 10", "Windows 11", 1) } } @@ -196,7 +196,7 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil regBuf = make([]uint16, bufLen/2+1) err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CSDVersion`), nil, &valType, (*byte)(unsafe.Pointer(&regBuf[0])), &bufLen) if err == nil { - platform += " " + windows.UTF16ToString(regBuf[:]) + platform += " " + windows.UTF16ToString(regBuf) } } @@ -208,6 +208,14 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil
copy((*[4]byte)(unsafe.Pointer(&UBR))[:], regBuf) } + // Get DisplayVersion(ex: 23H2) as platformVersion + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`DisplayVersion`), nil, &valType, nil, &bufLen) + if err == nil { + regBuf := make([]uint16, bufLen/2+1) + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`DisplayVersion`), nil, &valType, (*byte)(unsafe.Pointer(&regBuf[0])), &bufLen) + displayVersion = windows.UTF16ToString(regBuf) + } + // PlatformFamily switch osInfo.wProductType { case 1: @@ -223,54 +231,27 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil osInfo.dwMajorVersion, osInfo.dwMinorVersion, osInfo.dwBuildNumber, UBR, osInfo.dwBuildNumber, UBR) - return platform, family, version, nil + return platform, family, version, displayVersion, nil } -func UsersWithContext(ctx context.Context) ([]UserStat, error) { +func UsersWithContext(_ context.Context) ([]UserStat, error) { var ret []UserStat return ret, common.ErrNotImplementedError } -func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { - var ret []TemperatureStat - var dst []msAcpi_ThermalZoneTemperature - q := wmi.CreateQuery(&dst, "") - if err := common.WMIQueryWithContext(ctx, q, &dst, nil, "root/wmi"); err != nil { - return ret, err - } - - for _, v := range dst { - ts := TemperatureStat{ - SensorKey: v.InstanceName, - Temperature: kelvinToCelsius(v.CurrentTemperature, 2), - } - ret = append(ret, ts) - } - - return ret, nil -} - -func kelvinToCelsius(temp uint32, n int) float64 { - // wmi return temperature Kelvin * 10, so need to divide the result by 10, - // and then minus 273.15 to get °Celsius. - t := float64(temp/10) - 273.15 - n10 := math.Pow10(n) - return math.Trunc((t+0.5/n10)*n10) / n10 -} - -func VirtualizationWithContext(ctx context.Context) (string, string, error) { +func VirtualizationWithContext(_ context.Context) (string, string, error) { return "", "", common.ErrNotImplementedError } -func KernelVersionWithContext(ctx context.Context) (string, error) { - _, _, version, err := PlatformInformationWithContext(ctx) +func KernelVersionWithContext(_ context.Context) (string, error) { + _, _, version, _, err := platformInformation() return version, err } func KernelArch() (string, error) { - var systemInfo systemInfo - procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) + var sInfo systemInfo + procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&sInfo))) const ( PROCESSOR_ARCHITECTURE_INTEL = 0 @@ -279,15 +260,15 @@ func KernelArch() (string, error) { PROCESSOR_ARCHITECTURE_IA64 = 6 PROCESSOR_ARCHITECTURE_AMD64 = 9 ) - switch systemInfo.wProcessorArchitecture { + switch sInfo.wProcessorArchitecture { case PROCESSOR_ARCHITECTURE_INTEL: - if systemInfo.wProcessorLevel < 3 { + if sInfo.wProcessorLevel < 3 { return "i386", nil } - if systemInfo.wProcessorLevel > 6 { + if sInfo.wProcessorLevel > 6 { return "i686", nil } - return fmt.Sprintf("i%d86", systemInfo.wProcessorLevel), nil + return fmt.Sprintf("i%d86", sInfo.wProcessorLevel), nil case PROCESSOR_ARCHITECTURE_ARM: return "arm", nil case PROCESSOR_ARCHITECTURE_ARM64: diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common.go index 5e25e507..36eb1d21 100644 ---
a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common // @@ -14,6 +15,7 @@ import ( "errors" "fmt" "io" + "math" "net/url" "os" "os/exec" @@ -21,16 +23,18 @@ import ( "path/filepath" "reflect" "runtime" + "slices" "strconv" "strings" "time" - "github.com/shirou/gopsutil/v3/common" + "github.com/shirou/gopsutil/v4/common" ) var ( - Timeout = 3 * time.Second - ErrTimeout = errors.New("command timed out") + Timeout = 3 * time.Second + ErrNotImplementedError = errors.New("not implemented yet") + ErrTimeout = errors.New("command timed out") ) type Invoker interface { @@ -46,7 +50,7 @@ func (i Invoke) Command(name string, arg ...string) ([]byte, error) { return i.CommandWithContext(ctx, name, arg...) } -func (i Invoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { +func (Invoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { cmd := exec.CommandContext(ctx, name, arg...) var buf bytes.Buffer @@ -91,12 +95,10 @@ func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) } -func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { +func (i FakeInvoke) CommandWithContext(_ context.Context, name string, arg ...string) ([]byte, error) { return i.Command(name, arg...) } -var ErrNotImplementedError = errors.New("not implemented yet") - // ReadFile reads contents from a file func ReadFile(filename string) (string, error) { content, err := os.ReadFile(filename) @@ -114,7 +116,7 @@ func ReadLines(filename string) ([]string, error) { } // ReadLine reads a file and returns the first occurrence of a line that is prefixed with prefix. -func ReadLine(filename string, prefix string) (string, error) { +func ReadLine(filename, prefix string) (string, error) { f, err := os.Open(filename) if err != nil { return "", err @@ -152,15 +154,15 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { var ret []string r := bufio.NewReader(f) - for i := 0; i < n+int(offset) || n < 0; i++ { + for i := uint(0); i < uint(n)+offset || n < 0; i++ { line, err := r.ReadString('\n') if err != nil { - if err == io.EOF && len(line) > 0 { + if err == io.EOF && line != "" { ret = append(ret, strings.Trim(line, "\n")) } break } - if i < int(offset) { + if i < offset { continue } ret = append(ret, strings.Trim(line, "\n")) @@ -289,27 +291,19 @@ func StringsHas(target []string, src string) bool { // StringsContains checks the src in any string of the target string slice func StringsContains(target []string, src string) bool { - for _, t := range target { - if strings.Contains(t, src) { - return true - } - } - return false + return slices.ContainsFunc(target, func(s string) bool { + return strings.Contains(s, src) + }) } // IntContains checks the src in any int of the target int slice. func IntContains(target []int, src int) bool { - for _, t := range target { - if src == t { - return true - } - } - return false + return slices.Contains(target, src) } // get struct attributes. // This method is used only for debugging platform dependent code. 
-func attributes(m interface{}) map[string]reflect.Type { +func attributes(m any) map[string]reflect.Type { typ := reflect.TypeOf(m) if typ.Kind() == reflect.Ptr { typ = typ.Elem() @@ -348,7 +342,7 @@ func PathExistsWithContents(filename string) bool { // GetEnvWithContext retrieves the environment variable key. If it does not exist it returns the default. // The context may optionally contain a map superseding os.EnvKey. -func GetEnvWithContext(ctx context.Context, key string, dfault string, combineWith ...string) string { +func GetEnvWithContext(ctx context.Context, key, dfault string, combineWith ...string) string { var value string if env, ok := ctx.Value(common.EnvKey).(common.EnvMap); ok { value = env[common.EnvKeyType(key)] @@ -364,7 +358,7 @@ func GetEnvWithContext(ctx context.Context, key string, dfault string, combineWi } // GetEnv retrieves the environment variable key. If it does not exist it returns the default. -func GetEnv(key string, dfault string, combineWith ...string) string { +func GetEnv(key, dfault string, combineWith ...string) string { value := os.Getenv(key) if value == "" { value = dfault @@ -448,7 +442,7 @@ func HostRootWithContext(ctx context.Context, combineWith ...string) string { } // getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running -// sysctl commands (see DoSysctrl). +// sysctl commands. func getSysctrlEnv(env []string) []string { foundLC := false for i, line := range env { @@ -462,3 +456,11 @@ func getSysctrlEnv(env []string) []string { } return env } + +// Round places rounds the number 'val' to 'n' decimal places +func Round(val float64, n int) float64 { + // Calculate the power of 10 to the n + pow10 := math.Pow(10, float64(n)) + // Multiply the value by pow10, round it, then divide it by pow10 + return math.Round(val*pow10) / pow10 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go new file mode 100644 index 00000000..8b756a11 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package common + +import ( + "errors" + "fmt" + "unsafe" + + "github.com/ebitengine/purego" + "golang.org/x/sys/unix" +) + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} + +// Library represents a dynamic library loaded by purego. 
+type Library struct { + addr uintptr + path string + close func() +} + +// library paths +const ( + IOKit = "/System/Library/Frameworks/IOKit.framework/IOKit" + CoreFoundation = "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation" + System = "/usr/lib/libSystem.B.dylib" +) + +func NewLibrary(path string) (*Library, error) { + lib, err := purego.Dlopen(path, purego.RTLD_LAZY|purego.RTLD_GLOBAL) + if err != nil { + return nil, err + } + + closeFunc := func() { + purego.Dlclose(lib) + } + + return &Library{ + addr: lib, + path: path, + close: closeFunc, + }, nil +} + +func (lib *Library) Dlsym(symbol string) (uintptr, error) { + return purego.Dlsym(lib.addr, symbol) +} + +func GetFunc[T any](lib *Library, symbol string) T { + var fptr T + purego.RegisterLibFunc(&fptr, lib.addr, symbol) + return fptr +} + +func (lib *Library) Close() { + lib.close() +} + +// status codes +const ( + KERN_SUCCESS = 0 +) + +// IOKit functions and symbols. +type ( + IOServiceGetMatchingServiceFunc func(mainPort uint32, matching uintptr) uint32 + IOServiceGetMatchingServicesFunc func(mainPort uint32, matching uintptr, existing *uint32) int + IOServiceMatchingFunc func(name string) unsafe.Pointer + IOServiceOpenFunc func(service, owningTask, connType uint32, connect *uint32) int + IOServiceCloseFunc func(connect uint32) int + IOIteratorNextFunc func(iterator uint32) uint32 + IORegistryEntryGetNameFunc func(entry uint32, name CStr) int + IORegistryEntryGetParentEntryFunc func(entry uint32, plane string, parent *uint32) int + IORegistryEntryCreateCFPropertyFunc func(entry uint32, key, allocator uintptr, options uint32) unsafe.Pointer + IORegistryEntryCreateCFPropertiesFunc func(entry uint32, properties unsafe.Pointer, allocator uintptr, options uint32) int + IOObjectConformsToFunc func(object uint32, className string) bool + IOObjectReleaseFunc func(object uint32) int + IOConnectCallStructMethodFunc func(connection, selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int + + IOHIDEventSystemClientCreateFunc func(allocator uintptr) unsafe.Pointer + IOHIDEventSystemClientSetMatchingFunc func(client, match uintptr) int + IOHIDServiceClientCopyEventFunc func(service uintptr, eventType int64, + options int32, timeout int64) unsafe.Pointer + IOHIDServiceClientCopyPropertyFunc func(service, property uintptr) unsafe.Pointer + IOHIDEventGetFloatValueFunc func(event uintptr, field int32) float64 + IOHIDEventSystemClientCopyServicesFunc func(client uintptr) unsafe.Pointer +) + +const ( + IOServiceGetMatchingServiceSym = "IOServiceGetMatchingService" + IOServiceGetMatchingServicesSym = "IOServiceGetMatchingServices" + IOServiceMatchingSym = "IOServiceMatching" + IOServiceOpenSym = "IOServiceOpen" + IOServiceCloseSym = "IOServiceClose" + IOIteratorNextSym = "IOIteratorNext" + IORegistryEntryGetNameSym = "IORegistryEntryGetName" + IORegistryEntryGetParentEntrySym = "IORegistryEntryGetParentEntry" + IORegistryEntryCreateCFPropertySym = "IORegistryEntryCreateCFProperty" + IORegistryEntryCreateCFPropertiesSym = "IORegistryEntryCreateCFProperties" + IOObjectConformsToSym = "IOObjectConformsTo" + IOObjectReleaseSym = "IOObjectRelease" + IOConnectCallStructMethodSym = "IOConnectCallStructMethod" + + IOHIDEventSystemClientCreateSym = "IOHIDEventSystemClientCreate" + IOHIDEventSystemClientSetMatchingSym = "IOHIDEventSystemClientSetMatching" + IOHIDServiceClientCopyEventSym = "IOHIDServiceClientCopyEvent" + IOHIDServiceClientCopyPropertySym = "IOHIDServiceClientCopyProperty" + 
IOHIDEventGetFloatValueSym = "IOHIDEventGetFloatValue" + IOHIDEventSystemClientCopyServicesSym = "IOHIDEventSystemClientCopyServices" +) + +const ( + KIOMainPortDefault = 0 + + KIOHIDEventTypeTemperature = 15 + + KNilOptions = 0 +) + +const ( + KIOMediaWholeKey = "Media" + KIOServicePlane = "IOService" +) + +// CoreFoundation functions and symbols. +type ( + CFGetTypeIDFunc func(cf uintptr) int32 + CFNumberCreateFunc func(allocator uintptr, theType int32, valuePtr uintptr) unsafe.Pointer + CFNumberGetValueFunc func(num uintptr, theType int32, valuePtr uintptr) bool + CFDictionaryCreateFunc func(allocator uintptr, keys, values *unsafe.Pointer, numValues int32, + keyCallBacks, valueCallBacks uintptr) unsafe.Pointer + CFDictionaryAddValueFunc func(theDict, key, value uintptr) + CFDictionaryGetValueFunc func(theDict, key uintptr) unsafe.Pointer + CFArrayGetCountFunc func(theArray uintptr) int32 + CFArrayGetValueAtIndexFunc func(theArray uintptr, index int32) unsafe.Pointer + CFStringCreateMutableFunc func(alloc uintptr, maxLength int32) unsafe.Pointer + CFStringGetLengthFunc func(theString uintptr) int32 + CFStringGetCStringFunc func(theString uintptr, buffer CStr, bufferSize int32, encoding uint32) + CFStringCreateWithCStringFunc func(alloc uintptr, cStr string, encoding uint32) unsafe.Pointer + CFDataGetLengthFunc func(theData uintptr) int32 + CFDataGetBytePtrFunc func(theData uintptr) unsafe.Pointer + CFReleaseFunc func(cf uintptr) +) + +const ( + CFGetTypeIDSym = "CFGetTypeID" + CFNumberCreateSym = "CFNumberCreate" + CFNumberGetValueSym = "CFNumberGetValue" + CFDictionaryCreateSym = "CFDictionaryCreate" + CFDictionaryAddValueSym = "CFDictionaryAddValue" + CFDictionaryGetValueSym = "CFDictionaryGetValue" + CFArrayGetCountSym = "CFArrayGetCount" + CFArrayGetValueAtIndexSym = "CFArrayGetValueAtIndex" + CFStringCreateMutableSym = "CFStringCreateMutable" + CFStringGetLengthSym = "CFStringGetLength" + CFStringGetCStringSym = "CFStringGetCString" + CFStringCreateWithCStringSym = "CFStringCreateWithCString" + CFDataGetLengthSym = "CFDataGetLength" + CFDataGetBytePtrSym = "CFDataGetBytePtr" + CFReleaseSym = "CFRelease" +) + +const ( + KCFStringEncodingUTF8 = 0x08000100 + KCFNumberSInt64Type = 4 + KCFNumberIntType = 9 + KCFAllocatorDefault = 0 +) + +// Kernel functions and symbols. +type MachTimeBaseInfo struct { + Numer uint32 + Denom uint32 +} + +type ( + HostProcessorInfoFunc func(host uint32, flavor int32, outProcessorCount *uint32, outProcessorInfo uintptr, + outProcessorInfoCnt *uint32) int + HostStatisticsFunc func(host uint32, flavor int32, hostInfoOut uintptr, hostInfoOutCnt *uint32) int + MachHostSelfFunc func() uint32 + MachTaskSelfFunc func() uint32 + MachTimeBaseInfoFunc func(info uintptr) int + VMDeallocateFunc func(targetTask uint32, vmAddress, vmSize uintptr) int +) + +const ( + HostProcessorInfoSym = "host_processor_info" + HostStatisticsSym = "host_statistics" + MachHostSelfSym = "mach_host_self" + MachTaskSelfSym = "mach_task_self" + MachTimeBaseInfoSym = "mach_timebase_info" + VMDeallocateSym = "vm_deallocate" +) + +const ( + CTL_KERN = 1 + KERN_ARGMAX = 8 + KERN_PROCARGS2 = 49 + + HOST_VM_INFO = 2 + HOST_CPU_LOAD_INFO = 3 + + HOST_VM_INFO_COUNT = 0xf +) + +// System functions and symbols. 
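Editor's sketch, not part of the vendored file: how the Library/GetFunc wrappers and the symbol constants above are meant to be combined. It assumes mach_timebase_info is exported by libSystem (as the constants suggest) and, since these identifiers live in an internal package, it would only compile inside internal/common on darwin.

//go:build darwin

package common

import (
	"fmt"
	"unsafe"
)

// exampleMachTimebase resolves mach_timebase_info through purego and calls it
// via the typed wrapper returned by GetFunc.
func exampleMachTimebase() error {
	lib, err := NewLibrary(System) // /usr/lib/libSystem.B.dylib
	if err != nil {
		return err
	}
	defer lib.Close()

	timebase := GetFunc[MachTimeBaseInfoFunc](lib, MachTimeBaseInfoSym)
	var info MachTimeBaseInfo
	if status := timebase(uintptr(unsafe.Pointer(&info))); status != KERN_SUCCESS {
		return fmt.Errorf("mach_timebase_info failed: %d", status)
	}
	fmt.Printf("timebase %d/%d\n", info.Numer, info.Denom)
	return nil
}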
+type ( + ProcPidPathFunc func(pid int32, buffer uintptr, bufferSize uint32) int32 + ProcPidInfoFunc func(pid, flavor int32, arg uint64, buffer uintptr, bufferSize int32) int32 +) + +const ( + SysctlSym = "sysctl" + ProcPidPathSym = "proc_pidpath" + ProcPidInfoSym = "proc_pidinfo" +) + +const ( + MAXPATHLEN = 1024 + PROC_PIDPATHINFO_MAXSIZE = 4 * MAXPATHLEN + PROC_PIDTASKINFO = 4 + PROC_PIDVNODEPATHINFO = 9 +) + +// SMC represents a SMC instance. +type SMC struct { + lib *Library + conn uint32 + callStruct IOConnectCallStructMethodFunc +} + +const ioServiceSMC = "AppleSMC" + +const ( + KSMCUserClientOpen = 0 + KSMCUserClientClose = 1 + KSMCHandleYPCEvent = 2 + KSMCReadKey = 5 + KSMCWriteKey = 6 + KSMCGetKeyCount = 7 + KSMCGetKeyFromIndex = 8 + KSMCGetKeyInfo = 9 +) + +const ( + KSMCSuccess = 0 + KSMCError = 1 + KSMCKeyNotFound = 132 +) + +func NewSMC(ioKit *Library) (*SMC, error) { + if ioKit.path != IOKit { + return nil, errors.New("library is not IOKit") + } + + ioServiceGetMatchingService := GetFunc[IOServiceGetMatchingServiceFunc](ioKit, IOServiceGetMatchingServiceSym) + ioServiceMatching := GetFunc[IOServiceMatchingFunc](ioKit, IOServiceMatchingSym) + ioServiceOpen := GetFunc[IOServiceOpenFunc](ioKit, IOServiceOpenSym) + ioObjectRelease := GetFunc[IOObjectReleaseFunc](ioKit, IOObjectReleaseSym) + machTaskSelf := GetFunc[MachTaskSelfFunc](ioKit, MachTaskSelfSym) + + ioConnectCallStructMethod := GetFunc[IOConnectCallStructMethodFunc](ioKit, IOConnectCallStructMethodSym) + + service := ioServiceGetMatchingService(0, uintptr(ioServiceMatching(ioServiceSMC))) + if service == 0 { + return nil, fmt.Errorf("ERROR: %s NOT FOUND", ioServiceSMC) + } + + var conn uint32 + if result := ioServiceOpen(service, machTaskSelf(), 0, &conn); result != 0 { + return nil, errors.New("ERROR: IOServiceOpen failed") + } + + ioObjectRelease(service) + return &SMC{ + lib: ioKit, + conn: conn, + callStruct: ioConnectCallStructMethod, + }, nil +} + +func (s *SMC) CallStruct(selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int { + return s.callStruct(s.conn, selector, inputStruct, inputStructCnt, outputStruct, outputStructCnt) +} + +func (s *SMC) Close() error { + ioServiceClose := GetFunc[IOServiceCloseFunc](s.lib, IOServiceCloseSym) + + if result := ioServiceClose(s.conn); result != 0 { + return errors.New("ERROR: IOServiceClose failed") + } + return nil +} + +type CStr []byte + +func NewCStr(length int32) CStr { + return make(CStr, length) +} + +func (s CStr) Length() int32 { + // Include null terminator to make CFStringGetCString properly functions + return int32(len(s)) + 1 +} + +func (s CStr) Ptr() *byte { + if len(s) < 1 { + return nil + } + + return &s[0] +} + +func (s CStr) Addr() uintptr { + return uintptr(unsafe.Pointer(s.Ptr())) +} + +func (s CStr) GoString() string { + if s == nil { + return "" + } + + var length int + for _, char := range s { + if char == '\x00' { + break + } + length++ + } + return string(s[:length]) +} + +// https://github.com/ebitengine/purego/blob/main/internal/strings/strings.go#L26 +func GoString(cStr *byte) string { + if cStr == nil { + return "" + } + var length int + for *(*byte)(unsafe.Add(unsafe.Pointer(cStr), uintptr(length))) != '\x00' { + length++ + } + return string(unsafe.Slice(cStr, length)) +} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go similarity index 74% rename from 
vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go index f590e2e6..7a40a40c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd -// +build freebsd openbsd package common import ( "fmt" - "os" - "os/exec" - "strings" "unsafe" "golang.org/x/sys/unix" @@ -28,20 +25,6 @@ func SysctlUint(mib string) (uint64, error) { return 0, fmt.Errorf("unexpected size: %s, %d", mib, len(buf)) } -func DoSysctrl(mib string) ([]string, error) { - cmd := exec.Command("sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - func CallSyscall(mib []int32) ([]byte, uint64, error) { mibptr := unsafe.Pointer(&mib[0]) miblen := uint64(len(mib)) diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go similarity index 82% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go index a429e16a..a2473f41 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go @@ -1,13 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package common import ( "context" - "fmt" + "errors" "os" - "os/exec" "path/filepath" "strconv" "strings" @@ -20,20 +19,6 @@ import ( // cachedBootTime must be accessed via atomic.Load/StoreUint64 var cachedBootTime uint64 -func DoSysctrl(mib string) ([]string, error) { - cmd := exec.Command("sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - func NumProcs() (uint64, error) { return NumProcsWithContext(context.Background()) } @@ -90,6 +75,8 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if enableCache { atomic.StoreUint64(&cachedBootTime, t) } + + return t, nil } filename := HostProcWithContext(ctx, "uptime") @@ -97,15 +84,16 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if err != nil { return handleBootTimeFileReadErr(err) } + currentTime := float64(time.Now().UnixNano()) / float64(time.Second) + if len(lines) != 1 { - return 0, fmt.Errorf("wrong uptime format") + return 0, errors.New("wrong uptime format") } f := strings.Fields(lines[0]) b, err := strconv.ParseFloat(f[0], 64) if err != nil { return 0, err } - currentTime := float64(time.Now().UnixNano()) / float64(time.Second) t := currentTime - b if enableCache { @@ -116,18 +104,18 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) } func handleBootTimeFileReadErr(err error) (uint64, error) { - if os.IsPermission(err) { - var info syscall.Sysinfo_t - err := syscall.Sysinfo(&info) - if err != nil { - return 0, err - } - - currentTime := time.Now().UnixNano() 
/ int64(time.Second) - t := currentTime - int64(info.Uptime) - return uint64(t), nil + if !os.IsPermission(err) { + return 0, err } - return 0, err + var info syscall.Sysinfo_t + err = syscall.Sysinfo(&info) + if err != nil { + return 0, err + } + + currentTime := time.Now().UnixNano() / int64(time.Second) + t := currentTime - int64(info.Uptime) + return uint64(t), nil } func readBootTimeStat(ctx context.Context) (uint64, error) { @@ -139,7 +127,7 @@ func readBootTimeStat(ctx context.Context) (uint64, error) { if strings.HasPrefix(line, "btime") { f := strings.Fields(line) if len(f) != 2 { - return 0, fmt.Errorf("wrong btime format") + return 0, errors.New("wrong btime format") } b, err := strconv.ParseInt(f[1], 10, 64) if err != nil { @@ -148,7 +136,7 @@ func readBootTimeStat(ctx context.Context) (uint64, error) { t := uint64(b) return t, nil } - return 0, fmt.Errorf("could not find btime") + return 0, errors.New("could not find btime") } func Virtualization() (string, string, error) { @@ -193,19 +181,20 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { if PathExists(filename) { contents, err := ReadLines(filename) if err == nil { - if StringsContains(contents, "kvm") { + switch { + case StringsContains(contents, "kvm"): system = "kvm" role = "host" - } else if StringsContains(contents, "hv_util") { + case StringsContains(contents, "hv_util"): system = "hyperv" role = "guest" - } else if StringsContains(contents, "vboxdrv") { + case StringsContains(contents, "vboxdrv"): system = "vbox" role = "host" - } else if StringsContains(contents, "vboxguest") { + case StringsContains(contents, "vboxguest"): system = "vbox" role = "guest" - } else if StringsContains(contents, "vmware") { + case StringsContains(contents, "vmware"): system = "vmware" role = "guest" } @@ -270,16 +259,17 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { if PathExists(filepath.Join(filename, "self", "cgroup")) { contents, err := ReadLines(filepath.Join(filename, "self", "cgroup")) if err == nil { - if StringsContains(contents, "lxc") { + switch { + case StringsContains(contents, "lxc"): system = "lxc" role = "guest" - } else if StringsContains(contents, "docker") { + case StringsContains(contents, "docker"): system = "docker" role = "guest" - } else if StringsContains(contents, "machine-rkt") { + case StringsContains(contents, "machine-rkt"): system = "rkt" role = "guest" - } else if PathExists("/usr/bin/lxc-version") { + case PathExists("/usr/bin/lxc-version"): system = "lxc" role = "host" } @@ -312,11 +302,11 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { return system, role, nil } -func GetOSRelease() (platform string, version string, err error) { +func GetOSRelease() (platform, version string, err error) { return GetOSReleaseWithContext(context.Background()) } -func GetOSReleaseWithContext(ctx context.Context) (platform string, version string, err error) { +func GetOSReleaseWithContext(ctx context.Context) (platform, version string, err error) { contents, err := ReadLines(HostEtcWithContext(ctx, "os-release")) if err != nil { return "", "", nil // return empty diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go similarity index 66% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go index efbc710a..52796ddb 100644 
--- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go @@ -1,31 +1,14 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package common import ( - "os" - "os/exec" - "strings" "unsafe" "golang.org/x/sys/unix" ) -func DoSysctrl(mib string) ([]string, error) { - cmd := exec.Command("sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - func CallSyscall(mib []int32) ([]byte, uint64, error) { mibptr := unsafe.Pointer(&mib[0]) miblen := uint64(len(mib)) diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go similarity index 66% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go index 58d76f33..df44ac04 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go @@ -1,31 +1,14 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package common import ( - "os" - "os/exec" - "strings" "unsafe" "golang.org/x/sys/unix" ) -func DoSysctrl(mib string) ([]string, error) { - cmd := exec.Command("sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - func CallSyscall(mib []int32) ([]byte, uint64, error) { mibptr := unsafe.Pointer(&mib[0]) miblen := uint64(len(mib)) diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go similarity index 61% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go index 4af7e5c2..2ccb3760 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || darwin || openbsd -// +build linux freebsd darwin openbsd package common @@ -33,30 +33,10 @@ func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args .. 
var ret []string for _, l := range lines[1:] { - if len(l) == 0 { + if l == "" { continue } ret = append(ret, l) } return ret, nil } - -func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) { - out, err := invoke.CommandWithContext(ctx, "pgrep", "-P", strconv.Itoa(int(pid))) - if err != nil { - return []int32{}, err - } - lines := strings.Split(string(out), "\n") - ret := make([]int32, 0, len(lines)) - for _, l := range lines { - if len(l) == 0 { - continue - } - i, err := strconv.ParseInt(l, 10, 32) - if err != nil { - continue - } - ret = append(ret, int32(i)) - } - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go index 301b2315..31df6efe 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package common @@ -17,19 +17,19 @@ import ( ) // for double values -type PDH_FMT_COUNTERVALUE_DOUBLE struct { +type PDH_FMT_COUNTERVALUE_DOUBLE struct { //nolint:revive //FIXME CStatus uint32 DoubleValue float64 } // for 64 bit integer values -type PDH_FMT_COUNTERVALUE_LARGE struct { +type PDH_FMT_COUNTERVALUE_LARGE struct { //nolint:revive //FIXME CStatus uint32 LargeValue int64 } // for long values -type PDH_FMT_COUNTERVALUE_LONG struct { +type PDH_FMT_COUNTERVALUE_LONG struct { //nolint:revive //FIXME CStatus uint32 LongValue int32 padding [4]byte @@ -69,6 +69,7 @@ var ( ModNt = windows.NewLazySystemDLL("ntdll.dll") ModPdh = windows.NewLazySystemDLL("pdh.dll") ModPsapi = windows.NewLazySystemDLL("psapi.dll") + ModPowrProf = windows.NewLazySystemDLL("powrprof.dll") ProcGetSystemTimes = Modkernel32.NewProc("GetSystemTimes") ProcNtQuerySystemInformation = ModNt.NewProc("NtQuerySystemInformation") @@ -197,7 +198,7 @@ func ProcessorQueueLengthCounter() (*Win32PerformanceCounter, error) { } // WMIQueryWithContext - wraps wmi.Query with a timed-out context to avoid hanging -func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, connectServerArgs ...interface{}) error { +func WMIQueryWithContext(ctx context.Context, query string, dst any, connectServerArgs ...any) error { if _, ok := ctx.Deadline(); !ok { ctxTimeout, cancel := context.WithTimeout(ctx, Timeout) defer cancel() @@ -233,7 +234,7 @@ func ConvertDOSPath(p string) string { ret, _, _ := procQueryDosDeviceW.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(szDeviceName))), uintptr(unsafe.Pointer(&szTarget[0])), uintptr(len(szTarget))) - if ret != 0 && windows.UTF16ToString(szTarget[:]) == rawDrive { + if ret != 0 && windows.UTF16ToString(szTarget) == rawDrive { return filepath.Join(szDeviceName, p[len(rawDrive):]) } } @@ -273,19 +274,19 @@ type SystemExtendedHandleInformation struct { // CallWithExpandingBuffer https://github.com/hillu/go-ntdll func CallWithExpandingBuffer(fn func() NtStatus, buf *[]byte, resultLength *uint32) NtStatus { for { - if st := fn(); st == STATUS_BUFFER_OVERFLOW || st == STATUS_BUFFER_TOO_SMALL || st == STATUS_INFO_LENGTH_MISMATCH { + st := fn() + if st == STATUS_BUFFER_OVERFLOW || st == STATUS_BUFFER_TOO_SMALL || st == STATUS_INFO_LENGTH_MISMATCH { if 
int(*resultLength) <= cap(*buf) { (*reflect.SliceHeader)(unsafe.Pointer(buf)).Len = int(*resultLength) } else { *buf = make([]byte, int(*resultLength)) } continue - } else { - if !st.IsError() { - *buf = (*buf)[:int(*resultLength)] - } - return st } + if !st.IsError() { + *buf = (*buf)[:int(*resultLength)] + } + return st } } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go index 147cfdc4..113ff2e9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "unsafe" diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/readlink_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/readlink_linux.go new file mode 100644 index 00000000..ea2d4677 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/readlink_linux.go @@ -0,0 +1,53 @@ +package common + +import ( + "errors" + "os" + "sync" + "syscall" +) + +var bufferPool = sync.Pool{ + New: func() any { + b := make([]byte, syscall.PathMax) + return &b + }, +} + +// The following three functions are copied from stdlib. + +// ignoringEINTR2 is ignoringEINTR, but returning an additional value. +func ignoringEINTR2[T any](fn func() (T, error)) (T, error) { + for { + v, err := fn() + if !errors.Is(err, syscall.EINTR) { + return v, err + } + } +} + +// Many functions in package syscall return a count of -1 instead of 0. +// Using fixCount(call()) instead of call() corrects the count. +func fixCount(n int, err error) (int, error) { + if n < 0 { + n = 0 + } + return n, err +} + +// Readlink behaves like os.Readlink but caches the buffer passed to syscall.Readlink. 
+func Readlink(name string) (string, error) { + b := bufferPool.Get().(*[]byte) + + n, err := ignoringEINTR2(func() (int, error) { + return fixCount(syscall.Readlink(name, *b)) + }) + if err != nil { + bufferPool.Put(b) + return "", &os.PathError{Op: "readlink", Path: name, Err: err} + } + + result := string((*b)[:n]) + bufferPool.Put(b) + return result, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go similarity index 89% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go index 94cedfd3..504f13ff 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import ( diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go new file mode 100644 index 00000000..e09768f3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +import ( + "fmt" + "strings" +) + +const ( + maxWarnings = 100 // An arbitrary limit to avoid excessive memory usage, it has no sense to store hundreds of errors + tooManyErrorsMessage = "too many errors reported, next errors were discarded" + numberOfWarningsMessage = "Number of warnings:" +) + +type Warnings struct { + List []error + tooManyErrors bool + Verbose bool +} + +func (w *Warnings) Add(err error) { + if len(w.List) >= maxWarnings { + w.tooManyErrors = true + return + } + w.List = append(w.List, err) +} + +func (w *Warnings) Reference() error { + if len(w.List) > 0 { + return w + } + return nil +} + +func (w *Warnings) Error() string { + if w.Verbose { + str := "" + var sb strings.Builder + for i, e := range w.List { + sb.WriteString(fmt.Sprintf("\tError %d: %s\n", i, e.Error())) + } + str += sb.String() + if w.tooManyErrors { + str += fmt.Sprintf("\t%s\n", tooManyErrorsMessage) + } + return str + } + if w.tooManyErrors { + return fmt.Sprintf("%s > %v - %s", numberOfWarningsMessage, maxWarnings, tooManyErrorsMessage) + } + return fmt.Sprintf("%s %v", numberOfWarningsMessage, len(w.List)) +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go new file mode 100644 index 00000000..659b6557 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package mem + +import ( + "context" + "encoding/json" +) + +type ExVirtualMemory struct { + ActiveFile uint64 `json:"activefile"` + InactiveFile uint64 `json:"inactivefile"` + ActiveAnon uint64 `json:"activeanon"` + InactiveAnon uint64 `json:"inactiveanon"` + Unevictable uint64 `json:"unevictable"` +} + +func (v ExVirtualMemory) String() string { + s, _ := json.Marshal(v) + return string(s) +} + +type ExLinux struct{} + +func NewExLinux() *ExLinux { + return &ExLinux{} +} + +func (ex *ExLinux) VirtualMemory() (*ExVirtualMemory, error) { + return ex.VirtualMemoryWithContext(context.Background()) +} + +func (*ExLinux) VirtualMemoryWithContext(ctx context.Context) (*ExVirtualMemory, error) { + _, vmEx, err := fillFromMeminfoWithContext(ctx) + if err != nil { + return nil, err + } + return vmEx, nil +} diff --git 
a/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go new file mode 100644 index 00000000..907143d3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package mem + +import ( + "unsafe" +) + +// ExVirtualMemory represents Windows specific information +// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex +// https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-performance_information +type ExVirtualMemory struct { + CommitLimit uint64 `json:"commitLimit"` + CommitTotal uint64 `json:"commitTotal"` + VirtualTotal uint64 `json:"virtualTotal"` + VirtualAvail uint64 `json:"virtualAvail"` + PhysTotal uint64 `json:"physTotal"` + PhysAvail uint64 `json:"physAvail"` + PageFileTotal uint64 `json:"pageFileTotal"` + PageFileAvail uint64 `json:"pageFileAvail"` +} + +type ExWindows struct{} + +func NewExWindows() *ExWindows { + return &ExWindows{} +} + +func (*ExWindows) VirtualMemory() (*ExVirtualMemory, error) { + var memInfo memoryStatusEx + memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + // If mem == 0 since this is an error according to GlobalMemoryStatusEx documentation + // In that case, use err which is constructed from GetLastError(), + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + mem, _, err := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return nil, err + } + + var perfInfo performanceInformation + perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) + // Analogous to above: perf == 0 is an error according to the GetPerformanceInfo documentation, + // use err in that case + perf, _, err := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) + if perf == 0 { + return nil, err + } + + ret := &ExVirtualMemory{ + CommitLimit: perfInfo.commitLimit * perfInfo.pageSize, + CommitTotal: perfInfo.commitTotal * perfInfo.pageSize, + VirtualTotal: memInfo.ullTotalVirtual, + VirtualAvail: memInfo.ullAvailVirtual, + PhysTotal: memInfo.ullTotalPhys, + PhysAvail: memInfo.ullAvailPhys, + PageFileTotal: memInfo.ullTotalPageFile, + PageFileAvail: memInfo.ullAvailPageFile, + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem.go index edaf268b..01932ddf 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go @@ -1,9 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause package mem import ( "encoding/json" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} @@ -47,7 +48,7 @@ type VirtualMemoryStat struct { Laundry uint64 `json:"laundry"` // Linux specific numbers - // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html + // https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-meminfo // https://www.kernel.org/doc/Documentation/filesystems/proc.txt // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting // https://www.kernel.org/doc/Documentation/vm/transhuge.txt diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go 
b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go similarity index 58% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go index 22a6a4e9..ac2c39dd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go @@ -1,10 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package mem import ( "context" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { @@ -14,3 +16,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } + +func SwapDevices() ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go index 67e11dff..2d03dd0c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go index 027879d9..bc3c0ed3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package mem @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go index ef867d74..4f3e57c0 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd || netbsd -// +build freebsd openbsd netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go new file mode 100644 index 00000000..7d96a3bb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package mem + +import ( + "context" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func getHwMemsize() (uint64, error) { + total, err := unix.SysctlUint64("hw.memsize") + if err != nil { + return 0, err + } + return total, nil +} + +// xsw_usage in sys/sysctl.h +type swapUsage struct { + Total uint64 + Avail uint64 + Used uint64 + Pagesize int32 + Encrypted bool +} + +// SwapMemory returns swapinfo. 
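As a downstream-facing note on the v3 → v4 move this vendor update performs: for typical callers only the import path changes. A minimal, hedged sketch (field names are taken from the structs shown elsewhere in this patch):

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/mem" // was github.com/shirou/gopsutil/v3/mem
)

func main() {
	vm, err := mem.VirtualMemory()
	if err != nil {
		panic(err)
	}
	fmt.Printf("total=%d used=%.1f%%\n", vm.Total, vm.UsedPercent)

	// Swap figures come from the platform-specific implementations above.
	if swap, err := mem.SwapMemory(); err == nil {
		fmt.Printf("swap total=%d used=%d\n", swap.Total, swap.Used)
	}
}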
+func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { + // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go + var ret *SwapMemoryStat + + value, err := unix.SysctlRaw("vm.swapusage") + if err != nil { + return ret, err + } + if len(value) != 32 { + return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value)) + } + swap := (*swapUsage)(unsafe.Pointer(&value[0])) + + u := float64(0) + if swap.Total != 0 { + u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0 + } + + ret = &SwapMemoryStat{ + Total: swap.Total, + Used: swap.Used, + Free: swap.Avail, + UsedPercent: u, + } + + return ret, nil +} + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(_ context.Context) ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} + +type vmStatisticsData struct { + freeCount uint32 + activeCount uint32 + inactiveCount uint32 + wireCount uint32 + _ [44]byte // Not used here +} + +// VirtualMemory returns VirtualmemoryStat. +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { + machLib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + defer machLib.Close() + + hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + + count := uint32(common.HOST_VM_INFO_COUNT) + var vmstat vmStatisticsData + + status := hostStatistics(machHostSelf(), common.HOST_VM_INFO, + uintptr(unsafe.Pointer(&vmstat)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + pageSizeAddr, _ := machLib.Dlsym("vm_kernel_page_size") + pageSize := **(**uint64)(unsafe.Pointer(&pageSizeAddr)) + total, err := getHwMemsize() + if err != nil { + return nil, err + } + totalCount := uint32(total / pageSize) + + availableCount := vmstat.inactiveCount + vmstat.freeCount + usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) + + usedCount := totalCount - availableCount + + return &VirtualMemoryStat{ + Total: total, + Available: pageSize * uint64(availableCount), + Used: pageSize * uint64(usedCount), + UsedPercent: usedPercent, + Free: pageSize * uint64(vmstat.freeCount), + Active: pageSize * uint64(vmstat.activeCount), + Inactive: pageSize * uint64(vmstat.inactiveCount), + Wired: pageSize * uint64(vmstat.wireCount), + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go similarity index 62% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go index 697fd870..74283a2b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go @@ -1,19 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix && !netbsd -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix,!netbsd package mem import 
( "context" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { return nil, common.ErrNotImplementedError } @@ -21,7 +21,7 @@ func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { return nil, common.ErrNotImplementedError } @@ -29,6 +29,6 @@ func SwapDevices() ([]*SwapDevice, error) { return SwapDevicesWithContext(context.Background()) } -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { +func SwapDevicesWithContext(_ context.Context) ([]*SwapDevice, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go index 9a56785b..dbe6d919 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package mem @@ -8,15 +8,16 @@ import ( "errors" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { pageSize, err := common.SysctlUint("vm.stats.vm.v_page_size") if err != nil { return nil, err @@ -85,7 +86,6 @@ func SwapMemory() (*SwapMemoryStat, error) { } // Constants from vm/vm_param.h -// nolint: golint const ( XSWDEV_VERSION11 = 1 XSWDEV_VERSION = 2 @@ -110,7 +110,7 @@ type xswdev11 struct { Used int32 // Used is the number of blocks used } -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { // FreeBSD can have multiple swap devices so we total them up i, err := common.SysctlUint("vm.nswapdev") if err != nil { @@ -139,7 +139,8 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { // first, try to parse with version 2 xsw := (*xswdev)(unsafe.Pointer(&buf[0])) - if xsw.Version == XSWDEV_VERSION11 { + switch { + case xsw.Version == XSWDEV_VERSION11: // this is version 1, so try to parse again xsw := (*xswdev11)(unsafe.Pointer(&buf[0])) if xsw.Version != XSWDEV_VERSION11 { @@ -147,9 +148,9 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { } s.Total += uint64(xsw.NBlks) s.Used += uint64(xsw.Used) - } else if xsw.Version != XSWDEV_VERSION { + case xsw.Version != XSWDEV_VERSION: return nil, errors.New("xswdev version mismatch") - } else { + default: s.Total += uint64(xsw.NBlks) s.Used += uint64(xsw.Used) } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go similarity index 89% 
rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go index 214a91e4..4b53b4a0 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go @@ -1,12 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package mem import ( "bufio" "context" - "encoding/json" "fmt" "io" "math" @@ -16,21 +15,15 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -type VirtualMemoryExStat struct { - ActiveFile uint64 `json:"activefile"` - InactiveFile uint64 `json:"inactivefile"` - ActiveAnon uint64 `json:"activeanon"` - InactiveAnon uint64 `json:"inactiveanon"` - Unevictable uint64 `json:"unevictable"` -} - -func (v VirtualMemoryExStat) String() string { - s, _ := json.Marshal(v) - return string(s) -} +// WillBeDeletedOptOutMemAvailableCalc is a context key to opt out of calculating Mem.Used. +// This is not documented, and will be removed in Mar. 2026. This constant will be removed +// in the future, but it is currently public. The reason is that making it public allows +// developers to notice its removal when their build fails. +// See https://github.com/shirou/gopsutil/issues/1873 +const WillBeDeletedOptOutMemAvailableCalc = "optOutMemAvailableCalc" func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) @@ -44,21 +37,12 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { return vm, nil } -func VirtualMemoryEx() (*VirtualMemoryExStat, error) { - return VirtualMemoryExWithContext(context.Background()) -} - -func VirtualMemoryExWithContext(ctx context.Context) (*VirtualMemoryExStat, error) { - _, vmEx, err := fillFromMeminfoWithContext(ctx) +func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *ExVirtualMemory, error) { + filename := common.HostProcWithContext(ctx, "meminfo") + lines, err := common.ReadLines(filename) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("couldn't read %s: %w", filename, err) } - return vmEx, nil -} - -func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *VirtualMemoryExStat, error) { - filename := common.HostProcWithContext(ctx, "meminfo") - lines, _ := common.ReadLines(filename) // flag if MemAvailable is in /proc/meminfo (kernel 3.14+) memavail := false @@ -67,7 +51,7 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu sReclaimable := false // "Sreclaimable:" not available: 2.6.19 / Nov 2006 ret := &VirtualMemoryStat{} - retEx := &VirtualMemoryExStat{} + retEx := &ExVirtualMemory{} for _, line := range lines { fields := strings.Split(line, ":") @@ -76,7 +60,7 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu } key := strings.TrimSpace(fields[0]) value := strings.TrimSpace(fields[1]) - value = strings.Replace(value, " kB", "", -1) + value = strings.ReplaceAll(value, " kB", "") switch key { case "MemTotal": @@ -329,8 +313,17 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu ret.Available = ret.Cached + ret.Free } } + // Opt-Out of calculating Mem.Used if the context has the context key set to true. + // This is used for backward compatibility with applications that expect the old calculation method. + // However, we plan to standardize on using MemAvailable in the future. 
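A hedged sketch of the opt-out described in the comment above: a caller that still wants the legacy Used calculation passes the exported key with a true value. Illustrative only; the constant is Linux-only, is slated for removal, and go vet may warn about the plain string context key.

package main

import (
	"context"
	"fmt"

	"github.com/shirou/gopsutil/v4/mem"
)

func main() {
	// Opt back into Used = Total - Free - Buffers - Cached (Linux builds only).
	ctx := context.WithValue(context.Background(), mem.WillBeDeletedOptOutMemAvailableCalc, true)
	vm, err := mem.VirtualMemoryWithContext(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(vm.Used)
}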
+ // Therefore, please avoid using this opt-out unless it is absolutely necessary. + // see https://github.com/shirou/gopsutil/issues/1873 + if val, ok := ctx.Value(WillBeDeletedOptOutMemAvailableCalc).(bool); ok && val { + ret.Used = ret.Total - ret.Free - ret.Buffers - ret.Cached + } else { + ret.Used = ret.Total - ret.Available + } - ret.Used = ret.Total - ret.Free - ret.Buffers - ret.Cached ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 return ret, retEx, nil @@ -358,7 +351,10 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { ret.UsedPercent = 0 } filename := common.HostProcWithContext(ctx, "vmstat") - lines, _ := common.ReadLines(filename) + lines, err := common.ReadLines(filename) + if err != nil { + return nil, fmt.Errorf("couldn't read %s: %w", filename, err) + } for _, l := range lines { fields := strings.Fields(l) if len(fields) < 2 { @@ -409,7 +405,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { // calculateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide // "MemAvailable:" column. It reimplements an algorithm from the link below // https://github.com/giampaolo/psutil/pull/890 -func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 { +func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *ExVirtualMemory) uint64 { var watermarkLow uint64 fn := common.HostProcWithContext(ctx, "zoneinfo") diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go similarity index 90% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go index d1f54eca..8ef539ca 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package mem @@ -15,7 +15,7 @@ func GetPageSize() (uint64, error) { return GetPageSizeWithContext(context.Background()) } -func GetPageSizeWithContext(ctx context.Context) (uint64, error) { +func GetPageSizeWithContext(_ context.Context) (uint64, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") if err != nil { return 0, err @@ -27,7 +27,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") if err != nil { return nil, err diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go similarity index 87% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go index e37d5abe..1cb785f0 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package mem @@ -10,15 +10,16 @@ import ( "errors" "fmt" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) func GetPageSize() (uint64, error) { return GetPageSizeWithContext(context.Background()) } -func 
GetPageSizeWithContext(ctx context.Context) (uint64, error) { +func GetPageSizeWithContext(_ context.Context) (uint64, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp") if err != nil { return 0, err @@ -30,7 +31,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp") if err != nil { return nil, err @@ -60,8 +61,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { } var bcs Bcachestats br := bytes.NewReader(buf) - err = common.Read(br, binary.LittleEndian, &bcs) - if err != nil { + if err := binary.Read(br, binary.LittleEndian, &bcs); err != nil { return nil, err } ret.Buffers = uint64(bcs.Numbufpages) * p diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go index de2b26ca..552e93f4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go index d187abf0..73e5b72a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go index 2488f185..57b5861d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go index 3661b16f..f39a6456 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go index 7a7b4803..f9f838f5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go index b5259f84..0df0745c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package mem @@ -8,7 +8,8 @@ import ( "os" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + + "github.com/shirou/gopsutil/v4/internal/common" ) func SwapMemory() (*SwapMemoryStat, error) { @@ -63,6 +64,6 @@ func SwapDevices() ([]*SwapDevice, error) { return SwapDevicesWithContext(context.Background()) } -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { +func SwapDevicesWithContext(_ context.Context) ([]*SwapDevice, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go similarity index 90% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go index c911267e..1a391dc4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package mem @@ -11,8 +11,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" + + "github.com/shirou/gopsutil/v4/internal/common" ) // VirtualMemory for Solaris is a minimal implementation which only returns @@ -24,17 +25,17 @@ func VirtualMemory() (*VirtualMemoryStat, error) { func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { result := &VirtualMemoryStat{} - zoneName, err := zoneName() + zoneName, err := zoneName(ctx) if err != nil { return nil, err } if zoneName == "global" { - cap, err := globalZoneMemoryCapacity() + capacity, err := globalZoneMemoryCapacity(ctx) if err != nil { return nil, err } - result.Total = cap + result.Total = capacity freemem, err := globalZoneFreeMemory(ctx) if err != nil { return nil, err @@ -43,11 +44,11 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { result.Free = freemem result.Used = result.Total - result.Free } else { - cap, err := nonGlobalZoneMemoryCapacity() + capacity, err := nonGlobalZoneMemoryCapacity(ctx) if err != nil { return nil, err } - result.Total = cap + result.Total = capacity } return result, nil @@ -57,12 +58,11 @@ func SwapMemory() 
(*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { return nil, common.ErrNotImplementedError } -func zoneName() (string, error) { - ctx := context.Background() +func zoneName(ctx context.Context) (string, error) { out, err := invoke.CommandWithContext(ctx, "zonename") if err != nil { return "", err @@ -73,8 +73,7 @@ func zoneName() (string, error) { var globalZoneMemoryCapacityMatch = regexp.MustCompile(`[Mm]emory size: (\d+) Megabytes`) -func globalZoneMemoryCapacity() (uint64, error) { - ctx := context.Background() +func globalZoneMemoryCapacity(ctx context.Context) (uint64, error) { out, err := invoke.CommandWithContext(ctx, "prtconf") if err != nil { return 0, err @@ -114,8 +113,7 @@ func globalZoneFreeMemory(ctx context.Context) (uint64, error) { var kstatMatch = regexp.MustCompile(`(\S+)\s+(\S*)`) -func nonGlobalZoneMemoryCapacity() (uint64, error) { - ctx := context.Background() +func nonGlobalZoneMemoryCapacity(ctx context.Context) (uint64, error) { out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "-c", "zone_memory_cap", "memory_cap:*:*:physcap") if err != nil { return 0, err diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go similarity index 67% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go index 8c7fb1a1..f7421f64 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package mem @@ -9,8 +9,9 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -36,12 +37,14 @@ func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { var memInfo memoryStatusEx memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) - mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + // GlobalMemoryStatusEx returns 0 for error, in which case we check err, + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + mem, _, err := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) if mem == 0 { - return nil, windows.GetLastError() + return nil, err } ret := &VirtualMemoryStat{ @@ -76,27 +79,45 @@ func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { + // Use the performance counter to get the swap usage percentage + counter, err := common.NewWin32PerformanceCounter("swap_percentage", `\Paging File(_Total)\% Usage`) + if err != nil { + return nil, err + } + defer common.PdhCloseQuery.Call(uintptr(counter.Query)) + + usedPercent, err := counter.GetValue() + if err != nil { + return nil, err + } + + // Get total memory from performance information var perfInfo performanceInformation perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) - mem, _, _ := 
procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) + // GetPerformanceInfo returns 0 for error, in which case we check err, + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + mem, _, err := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) if mem == 0 { - return nil, windows.GetLastError() + return nil, err } - tot := perfInfo.commitLimit * perfInfo.pageSize - used := perfInfo.commitTotal * perfInfo.pageSize - free := tot - used - var usedPercent float64 - if tot == 0 { - usedPercent = 0 + totalPhys := perfInfo.physicalTotal * perfInfo.pageSize + totalSys := perfInfo.commitLimit * perfInfo.pageSize + total := totalSys - totalPhys + + var used uint64 + if total > 0 { + used = uint64(0.01 * usedPercent * float64(total)) } else { - usedPercent = float64(used) / float64(tot) * 100 + usedPercent = 0.0 + used = 0 } + ret := &SwapMemoryStat{ - Total: tot, + Total: total, Used: used, - Free: free, - UsedPercent: usedPercent, + Free: total - used, + UsedPercent: common.Round(usedPercent, 1), } return ret, nil @@ -134,7 +155,7 @@ func SwapDevices() ([]*SwapDevice, error) { return SwapDevicesWithContext(context.Background()) } -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { +func SwapDevicesWithContext(_ context.Context) ([]*SwapDevice, error) { pageSizeOnce.Do(func() { var sysInfo systemInfo procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&sysInfo))) @@ -144,9 +165,11 @@ func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { // the following system call invokes the supplied callback function once for each page file before returning // see https://docs.microsoft.com/en-us/windows/win32/api/psapi/nf-psapi-enumpagefilesw var swapDevices []*SwapDevice - result, _, _ := procEnumPageFilesW.Call(windows.NewCallback(pEnumPageFileCallbackW), uintptr(unsafe.Pointer(&swapDevices))) + // EnumPageFilesW returns 0 for error, in which case we check err, + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + result, _, err := procEnumPageFilesW.Call(windows.NewCallback(pEnumPageFileCallbackW), uintptr(unsafe.Pointer(&swapDevices))) if result == 0 { - return nil, windows.GetLastError() + return nil, err } return swapDevices, nil diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net.go b/vendor/github.com/shirou/gopsutil/v4/net/net.go similarity index 67% rename from vendor/github.com/shirou/gopsutil/v3/net/net.go rename to vendor/github.com/shirou/gopsutil/v4/net/net.go index 0f3a62f3..1d1f9f08 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package net import ( @@ -5,7 +6,7 @@ import ( "encoding/json" "net" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} @@ -93,7 +94,7 @@ type ConntrackStat struct { SearchRestart uint32 `json:"searchRestart"` // Conntrack table lookups restarted due to hashtable resizes } -func NewConntrackStat(e uint32, s uint32, f uint32, n uint32, inv uint32, ign uint32, del uint32, dlst uint32, ins uint32, insfail uint32, drop uint32, edrop uint32, ie uint32, en uint32, ec uint32, ed uint32, sr uint32) *ConntrackStat { +func NewConntrackStat(e, s, f, n, inv, ign, del, dlst, ins, insfail, drop, edrop, ie, en, ec, ed, sr uint32) *ConntrackStat { return &ConntrackStat{ Entries: e, Searched: s, @@ -206,7 +207,7 @@ func 
Interfaces() (InterfaceStatList, error) { return InterfacesWithContext(context.Background()) } -func InterfacesWithContext(ctx context.Context) (InterfaceStatList, error) { +func InterfacesWithContext(_ context.Context) (InterfaceStatList, error) { is, err := net.Interfaces() if err != nil { return nil, err @@ -254,7 +255,7 @@ func InterfacesWithContext(ctx context.Context) (InterfaceStatList, error) { return ret, nil } -func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) { +func getIOCountersAll(n []IOCountersStat) []IOCountersStat { r := IOCountersStat{ Name: "all", } @@ -269,5 +270,87 @@ func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) { r.Dropout += nic.Dropout } - return []IOCountersStat{r}, nil + return []IOCountersStat{r} +} + +// IOCounters returns network I/O statistics for every network +// interface installed on the system. If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +// ProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Available protocols: +// [ip,icmp,icmpmsg,tcp,udp,udplite] +// Not Implemented for FreeBSD, Windows, OpenBSD, Darwin +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +// FilterCounters returns iptables conntrack statistics +// the currently in use conntrack count and the max. +// If the file does not exist or is invalid it will return nil. +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +// ConntrackStats returns more detailed info about the conntrack table +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. +func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { + return ConnectionsWithoutUidsWithContext(context.Background(), kind) +} + +// Return a list of network connections opened by a process. +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. 
+func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) +} + +// Return up to `max` network connections opened by a process. +func ConnectionsPidMax(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, maxConn) +} + +// Pids retunres all pids. +// Note: this is a copy of process_linux.Pids() +// FIXME: Import process occurs import cycle. +// move to common made other platform breaking. Need consider. +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go similarity index 61% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix.go index 81feaa8d..4531dd44 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package net @@ -11,43 +11,27 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) -} - -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) } -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError } -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } @@ -97,36 +81,36 @@ func parseNetstatNetLine(line string) (ConnectionStat, error) { var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) -// This function only works for netstat 
returning addresses with a "." -// before the port (0.0.0.0.22 instead of 0.0.0.0:22). -func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { - parse := func(l string) (Addr, error) { - matches := portMatch.FindStringSubmatch(l) - if matches == nil { - return Addr{}, fmt.Errorf("wrong addr, %s", l) - } - host := matches[1] - port := matches[2] - if host == "*" { - switch family { - case syscall.AF_INET: - host = "0.0.0.0" - case syscall.AF_INET6: - host = "::" - default: - return Addr{}, fmt.Errorf("unknown family, %d", family) - } - } - lport, err := strconv.Atoi(port) - if err != nil { - return Addr{}, err +func parseAddr(l string, family uint32) (Addr, error) { + matches := portMatch.FindStringSubmatch(l) + if matches == nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + host := matches[1] + port := matches[2] + if host == "*" { + switch family { + case syscall.AF_INET: + host = "0.0.0.0" + case syscall.AF_INET6: + host = "::" + default: + return Addr{}, fmt.Errorf("unknown family, %d", family) } - return Addr{IP: host, Port: uint32(lport)}, nil } + lport, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil +} - laddr, err = parse(local) +// This function only works for netstat returning addresses with a "." +// before the port (0.0.0.0.22 instead of 0.0.0.0:22). +func parseNetstatAddr(local, remote string, family uint32) (laddr, raddr Addr, err error) { + laddr, err = parseAddr(local, family) if remote != "*.*" { // remote addr exists - raddr, err = parse(remote) + raddr, err = parseAddr(remote, family) if err != nil { return laddr, raddr, err } @@ -199,7 +183,7 @@ func hasCorrectInetProto(kind, proto string) bool { return false } -func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { +func parseNetstatA(output, kind string) ([]ConnectionStat, error) { var ret []ConnectionStat lines := strings.Split(string(output), "\n") @@ -209,7 +193,8 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { continue } - if strings.HasPrefix(fields[0], "f1") { + switch { + case strings.HasPrefix(fields[0], "f1"): // Unix lines if len(fields) < 2 { // every unix connections have two lines @@ -218,12 +203,12 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { c, err := parseNetstatUnixLine(fields) if err != nil { - return nil, fmt.Errorf("failed to parse Unix Address (%s): %s", line, err) + return nil, fmt.Errorf("failed to parse Unix Address (%s): %w", line, err) } ret = append(ret, c) - } else if strings.HasPrefix(fields[0], "tcp") || strings.HasPrefix(fields[0], "udp") { + case strings.HasPrefix(fields[0], "tcp") || strings.HasPrefix(fields[0], "udp"): // Inet lines if !hasCorrectInetProto(kind, fields[0]) { continue @@ -237,11 +222,11 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { c, err := parseNetstatNetLine(line) if err != nil { - return nil, fmt.Errorf("failed to parse Inet Address (%s): %s", line, err) + return nil, fmt.Errorf("failed to parse Inet Address (%s): %w", line, err) } ret = append(ret, c) - } else { + default: // Header lines continue } @@ -250,10 +235,6 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { return ret, nil } -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) 
([]ConnectionStat, error) { args := []string{"-na"} switch strings.ToLower(kind) { @@ -286,45 +267,34 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return ret, nil } -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. -func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) } func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go index 8c34f881..f7da4ce1 100644 --- 
a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package net @@ -29,8 +29,8 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } iocounters = append(iocounters, n) } - if pernic == false { - return getIOCountersAll(iocounters) + if !pernic { + return getIOCountersAll(iocounters), nil } return iocounters, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go similarity index 89% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go index e3fce902..834534d3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go @@ -1,15 +1,15 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package net import ( "context" - "fmt" + "errors" "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func parseNetstatI(output string) ([]IOCountersStat, error) { @@ -19,7 +19,7 @@ func parseNetstatI(output string) ([]IOCountersStat, error) { // Check first line is header if len(lines) > 0 && strings.Fields(lines[0])[0] != "Name" { - return nil, fmt.Errorf("not a 'netstat -i' output") + return nil, errors.New("not a 'netstat -i' output") } for _, line := range lines[1:] { @@ -88,8 +88,8 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, if err != nil { return nil, err } - if pernic == false { - return getIOCountersAll(iocounters) + if !pernic { + return getIOCountersAll(iocounters), nil } return iocounters, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go similarity index 77% rename from vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go index 8a7b6374..c47e0c37 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package net @@ -12,11 +12,11 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ( - errNetstatHeader = errors.New("Can't parse header of netstat output") + errNetstatHeader = errors.New("can't parse header of netstat output") netstatLinkRegexp = regexp.MustCompile(`^$`) ) @@ -29,15 +29,14 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err erro ) if columns[0] == "Name" { - err = errNetstatHeader - return + return nil, nil, errNetstatHeader } // try to extract the numeric value from if subMatch := netstatLinkRegexp.FindStringSubmatch(columns[2]); len(subMatch) == 2 { numericValue, err = strconv.ParseUint(subMatch[1], 10, 64) if err != nil { - return + return nil, nil, err } linkIDUint := uint(numericValue) linkID = &linkIDUint @@ -50,8 +49,7 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err erro base = 0 } if numberColumns < 11 || numberColumns > 13 { - err = fmt.Errorf("Line %q do have an invalid number of columns %d", line, numberColumns) - return + return nil, nil, 
fmt.Errorf("line %q do have an invalid number of columns %d", line, numberColumns) } parsed := make([]uint64, 0, 7) @@ -74,7 +72,7 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err erro } if numericValue, err = strconv.ParseUint(target, 10, 64); err != nil { - return + return nil, nil, err } parsed = append(parsed, numericValue) } @@ -91,7 +89,7 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err erro if len(parsed) == 7 { stat.Dropout = parsed[6] } - return + return stat, linkID, nil } type netstatInterface struct { @@ -143,8 +141,8 @@ func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage { return output } -func (min mapInterfaceNameUsage) isTruncated() bool { - for _, usage := range min { +func (mapi mapInterfaceNameUsage) isTruncated() bool { + for _, usage := range mapi { if usage > 1 { return true } @@ -152,9 +150,9 @@ func (min mapInterfaceNameUsage) isTruncated() bool { return false } -func (min mapInterfaceNameUsage) notTruncated() []string { +func (mapi mapInterfaceNameUsage) notTruncated() []string { output := make([]string, 0) - for ifaceName, usage := range min { + for ifaceName, usage := range mapi { if usage == 1 { output = append(output, ifaceName) } @@ -162,15 +160,16 @@ func (min mapInterfaceNameUsage) notTruncated() []string { return output } +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + // example of `netstat -ibdnW` output on yosemite // Name Mtu Network Address Ipkts Ierrs Ibytes Opkts Oerrs Obytes Coll Drop // lo0 16384 869107 0 169411755 869107 0 169411755 0 0 // lo0 16384 ::1/128 ::1 869107 - 169411755 869107 - 169411755 - - // lo0 16384 127 127.0.0.1 869107 - 169411755 869107 - 169411755 - - -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { var ( ret []IOCountersStat @@ -247,45 +246,24 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } } - if pernic == false { - return getIOCountersAll(ret) + if !pernic { + return getIOCountersAll(ret), nil } return ret, nil } -// IOCountersByFile exists just for compatibility with Linux. 
-func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { return IOCountersWithContext(ctx, pernic) } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. -// Not Implemented for Darwin -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go new file mode 100644 index 00000000..29c2a148 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris + +package net + +import ( + "context" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func IOCountersWithContext(_ context.Context, _ bool) ([]IOCountersStat, error) { + return []IOCountersStat{}, common.ErrNotImplementedError +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) +} + +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsWithContext(_ context.Context, _ string) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func 
ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) +} + +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go similarity index 57% rename from vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go index bf8baf09..a72aa00a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package net @@ -8,11 +8,12 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { @@ -83,46 +84,25 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, ret = append(ret, n) } - if pernic == false { - return getIOCountersAll(ret) + if !pernic { + return getIOCountersAll(ret), nil } return ret, nil } -// IOCountersByFile exists just for compatibility with Linux. 
-func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) -} - -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) } -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. -// Not Implemented for FreeBSD -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/net/net_linux.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_linux.go index 20ca5470..d1e7f0ce 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package net @@ -16,7 +16,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) const ( // Conntrack Column numbers @@ -27,62 +27,48 @@ const ( // Conntrack Column numbers ctINVALID ctIGNORE ctDELETE - ctDELETE_LIST + ctDELETE_LIST //nolint:revive //FIXME ctINSERT - ctINSERT_FAILED + ctINSERT_FAILED //nolint:revive //FIXME ctDROP - ctEARLY_DROP - ctICMP_ERROR - CT_EXPEctNEW - ctEXPECT_CREATE - CT_EXPEctDELETE - ctSEARCH_RESTART + ctEARLY_DROP //nolint:revive //FIXME + ctICMP_ERROR //nolint:revive //FIXME + CT_EXPEctNEW //nolint:revive //FIXME + ctEXPECT_CREATE //nolint:revive //FIXME + CT_EXPEctDELETE //nolint:revive //FIXME + ctSEARCH_RESTART //nolint:revive //FIXME ) -// NetIOCounters returns network I/O statistics for every network -// interface installed on the system. If pernic argument is false, -// return only sum of all information (which name is 'all'). If true, -// every network interface installed on the system is returned -// separately. 
-func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { filename := common.HostProcWithContext(ctx, "net/dev") return IOCountersByFileWithContext(ctx, pernic, filename) } -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { +func IOCountersByFileWithContext(_ context.Context, pernic bool, filename string) ([]IOCountersStat, error) { lines, err := common.ReadLines(filename) if err != nil { return nil, err } - parts := make([]string, 2) - statlen := len(lines) - 1 ret := make([]IOCountersStat, 0, statlen) for _, line := range lines[2:] { + // Split interface name and stats data at the last ":" separatorPos := strings.LastIndex(line, ":") if separatorPos == -1 { continue } - parts[0] = line[0:separatorPos] - parts[1] = line[separatorPos+1:] + interfacePart := line[0:separatorPos] + statsPart := line[separatorPos+1:] - interfaceName := strings.TrimSpace(parts[0]) + interfaceName := strings.TrimSpace(interfacePart) if interfaceName == "" { continue } - fields := strings.Fields(strings.TrimSpace(parts[1])) + fields := strings.Fields(strings.TrimSpace(statsPart)) bytesRecv, err := strconv.ParseUint(fields[0], 10, 64) if err != nil { return ret, err @@ -141,7 +127,7 @@ func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename stri } if !pernic { - return getIOCountersAll(ret) + return getIOCountersAll(ret), nil } return ret, nil @@ -156,15 +142,6 @@ var netProtocols = []string{ "udplite", } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. -// Available protocols: -// [ip,icmp,icmpmsg,tcp,udp,udplite] -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { if len(protocols) == 0 { protocols = netProtocols @@ -221,13 +198,6 @@ func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoC return stats, nil } -// NetFilterCounters returns iptables conntrack statistics -// the currently in use conntrack count and the max. -// If the file does not exist or is invalid it will return nil. 
-func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { countfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_count") maxfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_max") @@ -238,25 +208,20 @@ func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { } stats := make([]FilterStat, 0, 1) - max, err := common.ReadInts(maxfile) + maxConn, err := common.ReadInts(maxfile) if err != nil { return nil, err } payload := FilterStat{ ConnTrackCount: count[0], - ConnTrackMax: max[0], + ConnTrackMax: maxConn[0], } stats = append(stats, payload) return stats, nil } -// ConntrackStats returns more detailed info about the conntrack table -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - // ConntrackStatsWithContext returns more detailed info about the conntrack table func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { return conntrackStatsFromFile(common.HostProcWithContext(ctx, "net/stat/nf_conntrack"), percpu) @@ -385,47 +350,20 @@ type connTmp struct { path string } -// Return a list of network connections opened. -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPidWithContext(ctx, kind, 0) } -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(ctx, kind, 0, max) -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. -func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -// Return a list of network connections opened by a process. 
-func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { @@ -436,24 +374,15 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -// Return up to `max` network connections opened by a process. -func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, false) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, true) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int, skipUids bool) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int, skipUids bool) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -462,9 +391,9 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p var err error var inodes map[string][]inodeMap if pid == 0 { - inodes, err = getProcInodesAllWithContext(ctx, root, max) + inodes, err = getProcInodesAllWithContext(ctx, root, maxConn) } else { - inodes, err = getProcInodes(root, pid, max) + inodes, err = getProcInodes(root, pid, maxConn) if len(inodes) == 0 { // no connection for the pid return []ConnectionStat{}, nil @@ -476,10 +405,6 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p return statsFromInodesWithContext(ctx, root, pid, tmap, inodes, skipUids) } -func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { - return statsFromInodesWithContext(context.Background(), root, pid, tmap, inodes, skipUids) -} - func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { dupCheckMap := make(map[string]struct{}) var ret []ConnectionStat @@ -496,7 
+421,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma } switch t.family { case syscall.AF_INET, syscall.AF_INET6: - ls, err = processInetWithContext(ctx, path, t, inodes, pid) + ls, err = processInet(path, t, inodes, pid) case syscall.AF_UNIX: ls, err = processUnix(path, t, inodes, pid) } @@ -543,7 +468,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma } // getProcInodes returns fd of the pid. -func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) { +func getProcInodes(root string, pid int32, maxConn int) (map[string][]inodeMap, error) { ret := make(map[string][]inodeMap) dir := fmt.Sprintf("%s/%d/fd", root, pid) @@ -552,7 +477,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, err } defer f.Close() - dirEntries, err := readDir(f, max) + dirEntries, err := f.ReadDir(maxConn) if err != nil { return ret, err } @@ -573,7 +498,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro if !ok { ret[inode] = make([]inodeMap, 0) } - fd, err := strconv.Atoi(dirEntry.Name()) + fd, err := strconv.ParseInt(dirEntry.Name(), 10, 32) if err != nil { continue } @@ -587,14 +512,6 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, nil } -// Pids retunres all pids. -// Note: this is a copy of process_linux.Pids() -// FIXME: Import process occures import cycle. -// move to common made other platform breaking. Need consider. -func Pids() ([]int32, error) { - return PidsWithContext(context.Background()) -} - func PidsWithContext(ctx context.Context) ([]int32, error) { var ret []int32 @@ -622,7 +539,7 @@ func PidsWithContext(ctx context.Context) ([]int32, error) { // Note: the following is based off process_linux structs and methods // we need these to fetch the owner of a process ID -// FIXME: Import process occures import cycle. +// FIXME: Import process occurs import cycle. 
// see remarks on pids() type process struct { Pid int32 `json:"pid"` @@ -653,8 +570,7 @@ func (p *process) fillFromStatus(ctx context.Context) error { continue } value := tabParts[1] - switch strings.TrimRight(tabParts[0], ":") { - case "Uid": + if strings.TrimRight(tabParts[0], ":") == "Uid" { p.uids = make([]int32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) @@ -668,11 +584,7 @@ func (p *process) fillFromStatus(ctx context.Context) error { return nil } -func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { - return getProcInodesAllWithContext(context.Background(), root, max) -} - -func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map[string][]inodeMap, error) { +func getProcInodesAllWithContext(ctx context.Context, root string, maxConn int) (map[string][]inodeMap, error) { pids, err := PidsWithContext(ctx) if err != nil { return nil, err @@ -680,7 +592,7 @@ func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map ret := make(map[string][]inodeMap) for _, pid := range pids { - t, err := getProcInodes(root, pid, max) + t, err := getProcInodes(root, pid, maxConn) if err != nil { // skip if permission error or no longer exists if os.IsPermission(err) || os.IsNotExist(err) || errors.Is(err, io.EOF) { @@ -697,15 +609,11 @@ func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map return ret, nil } -// decodeAddress decode addresse represents addr in proc/net/* +// decodeAddress decode address represents addr in proc/net/* // ex: // "0500000A:0016" -> "10.0.0.5", 22 // "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53 func decodeAddress(family uint32, src string) (Addr, error) { - return decodeAddressWithContext(context.Background(), family, src) -} - -func decodeAddressWithContext(ctx context.Context, family uint32, src string) (Addr, error) { t := strings.Split(src, ":") if len(t) != 2 { return Addr{}, fmt.Errorf("does not contain port, %s", src) @@ -723,12 +631,12 @@ func decodeAddressWithContext(ctx context.Context, family uint32, src string) (A if family == syscall.AF_INET { if common.IsLittleEndian() { - ip = net.IP(ReverseWithContext(ctx, decoded)) + ip = net.IP(Reverse(decoded)) } else { ip = net.IP(decoded) } } else { // IPv6 - ip, err = parseIPv6HexStringWithContext(ctx, decoded) + ip, err = parseIPv6HexString(decoded) if err != nil { return Addr{}, err } @@ -739,12 +647,7 @@ func decodeAddressWithContext(ctx context.Context, family uint32, src string) (A }, nil } -// Reverse reverses array of bytes. func Reverse(s []byte) []byte { - return ReverseWithContext(context.Background(), s) -} - -func ReverseWithContext(ctx context.Context, s []byte) []byte { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } @@ -753,27 +656,19 @@ func ReverseWithContext(ctx context.Context, s []byte) []byte { // parseIPv6HexString parse array of bytes to IPv6 string func parseIPv6HexString(src []byte) (net.IP, error) { - return parseIPv6HexStringWithContext(context.Background(), src) -} - -func parseIPv6HexStringWithContext(ctx context.Context, src []byte) (net.IP, error) { if len(src) != 16 { - return nil, fmt.Errorf("invalid IPv6 string") + return nil, errors.New("invalid IPv6 string") } buf := make([]byte, 0, 16) for i := 0; i < len(src); i += 4 { - r := ReverseWithContext(ctx, src[i:i+4]) + r := Reverse(src[i : i+4]) buf = append(buf, r...) 
} return net.IP(buf), nil } func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { - return processInetWithContext(context.Background(), file, kind, inodes, filterPid) -} - -func processInetWithContext(ctx context.Context, file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { if strings.HasSuffix(file, "6") && !common.PathExists(file) { // IPv6 not supported, return empty. return []connTmp{}, nil @@ -816,11 +711,11 @@ func processInetWithContext(ctx context.Context, file string, kind netConnection } else { status = "NONE" } - la, err := decodeAddressWithContext(ctx, kind.family, laddr) + la, err := decodeAddress(kind.family, laddr) if err != nil { continue } - ra, err := decodeAddressWithContext(ctx, kind.family, raddr) + ra, err := decodeAddress(kind.family, raddr) if err != nil { continue } @@ -858,7 +753,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in if len(tokens) < 6 { continue } - st, err := strconv.Atoi(tokens[4]) + st, err := strconv.ParseInt(tokens[4], 10, 32) if err != nil { return nil, err } @@ -897,7 +792,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in return ret, nil } -func updateMap(src map[string][]inodeMap, add map[string][]inodeMap) map[string][]inodeMap { +func updateMap(src, add map[string][]inodeMap) map[string][]inodeMap { for key, value := range add { a, exists := src[key] if !exists { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go similarity index 64% rename from vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go index 25bbe49c..ec4cfb95 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package net @@ -12,13 +12,14 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) func ParseNetstat(output string, mode string, - iocs map[string]IOCountersStat) error { + iocs map[string]IOCountersStat, +) error { lines := strings.Split(output, "\n") exists := make([]string, 0, len(lines)-1) @@ -96,7 +97,7 @@ func ParseNetstat(output string, mode string, n.PacketsSent = parsed[2] n.Dropout = parsed[3] case "ine": - n.Errin = parsed[0] + n.Errin = parsed[0] n.Errout = parsed[1] } @@ -105,8 +106,9 @@ func ParseNetstat(output string, mode string, return nil } -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { @@ -148,47 +150,26 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, ret = append(ret, ioc) } - if pernic == false { - return getIOCountersAll(ret) + if !pernic { + return getIOCountersAll(ret), nil } return ret, nil } -// IOCountersByFile exists just for compatibility with Linux. 
-func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. -// Not Implemented for OpenBSD -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } @@ -236,34 +217,34 @@ func parseNetstatLine(line string) (ConnectionStat, error) { return n, nil } -func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { - parse := func(l string) (Addr, error) { - matches := portMatch.FindStringSubmatch(l) - if matches == nil { - return Addr{}, fmt.Errorf("wrong addr, %s", l) - } - host := matches[1] - port := matches[2] - if host == "*" { - switch family { - case syscall.AF_INET: - host = "0.0.0.0" - case syscall.AF_INET6: - host = "::" - default: - return Addr{}, fmt.Errorf("unknown family, %d", family) - } - } - lport, err := strconv.Atoi(port) - if err != nil { - return Addr{}, err +func parseAddr(l string, family uint32) (Addr, error) { + matches := portMatch.FindStringSubmatch(l) + if matches == nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + host := matches[1] + port := matches[2] + if host == "*" { + switch family { + case syscall.AF_INET: + host = "0.0.0.0" + case syscall.AF_INET6: + host = "::" + default: + return Addr{}, fmt.Errorf("unknown family, %d", family) } - return Addr{IP: host, Port: uint32(lport)}, nil } + lport, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil +} - laddr, err = parse(local) +func parseNetstatAddr(local, remote string, family uint32) (laddr, raddr Addr, err error) { + laddr, err = parseAddr(local, family) if remote != "*.*" { // remote addr exists - raddr, err = parse(remote) + raddr, err = parseAddr(remote, family) if err != nil { return laddr, raddr, err } @@ -272,11 +253,6 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return laddr, raddr, err } -// Return a list of network connections opened. 
-func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { var ret []ConnectionStat @@ -284,11 +260,7 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, switch strings.ToLower(kind) { default: fallthrough - case "": - fallthrough - case "all": - fallthrough - case "inet": + case "", "all", "inet": // nothing to add case "inet4": args = append(args, "-finet") @@ -320,7 +292,7 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, } lines := strings.Split(string(out), "\n") for _, line := range lines { - if !(strings.HasPrefix(line, "tcp") || strings.HasPrefix(line, "udp")) { + if !strings.HasPrefix(line, "tcp") && !strings.HasPrefix(line, "udp") { continue } n, err := parseNetstatLine(line) @@ -333,3 +305,35 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return ret, nil } + +func ConnectionsPidWithContext(_ context.Context, _ string, _ int32) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsMaxWithContext(_ context.Context, _ string, _ int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsPidMaxWithContext(_ context.Context, _ string, _ int32, _ int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) +} + +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go similarity index 58% rename from vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go index 79d8ac30..df067806 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go @@ -1,28 +1,20 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package net import ( "context" + "errors" "fmt" "regexp" "runtime" "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -// NetIOCounters returnes network I/O statistics for every network -// interface installed on the system. If pernic argument is false, -// return only sum of all information (which name is 'all'). If true, -// every network interface installed on the system is returned -// separately. 
-func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - var kstatSplit = regexp.MustCompile(`[:\s]+`) func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { @@ -38,7 +30,7 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") if len(lines) == 0 { - return nil, fmt.Errorf("no interface found") + return nil, errors.New("no interface found") } rbytes64arr := make(map[string]uint64) ipackets64arr := make(map[string]uint64) @@ -113,32 +105,65 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } if !pernic { - return getIOCountersAll(ret) + return getIOCountersAll(ret), nil } return ret, nil } -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) +} + +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } -func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { +func ConnectionsWithContext(_ context.Context, _ string) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { - return []FilterStat{}, common.ErrNotImplementedError +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { - return []ProtoCountersStat{}, common.ErrNotImplementedError +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) +} + +func 
ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) +} + +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go similarity index 56% rename from vendor/github.com/shirou/gopsutil/v3/net/net_unix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_unix.go index cb846e28..c491a291 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || darwin -// +build freebsd darwin package net @@ -11,33 +11,17 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -// Return a list of network connections opened. -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPidWithContext(ctx, kind, 0) } -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(_ context.Context, _ string, _ int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -// Return a list of network connections opened by a process. 
-func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { var ret []ConnectionStat @@ -45,11 +29,7 @@ func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]C switch strings.ToLower(kind) { default: fallthrough - case "": - fallthrough - case "all": - fallthrough - case "inet": + case "", "all", "inet": args = append(args, "tcp", "-i", "udp") case "inet4": args = append(args, "4") @@ -109,11 +89,11 @@ func parseNetLine(line string) (ConnectionStat, error) { f[7] = "unix" } - pid, err := strconv.Atoi(f[1]) + pid, err := strconv.ParseInt(f[1], 10, 32) if err != nil { return ConnectionStat{}, err } - fd, err := strconv.Atoi(strings.Trim(f[3], "u")) + fd, err := strconv.ParseInt(strings.Trim(f[3], "u"), 10, 32) if err != nil { return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3]) } @@ -151,26 +131,26 @@ func parseNetLine(line string) (ConnectionStat, error) { return n, nil } -func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { - parse := func(l string) (Addr, error) { - host, port, err := net.SplitHostPort(l) - if err != nil { - return Addr{}, fmt.Errorf("wrong addr, %s", l) - } - lport, err := strconv.Atoi(port) - if err != nil { - return Addr{}, err - } - return Addr{IP: host, Port: uint32(lport)}, nil +func parseAddr(l string) (Addr, error) { + host, port, err := net.SplitHostPort(l) + if err != nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + lport, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return Addr{}, err } + return Addr{IP: host, Port: uint32(lport)}, nil +} +func parseNetAddr(line string) (laddr, raddr Addr, err error) { addrs := strings.Split(line, "->") if len(addrs) == 0 { return laddr, raddr, fmt.Errorf("wrong netaddr, %s", line) } - laddr, err = parse(addrs[0]) + laddr, err = parseAddr(addrs[0]) if len(addrs) == 2 { // remote addr exists - raddr, err = parse(addrs[1]) + raddr, err = parseAddr(addrs[1]) if err != nil { return laddr, raddr, err } @@ -179,46 +159,26 @@ func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { return laddr, raddr, err } -// Return up to `max` network connections opened by a process. -func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func ConnectionsPidMaxWithContext(_ context.Context, _ string, _ int32, _ int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) -} - func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go similarity index 80% rename from vendor/github.com/shirou/gopsutil/v3/net/net_windows.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_windows.go index 5d384342..f530e4e5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go @@ -1,18 +1,20 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package net import ( "context" + "errors" "fmt" "net" "os" "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -94,7 +96,7 @@ const ( type mibIfRow2 struct { InterfaceLuid uint64 InterfaceIndex uint32 - InterfaceGuid guid + InterfaceGuid guid //nolint:revive //FIXME Alias [maxStringSize + 1]uint16 Description [maxStringSize + 1]uint16 PhysicalAddressLength uint32 @@ -111,7 +113,7 @@ type mibIfRow2 struct { OperStatus uint32 AdminStatus uint32 MediaConnectState uint32 - NetworkGuid guid + NetworkGuid guid //nolint:revive //FIXME ConnectionType uint32 padding1 [pad0for64_4for32]byte TransmitLinkSpeed uint64 @@ -136,11 +138,7 @@ type mibIfRow2 struct { OutQLen uint64 } -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { +func IOCountersWithContext(_ context.Context, pernic bool) ([]IOCountersStat, error) { 
ifs, err := net.Interfaces() if err != nil { return nil, err @@ -195,38 +193,20 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } if !pernic { - return getIOCountersAll(counters) + return getIOCountersAll(counters), nil } return counters, nil } -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) -} - -// Return a list of network connections -// Available kind: -// -// reference to netConnectionKindMap -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) } func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPidWithContext(ctx, kind, 0) } -// ConnectionsPid Return a list of network connections opened by a process -func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { +func ConnectionsPidWithContext(_ context.Context, kind string, pid int32) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -260,7 +240,7 @@ func getProcInet(kinds []netConnectionKindType, pid int32) ([]ConnectionStat, er func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error) { if kindType.filename == "" { - return nil, fmt.Errorf("kind filename must be required") + return nil, errors.New("kind filename must be required") } switch kindType.filename { @@ -277,76 +257,48 @@ func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error return nil, fmt.Errorf("invalid kind filename, %s", kindType.filename) } -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. 
-// Not Implemented for Windows -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } @@ -376,7 +328,7 @@ func getTableUintptr(family uint32, buf []byte) uintptr { return p } -func getTableInfo(filename string, table interface{}) (index, step, length int) { +func getTableInfo(filename string, table any) (index, step, length int) { switch filename { case kindTCP4.filename: index = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).DwNumEntries)) @@ -396,7 +348,7 @@ func getTableInfo(filename string, table interface{}) (index, step, length int) length = int(table.(pmibUDP6TableOwnerPid).DwNumEntries) } - return + return index, step, length } func getTCPConnections(family uint32) ([]ConnectionStat, error) { @@ -410,7 +362,7 @@ func getTCPConnections(family uint32) ([]ConnectionStat, error) { ) if family == 0 { - return nil, fmt.Errorf("faimly must be required") + return nil, errors.New("faimly must be required") } for { @@ -431,7 +383,7 @@ func getTCPConnections(family uint32) ([]ConnectionStat, error) { } } - err := getExtendedTcpTable(p, + err := getExtendedTCPTable(p, &size, true, family, @@ -440,7 +392,7 @@ func getTCPConnections(family uint32) ([]ConnectionStat, error) { if err == nil { break } - if err != windows.ERROR_INSUFFICIENT_BUFFER { + if !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { return nil, err } buf = make([]byte, size) @@ -491,7 +443,7 @@ func getUDPConnections(family uint32) ([]ConnectionStat, error) { ) if family == 0 { - return nil, fmt.Errorf("faimly must be required") + return nil, errors.New("faimly must be required") } for { @@ -512,7 +464,7 @@ func getUDPConnections(family uint32) ([]ConnectionStat, error) { } } - err := getExtendedUdpTable( + err := getExtendedUDPTable( p, &size, true, @@ -523,7 +475,7 @@ func getUDPConnections(family uint32) ([]ConnectionStat, error) { if err == nil { break } - if err != windows.ERROR_INSUFFICIENT_BUFFER { + if !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { return nil, err } buf = make([]byte, size) @@ -576,20 +528,20 @@ var tcpStatuses = map[mibTCPState]string{ 12: "DELETE", } -func getExtendedTcpTable(pTcpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass tcpTableClass, reserved uint32) (errcode error) { - r1, _, _ := syscall.Syscall6(procGetExtendedTCPTable.Addr(), 6, pTcpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) +func getExtendedTCPTable(pTCPTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass tcpTableClass, reserved uint32) (errcode error) { + r1, _, _ := syscall.Syscall6(procGetExtendedTCPTable.Addr(), 6, pTCPTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) if r1 != 0 { errcode = syscall.Errno(r1) } - return + return errcode } -func getExtendedUdpTable(pUdpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass udpTableClass, reserved uint32) (errcode error) { - r1, _, _ := syscall.Syscall6(procGetExtendedUDPTable.Addr(), 6, pUdpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) +func getExtendedUDPTable(pUDPTable 
uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass udpTableClass, reserved uint32) (errcode error) { + r1, _, _ := syscall.Syscall6(procGetExtendedUDPTable.Addr(), 6, pUDPTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) if r1 != 0 { errcode = syscall.Errno(r1) } - return + return errcode } func getUintptrFromBool(b bool) uintptr { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/process/process.go rename to vendor/github.com/shirou/gopsutil/v4/process/process.go index 1bb27abf..5db5ff48 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go @@ -1,23 +1,26 @@ +// SPDX-License-Identifier: BSD-3-Clause package process import ( "context" "encoding/json" "errors" + "regexp" "runtime" "sort" "sync" "time" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/mem" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/mem" + "github.com/shirou/gopsutil/v4/net" ) var ( invoke common.Invoker = common.Invoke{} - ErrorNoChildren = errors.New("process does not have children") + strictIntPtrn = regexp.MustCompile(`^\d+$`) + ErrorNoChildren = errors.New("process does not have children") // Deprecated: ErrorNoChildren is never returned by process.Children(), check its returned []*Process slice length instead ErrorProcessNotRunning = errors.New("process does not exist") ErrorNotPermitted = errors.New("operation not permitted") ) @@ -29,9 +32,9 @@ type Process struct { parent int32 parentMutex sync.RWMutex // for windows ppid cache numCtxSwitches *NumCtxSwitchesStat - uids []int32 - gids []int32 - groups []int32 + uids []uint32 + gids []uint32 + groups []uint32 numThreads int32 memInfo *MemoryInfoStat sigInfo *SignalInfoStat @@ -102,10 +105,18 @@ type RlimitStat struct { } type IOCountersStat struct { - ReadCount uint64 `json:"readCount"` + // ReadCount is a number of read I/O operations such as syscalls. + ReadCount uint64 `json:"readCount"` + // WriteCount is a number of read I/O operations such as syscalls. WriteCount uint64 `json:"writeCount"` - ReadBytes uint64 `json:"readBytes"` + // ReadBytes is a number of all I/O read in bytes. This includes disk I/O on Linux and Windows. + ReadBytes uint64 `json:"readBytes"` + // WriteBytes is a number of all I/O write in bytes. This includes disk I/O on Linux and Windows. WriteBytes uint64 `json:"writeBytes"` + // DiskReadBytes is a number of disk I/O write in bytes. Currently only Linux has this value. + DiskReadBytes uint64 `json:"diskReadBytes"` + // DiskWriteBytes is a number of disk I/O read in bytes. Currently only Linux has this value. 
+ DiskWriteBytes uint64 `json:"diskWriteBytes"` } type NumCtxSwitchesStat struct { @@ -260,13 +271,11 @@ func (p *Process) PercentWithContext(ctx context.Context, interval time.Duration if err != nil { return 0, err } - } else { - if p.lastCPUTimes == nil { - // invoked first time - p.lastCPUTimes = cpuTimes - p.lastCPUTime = now - return 0, nil - } + } else if p.lastCPUTimes == nil { + // invoked first time + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + return 0, nil } numcpu := runtime.NumCPU() @@ -316,9 +325,13 @@ func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 if delta == 0 { return 0 } - delta_proc := t2.Total() - t1.Total() - overall_percent := ((delta_proc / delta) * 100) * float64(numcpu) - return overall_percent + // https://github.com/giampaolo/psutil/blob/c034e6692cf736b5e87d14418a8153bb03f6cf42/psutil/__init__.py#L1064 + deltaProc := (t2.User - t1.User) + (t2.System - t1.System) + if deltaProc <= 0 { + return 0 + } + overallPercent := ((deltaProc / delta) * 100) * float64(numcpu) + return overallPercent } // MemoryPercent returns how many percent of the total RAM this process uses @@ -348,7 +361,7 @@ func (p *Process) CPUPercent() (float64, error) { } func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { - crt_time, err := p.createTimeWithContext(ctx) + createTime, err := p.createTimeWithContext(ctx) if err != nil { return 0, err } @@ -358,7 +371,7 @@ func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { return 0, err } - created := time.Unix(0, crt_time*int64(time.Millisecond)) + created := time.Unix(0, createTime*int64(time.Millisecond)) totalTime := time.Since(created).Seconds() if totalTime <= 0 { return 0, nil @@ -368,7 +381,7 @@ func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { } // Groups returns all group IDs(include supplementary groups) of the process as a slice of the int -func (p *Process) Groups() ([]int32, error) { +func (p *Process) Groups() ([]uint32, error) { return p.GroupsWithContext(context.Background()) } @@ -395,6 +408,11 @@ func (p *Process) Cmdline() (string, error) { // CmdlineSlice returns the command line arguments of the process as a slice with each // element being an argument. +// +// On Windows, this assumes the command line is encoded according to the convention accepted by +// [golang.org/x/sys/windows.CmdlineToArgv] (the most common convention). If this is not suitable, +// you should instead use [Process.Cmdline] and parse the command line according to your specific +// requirements. func (p *Process) CmdlineSlice() ([]string, error) { return p.CmdlineSliceWithContext(context.Background()) } @@ -433,12 +451,12 @@ func (p *Process) Foreground() (bool, error) { } // Uids returns user ids of the process as a slice of the int -func (p *Process) Uids() ([]int32, error) { +func (p *Process) Uids() ([]uint32, error) { return p.UidsWithContext(context.Background()) } // Gids returns group ids of the process as a slice of the int -func (p *Process) Gids() ([]int32, error) { +func (p *Process) Gids() ([]uint32, error) { return p.GidsWithContext(context.Background()) } @@ -538,8 +556,8 @@ func (p *Process) Connections() ([]net.ConnectionStat, error) { } // ConnectionsMax returns a slice of net.ConnectionStat used by the process at most `max`. 
-func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { - return p.ConnectionsMaxWithContext(context.Background(), max) +func (p *Process) ConnectionsMax(maxConn int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), maxConn) } // MemoryMaps get memory maps from /proc/(pid)/smaps diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go new file mode 100644 index 00000000..e591e2d1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin || freebsd || openbsd + +package process + +import ( + "bytes" + "context" + "encoding/binary" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" +) + +type MemoryInfoExStat struct{} + +type MemoryMapsStat struct{} + +func (*Process) TgidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) IOniceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) NumFDsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) OpenFilesWithContext(_ context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) EnvironWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + err := binary.Read(br, binary.LittleEndian, &k) + return k, err +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go new file mode 100644 index 00000000..d0bba150 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +// copied from sys/sysctl.h +const ( + CTLKern = 1 // "high kernel": 
proc, limits + KernProc = 14 // struct: process entries + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcAll = 0 // everything + KernProcPathname = 12 // path to executable +) + +type _Ctype_struct___0 struct { //nolint:revive //FIXME + Pad uint64 +} + +func pidsWithContext(_ context.Context) ([]int32, error) { + var ret []int32 + + kprocs, err := unix.SysctlKinfoProcSlice("kern.proc.all") + if err != nil { + return ret, err + } + + for i := range kprocs { + proc := &kprocs[i] + ret = append(ret, int32(proc.Proc.P_pid)) + } + + return ret, nil +} + +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Eproc.Ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + name := common.ByteToString(k.Proc.P_comm[:]) + + if len(name) >= 15 { + cmdName, err := p.cmdNameWithContext(ctx) + if err != nil { + return "", err + } + if cmdName != "" { + extendedName := filepath.Base(cmdName) + if strings.HasPrefix(extendedName, p.name) { + name = extendedName + } + } + } + + return name, nil +} + +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + r, err := callPsWithContext(ctx, "state", p.Pid, false, false) + if err != nil { + return []string{""}, err + } + status := convertStatusChar(r[0][0][0:1]) + return []string{status}, err +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html + userEffectiveUID := uint32(k.Eproc.Ucred.Uid) + + return []uint32{userEffectiveUID}, nil +} + +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_svgid)) + + return gids, nil +} + +func (*Process) GroupsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError + // k, err := p.getKProc() + // if err != nil { + // return nil, err + // } + + // groups := make([]int32, k.Eproc.Ucred.Ngroups) + // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ { + // groups[i] = int32(k.Eproc.Ucred.Groups[i]) + // } + + // return groups, nil +} + +func (*Process) TerminalWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError + /* + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Eproc.Tdev) + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil + */ +} + +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { + k, err := 
p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Proc.P_nice), nil +} + +func (*Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + procs, err := ProcessesWithContext(ctx) + if err != nil { + return nil, nil + } + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) + if err != nil { + continue + } + if ppid == p.Pid { + ret = append(ret, proc) + } + } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) + return ret, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcessWithContext(ctx, pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +// Returns a proc as defined here: +// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html +func (p *Process) getKProc() (*unix.KinfoProc, error) { + return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid)) +} + +// call ps command. +// Return value deletes Header line(you must not input wrong arg). +// And split by Space. Caller have responsibility to manage. +// If passed arg pid is 0, get information from all process. +func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption, nameOption bool) ([][]string, error) { + var cmd []string + switch { + case pid == 0: // will get from all processes. + cmd = []string{"-ax", "-o", arg} + case threadOption: + cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))} + default: + cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))} + } + if nameOption { + cmd = append(cmd, "-c") + } + out, err := invoke.CommandWithContext(ctx, "ps", cmd...) 
+ if err != nil { + return [][]string{}, err + } + lines := strings.Split(string(out), "\n") + + var ret [][]string + for _, l := range lines[1:] { + var lr []string + if nameOption { + lr = append(lr, l) + } else { + for _, r := range strings.Split(l, " ") { + if r == "" { + continue + } + lr = append(lr, strings.TrimSpace(r)) + } + } + if len(lr) != 0 { + ret = append(ret, lr) + } + } + + return ret, nil +} + +type dlFuncs struct { + lib *common.Library + + procPidPath common.ProcPidPathFunc + procPidInfo common.ProcPidInfoFunc + machTimeBaseInfo common.MachTimeBaseInfoFunc +} + +func loadProcFuncs() (*dlFuncs, error) { + lib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + + return &dlFuncs{ + lib: lib, + procPidPath: common.GetFunc[common.ProcPidPathFunc](lib, common.ProcPidPathSym), + procPidInfo: common.GetFunc[common.ProcPidInfoFunc](lib, common.ProcPidInfoSym), + machTimeBaseInfo: common.GetFunc[common.MachTimeBaseInfoFunc](lib, common.MachTimeBaseInfoSym), + }, nil +} + +func (f *dlFuncs) getTimeScaleToNanoSeconds() float64 { + var timeBaseInfo common.MachTimeBaseInfo + + f.machTimeBaseInfo(uintptr(unsafe.Pointer(&timeBaseInfo))) + + return float64(timeBaseInfo.Numer) / float64(timeBaseInfo.Denom) +} + +func (f *dlFuncs) Close() { + f.lib.Close() +} + +func (p *Process) ExeWithContext(_ context.Context) (string, error) { + funcs, err := loadProcFuncs() + if err != nil { + return "", err + } + defer funcs.Close() + + buf := common.NewCStr(common.PROC_PIDPATHINFO_MAXSIZE) + ret := funcs.procPidPath(p.Pid, buf.Addr(), common.PROC_PIDPATHINFO_MAXSIZE) + + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) + } + + return buf.GoString(), nil +} + +// sys/proc_info.h +type vnodePathInfo struct { + _ [152]byte + vipPath [common.MAXPATHLEN]byte + _ [1176]byte +} + +// CwdWithContext retrieves the Current Working Directory for the given process. +// It uses the proc_pidinfo from libproc and will only work for processes the +// EUID can access. Otherwise "operation not permitted" will be returned as the +// error. +// Note: This might also work for other *BSD OSs. +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + funcs, err := loadProcFuncs() + if err != nil { + return "", err + } + defer funcs.Close() + + // Lock OS thread to ensure the errno does not change + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + var vpi vnodePathInfo + const vpiSize = int32(unsafe.Sizeof(vpi)) + ret := funcs.procPidInfo(p.Pid, common.PROC_PIDVNODEPATHINFO, 0, uintptr(unsafe.Pointer(&vpi)), vpiSize) + errno, _ := funcs.lib.Dlsym("errno") + err = *(**unix.Errno)(unsafe.Pointer(&errno)) + if errors.Is(err, unix.EPERM) { + return "", ErrorNotPermitted + } + + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) + } + + if ret != vpiSize { + return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) + } + return common.GoString(&vpi.vipPath[0]), nil +} + +func procArgs(pid int32) ([]byte, int, error) { + procargs, _, err := common.CallSyscall([]int32{common.CTL_KERN, common.KERN_PROCARGS2, pid}) + if err != nil { + return nil, 0, err + } + + // The first 4 bytes indicate the number of arguments. 
+ nargs := procargs[:4] + return procargs, int(binary.LittleEndian.Uint32(nargs)), nil +} + +func (p *Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { + return p.cmdlineSlice() +} + +func (p *Process) cmdlineSlice() ([]string, error) { + pargs, nargs, err := procArgs(p.Pid) + if err != nil { + return nil, err + } + // The first bytes hold the nargs int, skip it. + args := bytes.Split((pargs)[unsafe.Sizeof(int(0)):], []byte{0}) + var argStr string + // The first element is the actual binary/command path. + // command := args[0] + var argSlice []string + // var envSlice []string + // All other, non-zero elements are arguments. The first "nargs" elements + // are the arguments. Everything else in the slice is then the environment + // of the process. + for _, arg := range args[1:] { + argStr = string(arg) + if argStr != "" { + if nargs > 0 { + argSlice = append(argSlice, argStr) + nargs-- + continue + } + break + // envSlice = append(envSlice, argStr) + } + } + return argSlice, err +} + +// cmdNameWithContext returns the command name (including spaces) without any arguments +func (p *Process) cmdNameWithContext(_ context.Context) (string, error) { + r, err := p.cmdlineSlice() + if err != nil { + return "", err + } + + if len(r) == 0 { + return "", nil + } + + return r[0], err +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + r, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return "", err + } + return strings.Join(r, " "), err +} + +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { + funcs, err := loadProcFuncs() + if err != nil { + return 0, err + } + defer funcs.Close() + + var ti ProcTaskInfo + funcs.procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + return int32(ti.Threadnum), nil +} + +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { + funcs, err := loadProcFuncs() + if err != nil { + return nil, err + } + defer funcs.Close() + + var ti ProcTaskInfo + funcs.procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + timescaleToNanoSeconds := funcs.getTimeScaleToNanoSeconds() + ret := &cpu.TimesStat{ + CPU: "cpu", + User: float64(ti.Total_user) * timescaleToNanoSeconds / 1e9, + System: float64(ti.Total_system) * timescaleToNanoSeconds / 1e9, + } + return ret, nil +} + +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { + funcs, err := loadProcFuncs() + if err != nil { + return nil, err + } + defer funcs.Close() + + var ti ProcTaskInfo + funcs.procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + ret := &MemoryInfoStat{ + RSS: uint64(ti.Resident_size), + VMS: uint64(ti.Virtual_size), + Swap: uint64(ti.Pageins), + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go similarity index 87% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go index b353e5ea..890a5d53 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_darwin.go @@ -211,6 
+212,27 @@ type Posix_cred struct { type Label struct{} +type ProcTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go index cbd6bdc7..8075cf22 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && arm64 -// +build darwin,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_darwin.go @@ -190,6 +190,27 @@ type Posix_cred struct{} type Label struct{} +type ProcTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go new file mode 100644 index 00000000..699311a9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9 + +package process + +import ( + "context" + "syscall" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +type Signal = syscall.Signal + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +type MemoryInfoExStat struct{} + +func pidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(_ context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func PidExistsWithContext(_ context.Context, _ int32) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (*Process) PpidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) NameWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) TgidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) ExeWithContext(_ 
context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) CmdlineWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) createTimeWithContext(_ context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) CwdWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) StatusWithContext(_ context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (*Process) ForegroundWithContext(_ context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (*Process) UidsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) GidsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) GroupsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) TerminalWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) NiceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) IOniceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) NumFDsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) NumThreadsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) ChildrenWithContext(_ context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) OpenFilesWithContext(_ context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) ConnectionsWithContext(_ context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) 
ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) SendSignalWithContext(_ context.Context, _ Signal) error { + return common.ErrNotImplementedError +} + +func (*Process) SuspendWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (*Process) ResumeWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (*Process) TerminateWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (*Process) KillWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (*Process) UsernameWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) EnvironWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go similarity index 66% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go index 40b10e14..ae173ff1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go @@ -1,19 +1,23 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package process import ( "bytes" "context" + "encoding/binary" + "errors" "path/filepath" + "sort" "strconv" "strings" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - net "github.com/shirou/gopsutil/v3/net" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) func pidsWithContext(ctx context.Context) ([]int32, error) { @@ -30,7 +34,7 @@ func pidsWithContext(ctx context.Context) ([]int32, error) { return ret, nil } -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -62,11 +66,28 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { return name, nil } -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + mib := []int32{CTLKern, KernProc, KernProcCwd, p.Pid} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + + if length != sizeOfKinfoFile { + return "", errors.New("unexpected size of KinfoFile") + } + + var k kinfoFile + br := bytes.NewReader(buf) + if err := binary.Read(br, binary.LittleEndian, &k); err != nil { + return "", err + } + cwd := common.IntToString(k.Path[:]) + + return cwd, nil } -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { +func (p *Process) ExeWithContext(_ context.Context) (string, error) { mib := []int32{CTLKern, KernProc, KernProcPathname, p.Pid} buf, _, err := common.CallSyscall(mib) if err != nil { @@ -76,23 +97,20 @@ func (p *Process) ExeWithContext(ctx context.Context) (string, error) { return strings.Trim(string(buf), "\x00"), nil } -func (p 
*Process) CmdlineWithContext(ctx context.Context) (string, error) { +func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} buf, _, err := common.CallSyscall(mib) if err != nil { return "", err } ret := strings.FieldsFunc(string(buf), func(r rune) bool { - if r == '\u0000' { - return true - } - return false + return r == '\u0000' }) return strings.Join(ret, " "), nil } -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { +func (p *Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} buf, _, err := common.CallSyscall(mib) if err != nil { @@ -113,7 +131,7 @@ func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) return strParts, nil } -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -121,7 +139,7 @@ func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { return int64(k.Start.Sec)*1000 + int64(k.Start.Usec)/1000, nil } -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { +func (p *Process) StatusWithContext(_ context.Context) ([]string, error) { k, err := p.getKProc() if err != nil { return []string{""}, err @@ -157,46 +175,46 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil } -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { k, err := p.getKProc() if err != nil { return "", err @@ -212,7 +230,7 @@ func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { return termmap[ttyNr], nil } -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -220,7 +238,7 @@ func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { return int32(k.Nice), nil } -func (p *Process) 
IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -231,7 +249,7 @@ func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, e }, nil } -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -240,7 +258,7 @@ func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { return k.Numthreads, nil } -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -252,7 +270,7 @@ func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) }, nil } -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -261,7 +279,7 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e if err != nil { return nil, err } - pageSize := common.LittleEndian.Uint16([]byte(v)) + pageSize := binary.LittleEndian.Uint16([]byte(v)) return &MemoryInfoStat{ RSS: uint64(k.Rssize) * uint64(pageSize), @@ -270,18 +288,21 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + procs, err := ProcessesWithContext(ctx) if err != nil { - return nil, err + return nil, nil } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) if err != nil { - return nil, err + continue + } + if ppid == p.Pid { + ret = append(ret, proc) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } @@ -289,8 +310,8 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func ProcessesWithContext(ctx context.Context) ([]*Process, error) { @@ -331,7 +352,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go index 08ab333b..0193ba25 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go 
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go @@ -10,6 +11,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -23,6 +25,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x300 + sizeOfKinfoFile = 0x570 // TODO: should be changed by running on the target machine ) const ( @@ -190,3 +193,26 @@ type KinfoVmentry struct { X_kve_ispare [12]int32 Path [1024]int8 } + +// TODO: should be changed by running on the target machine +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +// TODO: should be changed by running on the target machine +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go new file mode 100644 index 00000000..67970f64 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 + KernProcCwd = 42 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int64 /* pargs */ + Paddr int64 /* proc */ + Addr int64 /* user */ + Tracep int64 /* vnode */ + Textvp int64 /* vnode */ + Fd int64 /* filedesc */ + Vmspace int64 /* vmspace */ + Wchan int64 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev_freebsd11 uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint64 + Rssize int64 + Swrss int64 + Tsize int64 + Dsize int64 + Ssize int64 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int64 + Kiflag int64 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu_old uint8 + Lastcpu_old uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + 
Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Moretdname [4]int8 + Sparestrings [46]int8 + Spareints [2]int32 + Tdev uint64 + Oncpu int32 + Lastcpu int32 + Tracer int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int64 /* pcb */ + Kstack int64 + Udata int64 + Tdaddr int64 /* thread */ + Pd int64 /* pwddesc, not accurate */ + Spareptrs [5]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid_freebsd11 uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev_freebsd11 uint32 + Vn_mode uint16 + Status uint16 + Type_spec [8]byte + Vn_rdev uint64 + X_kve_ispare [8]int32 + Path [1024]int8 +} + +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 +} + +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go index 81ae0b9a..6c4fbf69 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go @@ -10,6 +11,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -23,6 +25,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 // TODO: should be changed by running on the target machine ) const ( @@ -190,3 +193,26 @@ type KinfoVmentry struct { X_kve_ispare [12]int32 Path [1024]int8 } + +// TODO: should be changed by running on the target machine +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +// TODO: should be changed by running on the target machine +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go index 73ac0820..dabdc3e3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd && arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
@@ -12,6 +13,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -25,6 +27,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 ) const ( @@ -200,3 +203,24 @@ type KinfoVmentry struct { X_kve_ispare [8]int32 Path [1024]uint8 } + +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/process/process_linux.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_linux.go index 557435b3..499d54ac 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package process @@ -12,15 +12,16 @@ import ( "math" "os" "path/filepath" + "sort" "strconv" "strings" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) var pageSize = uint64(os.Getpagesize()) @@ -148,26 +149,26 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return pgid == tpgid, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.groups, nil } @@ -193,7 +194,7 @@ func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { return nice, nil } -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { +func (*Process) IOniceWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } @@ -309,7 +310,7 @@ func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) return cpuTimes, nil } -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { +func (*Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { return nil, common.ErrNotImplementedError } @@ -338,43 +339,48 @@ func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + statFiles, err := 
filepath.Glob(common.HostProcWithContext(ctx, "[0-9]*/stat")) if err != nil { return nil, err } - if len(pids) == 0 { - return nil, ErrorNoChildren - } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(statFiles)) + for _, statFile := range statFiles { + statContents, err := os.ReadFile(statFile) if err != nil { - return nil, err + continue + } + fields := splitProcStat(statContents) + pid, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + continue + } + ppid, err := strconv.ParseInt(fields[4], 10, 32) + if err != nil { + continue + } + if ppid == int64(p.Pid) { + np, err := NewProcessWithContext(ctx, int32(pid)) + if err != nil { + continue + } + ret = append(ret, np) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { _, ofs, err := p.fillFromfdWithContext(ctx) - if err != nil { - return nil, err - } - ret := make([]OpenFilesStat, len(ofs)) - for i, o := range ofs { - ret[i] = *o - } - - return ret, nil + return ofs, err } func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { @@ -399,7 +405,9 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M // function of parsing a block getBlock := func(firstLine []string, block []string) (MemoryMapsStat, error) { m := MemoryMapsStat{} - m.Path = firstLine[len(firstLine)-1] + if len(firstLine) >= 6 { + m.Path = strings.Join(firstLine[5:], " ") + } for _, line := range block { if strings.Contains(line, "VmFlags") { @@ -613,17 +621,17 @@ func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []stri } // Get num_fds from /proc/(pid)/fd -func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []*OpenFilesStat, error) { +func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []OpenFilesStat, error) { statPath, fnames, err := p.fillFromfdListWithContext(ctx) if err != nil { return 0, nil, err } numFDs := int32(len(fnames)) - var openfiles []*OpenFilesStat + openfiles := make([]OpenFilesStat, 0, numFDs) for _, fd := range fnames { fpath := filepath.Join(statPath, fd) - filepath, err := os.Readlink(fpath) + path, err := common.Readlink(fpath) if err != nil { continue } @@ -631,8 +639,8 @@ func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []*OpenFile if err != nil { return numFDs, openfiles, err } - o := &OpenFilesStat{ - Path: filepath, + o := OpenFilesStat{ + Path: path, Fd: t, } openfiles = append(openfiles, o) @@ -727,8 +735,12 @@ func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, e case "syscw": ret.WriteCount = t case "read_bytes": - ret.ReadBytes = t + ret.DiskReadBytes = t case "write_bytes": + ret.DiskWriteBytes = t + case "rchar": + ret.ReadBytes = t + case "wchar": ret.WriteBytes = t } } @@ -866,32 +878,32 @@ func (p 
*Process) fillFromStatusWithContext(ctx context.Context) error { } p.tgid = int32(pval) case "Uid": - p.uids = make([]int32, 0, 4) + p.uids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { - v, err := strconv.ParseInt(i, 10, 32) + v, err := strconv.ParseUint(i, 10, 32) if err != nil { return err } - p.uids = append(p.uids, int32(v)) + p.uids = append(p.uids, uint32(v)) } case "Gid": - p.gids = make([]int32, 0, 4) + p.gids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { - v, err := strconv.ParseInt(i, 10, 32) + v, err := strconv.ParseUint(i, 10, 32) if err != nil { return err } - p.gids = append(p.gids, int32(v)) + p.gids = append(p.gids, uint32(v)) } case "Groups": groups := strings.Fields(value) - p.groups = make([]int32, 0, len(groups)) + p.groups = make([]uint32, 0, len(groups)) for _, i := range groups { - v, err := strconv.ParseInt(i, 10, 32) + v, err := strconv.ParseUint(i, 10, 32) if err != nil { return err } - p.groups = append(p.groups, int32(v)) + p.groups = append(p.groups, uint32(v)) } case "Threads": v, err := strconv.ParseInt(value, 10, 32) @@ -912,49 +924,49 @@ func (p *Process) fillFromStatusWithContext(ctx context.Context) error { } p.numCtxSwitches.Involuntary = v case "VmRSS": - value := strings.Trim(value, " kB") // remove last "kB" + value = strings.Trim(value, " kB") // remove last "kB" v, err := strconv.ParseUint(value, 10, 64) if err != nil { return err } p.memInfo.RSS = v * 1024 case "VmSize": - value := strings.Trim(value, " kB") // remove last "kB" + value = strings.Trim(value, " kB") // remove last "kB" v, err := strconv.ParseUint(value, 10, 64) if err != nil { return err } p.memInfo.VMS = v * 1024 case "VmSwap": - value := strings.Trim(value, " kB") // remove last "kB" + value = strings.Trim(value, " kB") // remove last "kB" v, err := strconv.ParseUint(value, 10, 64) if err != nil { return err } p.memInfo.Swap = v * 1024 case "VmHWM": - value := strings.Trim(value, " kB") // remove last "kB" + value = strings.Trim(value, " kB") // remove last "kB" v, err := strconv.ParseUint(value, 10, 64) if err != nil { return err } p.memInfo.HWM = v * 1024 case "VmData": - value := strings.Trim(value, " kB") // remove last "kB" + value = strings.Trim(value, " kB") // remove last "kB" v, err := strconv.ParseUint(value, 10, 64) if err != nil { return err } p.memInfo.Data = v * 1024 case "VmStk": - value := strings.Trim(value, " kB") // remove last "kB" + value = strings.Trim(value, " kB") // remove last "kB" v, err := strconv.ParseUint(value, 10, 64) if err != nil { return err } p.memInfo.Stack = v * 1024 case "VmLck": - value := strings.Trim(value, " kB") // remove last "kB" + value = strings.Trim(value, " kB") // remove last "kB" v, err := strconv.ParseUint(value, 10, 64) if err != nil { return err @@ -1076,8 +1088,7 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui if err != nil { return 0, 0, nil, 0, 0, 0, nil, err } - ctime := (t / uint64(clockTicks)) + uint64(bootTime) - createTime := int64(ctime * 1000) + createTime := int64((t * 1000 / uint64(clockTicks)) + uint64(bootTime*1000)) rtpriority, err := strconv.ParseInt(fields[18], 10, 32) if err != nil { @@ -1162,6 +1173,9 @@ func readPidsFromDir(path string) ([]int32, error) { return nil, err } for _, fname := range fnames { + if !strictIntPtrn.MatchString(fname) { + continue + } pid, err := strconv.ParseInt(fname, 10, 32) if err != nil { // if not numeric name, just skip diff --git 
a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go similarity index 75% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go index 35869406..11bc5c18 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package process @@ -7,18 +7,20 @@ import ( "bytes" "context" "encoding/binary" - "fmt" + "errors" "io" "path/filepath" + "sort" "strconv" "strings" "unsafe" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - mem "github.com/shirou/gopsutil/v3/mem" - net "github.com/shirou/gopsutil/v3/net" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/mem" + "github.com/shirou/gopsutil/v4/net" ) func pidsWithContext(ctx context.Context) ([]int32, error) { @@ -35,7 +37,7 @@ func pidsWithContext(ctx context.Context) ([]int32, error) { return ret, nil } -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -67,7 +69,7 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { return name, nil } -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { +func (p *Process) CwdWithContext(_ context.Context) (string, error) { mib := []int32{CTLKern, KernProcCwd, p.Pid} buf, _, err := common.CallSyscall(mib) if err != nil { @@ -76,11 +78,11 @@ func (p *Process) CwdWithContext(ctx context.Context) (string, error) { return common.ByteToString(buf), nil } -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { +func (*Process) ExeWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { +func (p *Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { mib := []int32{CTLKern, KernProcArgs, p.Pid, KernProcArgv} buf, _, err := common.CallSyscall(mib) if err != nil { @@ -128,7 +130,7 @@ func readPtr(r io.Reader) (uintptr, error) { } return uintptr(p), nil default: - return 0, fmt.Errorf("unsupported pointer size") + return 0, errors.New("unsupported pointer size") } } @@ -140,11 +142,11 @@ func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { return strings.Join(argv, " "), nil } -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { +func (*Process) createTimeWithContext(_ context.Context) (int64, error) { return 0, common.ErrNotImplementedError } -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { +func (p *Process) StatusWithContext(_ context.Context) ([]string, error) { k, err := p.getKProc() if err != nil { return []string{""}, err @@ -176,46 +178,46 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids 
:= make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil } -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { k, err := p.getKProc() if err != nil { return "", err @@ -231,7 +233,7 @@ func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { return termmap[ttyNr], nil } -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -239,7 +241,7 @@ func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { return int32(k.Nice), nil } -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -250,12 +252,12 @@ func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, e }, nil } -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { +func (*Process) NumThreadsWithContext(_ context.Context) (int32, error) { /* not supported, just return 1 */ return 1, nil } -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -285,26 +287,29 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + procs, err := ProcessesWithContext(ctx) if err != nil { - return nil, err + return nil, nil } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) if err != nil { - return nil, err + continue + } + if ppid == p.Pid { + ret = append(ret, proc) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { +func (*Process) ConnectionsWithContext(_ context.Context) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) 
([]net.ConnectionStat, error) { +func (*Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } @@ -343,7 +348,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) @@ -353,7 +358,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return &k, nil } -func callKernProcSyscall(op int32, arg int32) ([]byte, uint64, error) { +func callKernProcSyscall(op, arg int32) ([]byte, uint64, error) { mib := []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, 0} mibptr := unsafe.Pointer(&mib[0]) miblen := uint64(len(mib)) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go index 6e9edc20..5b84706a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go index a46d28af..3229bb32 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go index 68ea3c8f..6f74ce75 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go index fa620ff6..91045456 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs process/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go index b677e70a..e3e0d36a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go new file mode 100644 index 00000000..bdb07ff2 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build plan9 + +package process + +import ( + "context" + "syscall" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +type Signal = syscall.Note + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +type MemoryInfoExStat struct{} + +func pidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(_ context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func PidExistsWithContext(_ context.Context, _ int32) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (*Process) PpidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) NameWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) TgidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) ExeWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) CmdlineWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) createTimeWithContext(_ context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) CwdWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) StatusWithContext(_ context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (*Process) ForegroundWithContext(_ context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (*Process) UidsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) GidsWithContext(_ context.Context) ([]uint32, error) { + 
return nil, common.ErrNotImplementedError +} + +func (*Process) GroupsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) TerminalWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) NiceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) IOniceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) NumFDsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) NumThreadsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (*Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) ChildrenWithContext(_ context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) OpenFilesWithContext(_ context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) ConnectionsWithContext(_ context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (*Process) SendSignalWithContext(_ context.Context, _ Signal) error { + return common.ErrNotImplementedError +} + +func (*Process) SuspendWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (*Process) ResumeWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (*Process) TerminateWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (*Process) KillWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (*Process) UsernameWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (*Process) EnvironWithContext(_ context.Context) ([]string, error) { + 
return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/process/process_posix.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_posix.go index a01f9ecf..9fe55b49 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd || darwin || solaris -// +build linux freebsd openbsd darwin solaris package process @@ -16,7 +16,7 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) type Signal = syscall.Signal @@ -67,11 +67,12 @@ func getTerminalMap() (map[uint64]string, error) { for _, name := range termfiles { stat := unix.Stat_t{} - if err = unix.Stat(name, &stat); err != nil { + err = unix.Stat(name, &stat) + if err != nil { return nil, err } rdev := uint64(stat.Rdev) - ret[rdev] = strings.Replace(name, "/dev", "", -1) + ret[rdev] = strings.ReplaceAll(name, "/dev", "") } return ret, nil } @@ -108,6 +109,7 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { if err != nil { return false, err } + defer proc.Release() if isMount(common.HostProcWithContext(ctx)) { // if //proc exists and is mounted, check if //proc/ folder exists _, err := os.Stat(common.HostProcWithContext(ctx, strconv.Itoa(int(pid)))) @@ -139,11 +141,12 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { return false, err } -func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { +func (p *Process) SendSignalWithContext(_ context.Context, sig syscall.Signal) error { process, err := os.FindProcess(int(p.Pid)) if err != nil { return err } + defer process.Release() err = process.Signal(sig) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go similarity index 68% rename from vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go index dd4bd476..547d2287 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package process import ( @@ -7,9 +8,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type MemoryMapsStat struct { @@ -51,15 +52,15 @@ func ProcessesWithContext(ctx context.Context) ([]*Process, error) { return out, nil } -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { +func (*Process) PpidWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) NameWithContext(ctx context.Context) (string, error) { +func (*Process) NameWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { +func (*Process) TgidWithContext(_ context.Context) 
(int32, error) { return 0, common.ErrNotImplementedError } @@ -79,7 +80,7 @@ func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) return p.fillSliceFromCmdlineWithContext(ctx) } -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { +func (*Process) createTimeWithContext(_ context.Context) (int64, error) { return 0, common.ErrNotImplementedError } @@ -87,51 +88,51 @@ func (p *Process) CwdWithContext(ctx context.Context) (string, error) { return p.fillFromPathCwdWithContext(ctx) } -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { +func (*Process) StatusWithContext(_ context.Context) ([]string, error) { return []string{""}, common.ErrNotImplementedError } -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { +func (*Process) ForegroundWithContext(_ context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (*Process) UidsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (*Process) GidsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (*Process) GroupsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { +func (*Process) TerminalWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { +func (*Process) NiceWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { +func (*Process) IOniceWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { +func (*Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { +func (*Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { +func (*Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { +func (*Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { return nil, common.ErrNotImplementedError } @@ -140,55 +141,55 @@ func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { return int32(len(fnames)), err } -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { +func (*Process) NumThreadsWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { +func (*Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) 
TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { +func (*Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { +func (*Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { +func (*Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { +func (*Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { +func (*Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { +func (*Process) ChildrenWithContext(_ context.Context) ([]*Process, error) { return nil, common.ErrNotImplementedError } -func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { +func (*Process) OpenFilesWithContext(_ context.Context) ([]OpenFilesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { +func (*Process) ConnectionsWithContext(_ context.Context) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (*Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { +func (*Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { +func (*Process) EnvironWithContext(_ context.Context) ([]string, error) { return nil, common.ErrNotImplementedError } @@ -246,10 +247,7 @@ func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error return "", err } ret := strings.FieldsFunc(string(cmdline), func(r rune) bool { - if r == '\u0000' { - return true - } - return false + return r == '\u0000' }) return strings.Join(ret, " "), nil @@ -291,6 +289,9 @@ func readPidsFromDir(path string) ([]int32, error) { return nil, err } for _, fname := range fnames { + if !strictIntPtrn.MatchString(fname) { + continue + } pid, err := strconv.ParseInt(fname, 10, 32) if err != nil { // if not numeric name, just skip diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows.go index f2053d98..f4cbfa29 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows 
package process @@ -12,16 +12,16 @@ import ( "os" "path/filepath" "reflect" - "strings" "syscall" "time" "unicode/utf16" "unsafe" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type Signal = syscall.Signal @@ -43,6 +43,7 @@ var ( procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass") procGetProcessIoCounters = common.Modkernel32.NewProc("GetProcessIoCounters") procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetProcessHandleCount = common.Modkernel32.NewProc("GetProcessHandleCount") processorArchitecture uint ) @@ -201,10 +202,10 @@ type ( ) func init() { - var systemInfo systemInfo + var sInfo systemInfo - procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) - processorArchitecture = uint(systemInfo.wProcessorArchitecture) + procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&sInfo))) + processorArchitecture = uint(sInfo.wProcessorArchitecture) // enable SeDebugPrivilege https://github.com/midstar/proci/blob/6ec79f57b90ba3d9efa2a7b16ef9c9369d4be875/proci_windows.go#L80-L119 handle, err := syscall.GetCurrentProcess() @@ -240,11 +241,11 @@ func init() { 0) } -func pidsWithContext(ctx context.Context) ([]int32, error) { +func pidsWithContext(_ context.Context) ([]int32, error) { // inspired by https://gist.github.com/henkman/3083408 // and https://github.com/giampaolo/psutil/blob/1c3a15f637521ba5c0031283da39c733fda53e4c/psutil/arch/windows/process_info.c#L315-L329 var ret []int32 - var read uint32 = 0 + var read uint32 var psSize uint32 = 1024 const dwordSize uint32 = 4 @@ -287,10 +288,10 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { return false, err } h, err := windows.OpenProcess(windows.SYNCHRONIZE, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED { + if errors.Is(err, windows.ERROR_ACCESS_DENIED) { return true, nil } - if err == windows.ERROR_INVALID_PARAMETER { + if errors.Is(err, windows.ERROR_INVALID_PARAMETER) { return false, nil } if err != nil { @@ -301,7 +302,7 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { return event == uint32(windows.WAIT_TIMEOUT), err } -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { // if cached already, return from cache cachedPpid := p.getPpid() if cachedPpid != 0 { @@ -329,17 +330,17 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { exe, err := p.ExeWithContext(ctx) if err != nil { - return "", fmt.Errorf("could not get Name: %s", err) + return "", fmt.Errorf("could not get Name: %w", err) } return filepath.Base(exe), nil } -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { +func (*Process) TgidWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { +func (p *Process) ExeWithContext(_ context.Context) (string, error) { c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) if err != nil { return "", err @@ -356,20 +357,20 @@ func (p *Process) ExeWithContext(ctx context.Context) (string, error) { if ret == 0 { return "", err } - return windows.UTF16ToString(buf[:]), nil + return windows.UTF16ToString(buf), nil } // XP 
fallback ret, _, err := procGetProcessImageFileNameW.Call(uintptr(c), uintptr(unsafe.Pointer(&buf[0])), uintptr(size)) if ret == 0 { return "", err } - return common.ConvertDOSPath(windows.UTF16ToString(buf[:])), nil + return common.ConvertDOSPath(windows.UTF16ToString(buf)), nil } func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { cmdline, err := getProcessCommandLine(p.Pid) if err != nil { - return "", fmt.Errorf("could not get CommandLine: %s", err) + return "", fmt.Errorf("could not get CommandLine: %w", err) } return cmdline, nil } @@ -379,13 +380,33 @@ func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) if err != nil { return nil, err } - return strings.Split(cmdline, " "), nil + return parseCmdline(cmdline) +} + +func parseCmdline(cmdline string) ([]string, error) { + cmdlineptr, err := windows.UTF16PtrFromString(cmdline) + if err != nil { + return nil, err + } + + var argc int32 + argvptr, err := windows.CommandLineToArgv(cmdlineptr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argvptr)))) + + argv := make([]string, argc) + for i, v := range (*argvptr)[:argc] { + argv[i] = windows.UTF16ToString((*v)[:]) + } + return argv, nil } -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { ru, err := getRusage(p.Pid) if err != nil { - return 0, fmt.Errorf("could not get CreationDate: %s", err) + return 0, fmt.Errorf("could not get CreationDate: %w", err) } return ru.CreationTime.Nanoseconds() / 1000000, nil @@ -393,7 +414,7 @@ func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { func (p *Process) CwdWithContext(_ context.Context) (string, error) { h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(p.Pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) { return "", nil } if err != nil { @@ -435,15 +456,15 @@ func (p *Process) CwdWithContext(_ context.Context) (string, error) { return "", nil } -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { +func (*Process) StatusWithContext(_ context.Context) ([]string, error) { return []string{""}, common.ErrNotImplementedError } -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { +func (*Process) ForegroundWithContext(_ context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { +func (p *Process) UsernameWithContext(_ context.Context) (string, error) { pid := p.Pid c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) if err != nil { @@ -466,19 +487,19 @@ func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { return domain + "\\" + user, err } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (*Process) UidsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (*Process) GidsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (*Process) GroupsWithContext(_ context.Context) 
([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { +func (*Process) TerminalWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } @@ -494,7 +515,7 @@ var priorityClasses = map[int]int32{ 0x00000100: 24, // REALTIME_PRIORITY_CLASS } -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) if err != nil { return 0, err @@ -511,48 +532,61 @@ func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { return priority, nil } -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { +func (*Process) IOniceWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { +func (*Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { +func (*Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) if err != nil { return nil, err } defer windows.CloseHandle(c) - var ioCounters ioCounters - ret, _, err := procGetProcessIoCounters.Call(uintptr(c), uintptr(unsafe.Pointer(&ioCounters))) + var counters ioCounters + ret, _, err := procGetProcessIoCounters.Call(uintptr(c), uintptr(unsafe.Pointer(&counters))) if ret == 0 { return nil, err } stats := &IOCountersStat{ - ReadCount: ioCounters.ReadOperationCount, - ReadBytes: ioCounters.ReadTransferCount, - WriteCount: ioCounters.WriteOperationCount, - WriteBytes: ioCounters.WriteTransferCount, + ReadCount: counters.ReadOperationCount, + ReadBytes: counters.ReadTransferCount, + WriteCount: counters.WriteOperationCount, + WriteBytes: counters.WriteTransferCount, } return stats, nil } -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { +func (*Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError +// NumFDsWithContext returns the number of handles for a process on Windows, +// not the number of file descriptors (FDs). 
+func (p *Process) NumFDsWithContext(_ context.Context) (int32, error) { + handle, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return 0, err + } + defer windows.CloseHandle(handle) + + var handleCount uint32 + ret, _, err := procGetProcessHandleCount.Call(uintptr(handle), uintptr(unsafe.Pointer(&handleCount))) + if ret == 0 { + return 0, err + } + return int32(handleCount), nil } -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { ppid, ret, _, err := getFromSnapProcess(p.Pid) if err != nil { return 0, err @@ -560,18 +594,18 @@ func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { // if no errors and not cached already, cache ppid p.parent = ppid - if 0 == p.getPpid() { + if p.getPpid() == 0 { p.setPpid(ppid) } return ret, nil } -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { +func (*Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { sysTimes, err := getProcessCPUTimes(p.Pid) if err != nil { return nil, err @@ -595,11 +629,11 @@ func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) }, nil } -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { +func (*Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { mem, err := getMemoryInfo(p.Pid) if err != nil { return nil, err @@ -613,12 +647,22 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e return ret, nil } -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { +func (*Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { - return nil, common.ErrNotImplementedError +func (p *Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { + mem, err := getMemoryInfo(p.Pid) + if err != nil { + return nil, err + } + + ret := &PageFaultsStat{ + // Since Windows does not distinguish between Major and Minor faults, all faults are treated as Major + MajorFaults: uint64(mem.PageFaultCount), + } + + return ret, nil } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { @@ -655,6 +699,7 @@ func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, er if err != nil { return nil, err } + defer windows.CloseHandle(process) buffer := make([]byte, 1024) var size uint32 @@ -744,19 +789,19 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (*Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryMapsWithContext(ctx 
context.Context, grouped bool) (*[]MemoryMapsStat, error) { +func (*Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { +func (*Process) SendSignalWithContext(_ context.Context, _ syscall.Signal) error { return common.ErrNotImplementedError } -func (p *Process) SuspendWithContext(ctx context.Context) error { +func (p *Process) SuspendWithContext(_ context.Context) error { c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) if err != nil { return err @@ -772,7 +817,7 @@ func (p *Process) SuspendWithContext(ctx context.Context) error { return nil } -func (p *Process) ResumeWithContext(ctx context.Context) error { +func (p *Process) ResumeWithContext(_ context.Context) error { c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) if err != nil { return err @@ -788,7 +833,7 @@ func (p *Process) ResumeWithContext(ctx context.Context) error { return nil } -func (p *Process) TerminateWithContext(ctx context.Context) error { +func (p *Process) TerminateWithContext(_ context.Context) error { proc, err := windows.OpenProcess(windows.PROCESS_TERMINATE, false, uint32(p.Pid)) if err != nil { return err @@ -798,18 +843,19 @@ func (p *Process) TerminateWithContext(ctx context.Context) error { return err } -func (p *Process) KillWithContext(ctx context.Context) error { +func (p *Process) KillWithContext(_ context.Context) error { process, err := os.FindProcess(int(p.Pid)) if err != nil { return err } + defer process.Release() return process.Kill() } func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { - envVars, err := getProcessEnvironmentVariables(p.Pid, ctx) + envVars, err := getProcessEnvironmentVariables(ctx, p.Pid) if err != nil { - return nil, fmt.Errorf("could not get environment variables: %s", err) + return nil, fmt.Errorf("could not get environment variables: %w", err) } return envVars, nil } @@ -829,7 +875,7 @@ func (p *Process) setPpid(ppid int32) { p.parent = ppid } -func getFromSnapProcess(pid int32) (int32, int32, string, error) { +func getFromSnapProcess(pid int32) (int32, int32, string, error) { //nolint:unparam //FIXME snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(pid)) if err != nil { return 0, 0, "", err @@ -837,7 +883,8 @@ func getFromSnapProcess(pid int32) (int32, int32, string, error) { defer windows.CloseHandle(snap) var pe32 windows.ProcessEntry32 pe32.Size = uint32(unsafe.Sizeof(pe32)) - if err = windows.Process32First(snap, &pe32); err != nil { + err = windows.Process32First(snap, &pe32) + if err != nil { return 0, 0, "", err } for { @@ -857,7 +904,7 @@ func ProcessesWithContext(ctx context.Context) ([]*Process, error) { pids, err := PidsWithContext(ctx) if err != nil { - return out, fmt.Errorf("could not get Processes %s", err) + return out, fmt.Errorf("could not get Processes %w", err) } for _, pid := range pids { @@ -910,10 +957,10 @@ func getProcessMemoryInfo(h windows.Handle, mem *PROCESS_MEMORY_COUNTERS) (err e err = syscall.EINVAL } } - return + return err } -type SYSTEM_TIMES struct { +type SYSTEM_TIMES struct { //nolint:revive //FIXME CreateTime syscall.Filetime ExitTime syscall.Filetime KernelTime syscall.Filetime @@ -948,13 +995,13 @@ func getUserProcessParams32(handle windows.Handle) (rtlUserProcessParameters32, buf := readProcessMemory(syscall.Handle(handle), true, pebAddress, 
uint(unsafe.Sizeof(processEnvironmentBlock32{}))) if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock32{})) { - return rtlUserProcessParameters32{}, fmt.Errorf("cannot read process PEB") + return rtlUserProcessParameters32{}, errors.New("cannot read process PEB") } peb := (*processEnvironmentBlock32)(unsafe.Pointer(&buf[0])) userProcessAddress := uint64(peb.ProcessParameters) buf = readProcessMemory(syscall.Handle(handle), true, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters32{}))) if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters32{})) { - return rtlUserProcessParameters32{}, fmt.Errorf("cannot read user process parameters") + return rtlUserProcessParameters32{}, errors.New("cannot read user process parameters") } return *(*rtlUserProcessParameters32)(unsafe.Pointer(&buf[0])), nil } @@ -967,13 +1014,13 @@ func getUserProcessParams64(handle windows.Handle) (rtlUserProcessParameters64, buf := readProcessMemory(syscall.Handle(handle), false, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock64{}))) if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock64{})) { - return rtlUserProcessParameters64{}, fmt.Errorf("cannot read process PEB") + return rtlUserProcessParameters64{}, errors.New("cannot read process PEB") } peb := (*processEnvironmentBlock64)(unsafe.Pointer(&buf[0])) userProcessAddress := peb.ProcessParameters buf = readProcessMemory(syscall.Handle(handle), false, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters64{}))) if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters64{})) { - return rtlUserProcessParameters64{}, fmt.Errorf("cannot read user process parameters") + return rtlUserProcessParameters64{}, errors.New("cannot read user process parameters") } return *(*rtlUserProcessParameters64)(unsafe.Pointer(&buf[0])), nil } @@ -1023,9 +1070,9 @@ func is32BitProcess(h windows.Handle) bool { return procIs32Bits } -func getProcessEnvironmentVariables(pid int32, ctx context.Context) ([]string, error) { +func getProcessEnvironmentVariables(ctx context.Context, pid int32) ([]string, error) { h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) { return nil, nil } if err != nil { @@ -1109,7 +1156,7 @@ func (p *processReader) Read(buf []byte) (int, error) { func getProcessCommandLine(pid int32) (string, error) { h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) { return "", nil } if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go similarity index 52% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go index db4d4533..911351b1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && 386) || (windows && arm) -// +build windows,386 windows,arm package process @@ -8,11 +8,12 @@ 
import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) -type PROCESS_MEMORY_COUNTERS struct { +type PROCESS_MEMORY_COUNTERS struct { //nolint:revive //FIXME CB uint32 PageFaultCount uint32 PeakWorkingSetSize uint32 @@ -39,30 +40,27 @@ func queryPebAddress(procHandle syscall.Handle, is32BitProcess bool) (uint64, er ) if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { return uint64(info.PebBaseAddress), nil - } else { - return 0, windows.NTStatus(ret) } - } else { - // we are on a 32-bit process reading an external 64-bit process - if common.ProcNtWow64QueryInformationProcess64.Find() == nil { // avoid panic - var info processBasicInformation64 + return 0, windows.NTStatus(ret) + } + // we are on a 32-bit process reading an external 64-bit process + if common.ProcNtWow64QueryInformationProcess64.Find() != nil { + return 0, errors.New("can't find API to query 64 bit process from 32 bit") + } + // avoid panic + var info processBasicInformation64 - ret, _, _ := common.ProcNtWow64QueryInformationProcess64.Call( - uintptr(procHandle), - uintptr(common.ProcessBasicInformation), - uintptr(unsafe.Pointer(&info)), - uintptr(unsafe.Sizeof(info)), - uintptr(0), - ) - if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { - return info.PebBaseAddress, nil - } else { - return 0, windows.NTStatus(ret) - } - } else { - return 0, errors.New("can't find API to query 64 bit process from 32 bit") - } + ret, _, _ := common.ProcNtWow64QueryInformationProcess64.Call( + uintptr(procHandle), + uintptr(common.ProcessBasicInformation), + uintptr(unsafe.Pointer(&info)), + uintptr(unsafe.Sizeof(info)), + uintptr(0), + ) + if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { + return info.PebBaseAddress, nil } + return 0, windows.NTStatus(ret) } func readProcessMemory(h syscall.Handle, is32BitProcess bool, address uint64, size uint) []byte { @@ -81,25 +79,23 @@ func readProcessMemory(h syscall.Handle, is32BitProcess bool, address uint64, si if int(ret) >= 0 && read > 0 { return buffer[:read] } - } else { // reading a 64-bit process from a 32-bit one - if common.ProcNtWow64ReadVirtualMemory64.Find() == nil { // avoid panic - var read uint64 + } else if common.ProcNtWow64ReadVirtualMemory64.Find() == nil { // avoid panic + var read uint64 - buffer := make([]byte, size) + buffer := make([]byte, size) - ret, _, _ := common.ProcNtWow64ReadVirtualMemory64.Call( - uintptr(h), - uintptr(address&0xFFFFFFFF), // the call expects a 64-bit value - uintptr(address>>32), - uintptr(unsafe.Pointer(&buffer[0])), - uintptr(size), // the call expects a 64-bit value - uintptr(0), // but size is 32-bit so pass zero as the high dword - uintptr(unsafe.Pointer(&read)), - ) - if int(ret) >= 0 && read > 0 { - return buffer[:uint(read)] - } + ret, _, _ := common.ProcNtWow64ReadVirtualMemory64.Call( + uintptr(h), + uintptr(address&0xFFFFFFFF), // the call expects a 64-bit value + uintptr(address>>32), + uintptr(unsafe.Pointer(&buffer[0])), + uintptr(size), // the call expects a 64-bit value + uintptr(0), // but size is 32-bit so pass zero as the high dword + uintptr(unsafe.Pointer(&read)), + ) + if int(ret) >= 0 && read > 0 { + return buffer[:uint(read)] } } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go similarity index 68% rename from 
vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go index 74c6212c..8cc26c37 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && amd64) || (windows && arm64) -// +build windows,amd64 windows,arm64 package process @@ -7,11 +7,12 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) -type PROCESS_MEMORY_COUNTERS struct { +type PROCESS_MEMORY_COUNTERS struct { //nolint:revive //FIXME CB uint32 PageFaultCount uint32 PeakWorkingSetSize uint64 @@ -38,26 +39,23 @@ func queryPebAddress(procHandle syscall.Handle, is32BitProcess bool) (uint64, er ) if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { return uint64(wow64), nil - } else { - return 0, windows.NTStatus(ret) } - } else { - // we are on a 64-bit process reading an external 64-bit process - var info processBasicInformation64 + return 0, windows.NTStatus(ret) + } + // we are on a 64-bit process reading an external 64-bit process + var info processBasicInformation64 - ret, _, _ := common.ProcNtQueryInformationProcess.Call( - uintptr(procHandle), - uintptr(common.ProcessBasicInformation), - uintptr(unsafe.Pointer(&info)), - uintptr(unsafe.Sizeof(info)), - uintptr(0), - ) - if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { - return info.PebBaseAddress, nil - } else { - return 0, windows.NTStatus(ret) - } + ret, _, _ := common.ProcNtQueryInformationProcess.Call( + uintptr(procHandle), + uintptr(common.ProcessBasicInformation), + uintptr(unsafe.Pointer(&info)), + uintptr(unsafe.Sizeof(info)), + uintptr(0), + ) + if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { + return info.PebBaseAddress, nil } + return 0, windows.NTStatus(ret) } func readProcessMemory(procHandle syscall.Handle, _ bool, address uint64, size uint) []byte { diff --git a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml deleted file mode 100644 index dc6fefb9..00000000 --- a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml +++ /dev/null @@ -1,12 +0,0 @@ -run: - timeout: 5m -linters: - enable: - - gofmt - - errcheck - - errname - - errorlint - - bodyclose - - durationcheck - - whitespace - diff --git a/vendor/github.com/shoenig/go-m1cpu/LICENSE b/vendor/github.com/shoenig/go-m1cpu/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/shoenig/go-m1cpu/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. 
that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/shoenig/go-m1cpu/Makefile b/vendor/github.com/shoenig/go-m1cpu/Makefile deleted file mode 100644 index 28d78639..00000000 --- a/vendor/github.com/shoenig/go-m1cpu/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -SHELL = bash - -default: test - -.PHONY: test -test: - @echo "--> Running Tests ..." - @go test -v -race ./... - -vet: - @echo "--> Vet Go sources ..." - @go vet ./... diff --git a/vendor/github.com/shoenig/go-m1cpu/README.md b/vendor/github.com/shoenig/go-m1cpu/README.md deleted file mode 100644 index 399657ac..00000000 --- a/vendor/github.com/shoenig/go-m1cpu/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# m1cpu - -[![Go Reference](https://pkg.go.dev/badge/github.com/shoenig/go-m1cpu.svg)](https://pkg.go.dev/github.com/shoenig/go-m1cpu) -[![MPL License](https://img.shields.io/github/license/shoenig/go-m1cpu?color=g&style=flat-square)](https://github.com/shoenig/go-m1cpu/blob/main/LICENSE) -[![Run CI Tests](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml/badge.svg)](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml) - -The `go-m1cpu` module is a library for inspecting Apple Silicon CPUs in Go. - -Use the `m1cpu` Go package for looking up the CPU frequency for Apple M1 and M2 CPUs. - -# Install - -```shell -go get github.com/shoenig/go-m1cpu@latest -``` - -# CGO - -This package requires the use of [CGO](https://go.dev/blog/cgo). - -Extracting the CPU properties is done via Apple's [IOKit](https://developer.apple.com/documentation/iokit?language=objc) -framework, which is accessible only through system C libraries. - -# Example - -Simple Go program to print Apple Silicon M1/M2 CPU speeds. - -```go -package main - -import ( - "fmt" - - "github.com/shoenig/go-m1cpu" -) - -func main() { - fmt.Println("Apple Silicon", m1cpu.IsAppleSilicon()) - - fmt.Println("pCore GHz", m1cpu.PCoreGHz()) - fmt.Println("eCore GHz", m1cpu.ECoreGHz()) - - fmt.Println("pCore Hz", m1cpu.PCoreHz()) - fmt.Println("eCore Hz", m1cpu.ECoreHz()) -} -``` - -Using `go test` to print out available information. 
- -``` -➜ go test -v -run Show -=== RUN Test_Show - cpu_test.go:42: pCore Hz 3504000000 - cpu_test.go:43: eCore Hz 2424000000 - cpu_test.go:44: pCore GHz 3.504 - cpu_test.go:45: eCore GHz 2.424 - cpu_test.go:46: pCore count 8 - cpu_test.go:47: eCoreCount 4 - cpu_test.go:50: pCore Caches 196608 131072 16777216 - cpu_test.go:53: eCore Caches 131072 65536 4194304 ---- PASS: Test_Show (0.00s) -``` - -# License - -Open source under the [MPL](LICENSE) diff --git a/vendor/github.com/shoenig/go-m1cpu/cpu.go b/vendor/github.com/shoenig/go-m1cpu/cpu.go deleted file mode 100644 index 502a8cce..00000000 --- a/vendor/github.com/shoenig/go-m1cpu/cpu.go +++ /dev/null @@ -1,213 +0,0 @@ -//go:build darwin && arm64 && cgo - -package m1cpu - -// #cgo LDFLAGS: -framework CoreFoundation -framework IOKit -// #include -// #include -// #include -// #include -// -// #if !defined(MAC_OS_VERSION_12_0) || MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0 -// #define kIOMainPortDefault kIOMasterPortDefault -// #endif -// -// #define HzToGHz(hz) ((hz) / 1000000000.0) -// -// UInt64 global_pCoreHz; -// UInt64 global_eCoreHz; -// int global_pCoreCount; -// int global_eCoreCount; -// int global_pCoreL1InstCacheSize; -// int global_eCoreL1InstCacheSize; -// int global_pCoreL1DataCacheSize; -// int global_eCoreL1DataCacheSize; -// int global_pCoreL2CacheSize; -// int global_eCoreL2CacheSize; -// char global_brand[32]; -// -// UInt64 getFrequency(CFTypeRef typeRef) { -// CFDataRef cfData = typeRef; -// -// CFIndex size = CFDataGetLength(cfData); -// UInt8 buf[size]; -// CFDataGetBytes(cfData, CFRangeMake(0, size), buf); -// -// UInt8 b1 = buf[size-5]; -// UInt8 b2 = buf[size-6]; -// UInt8 b3 = buf[size-7]; -// UInt8 b4 = buf[size-8]; -// -// UInt64 pCoreHz = 0x00000000FFFFFFFF & ((b1<<24) | (b2 << 16) | (b3 << 8) | (b4)); -// return pCoreHz; -// } -// -// int sysctl_int(const char * name) { -// int value = -1; -// size_t size = 8; -// sysctlbyname(name, &value, &size, NULL, 0); -// return value; -// } -// -// void sysctl_string(const char * name, char * dest) { -// size_t size = 32; -// sysctlbyname(name, dest, &size, NULL, 0); -// } -// -// void initialize() { -// global_pCoreCount = sysctl_int("hw.perflevel0.physicalcpu"); -// global_eCoreCount = sysctl_int("hw.perflevel1.physicalcpu"); -// global_pCoreL1InstCacheSize = sysctl_int("hw.perflevel0.l1icachesize"); -// global_eCoreL1InstCacheSize = sysctl_int("hw.perflevel1.l1icachesize"); -// global_pCoreL1DataCacheSize = sysctl_int("hw.perflevel0.l1dcachesize"); -// global_eCoreL1DataCacheSize = sysctl_int("hw.perflevel1.l1dcachesize"); -// global_pCoreL2CacheSize = sysctl_int("hw.perflevel0.l2cachesize"); -// global_eCoreL2CacheSize = sysctl_int("hw.perflevel1.l2cachesize"); -// sysctl_string("machdep.cpu.brand_string", global_brand); -// -// CFMutableDictionaryRef matching = IOServiceMatching("AppleARMIODevice"); -// io_iterator_t iter; -// IOServiceGetMatchingServices(kIOMainPortDefault, matching, &iter); -// -// const size_t bufsize = 512; -// io_object_t obj; -// while ((obj = IOIteratorNext(iter))) { -// char class[bufsize]; -// IOObjectGetClass(obj, class); -// char name[bufsize]; -// IORegistryEntryGetName(obj, name); -// -// if (strncmp(name, "pmgr", bufsize) == 0) { -// CFTypeRef pCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states5-sram"), kCFAllocatorDefault, 0); -// CFTypeRef eCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states1-sram"), kCFAllocatorDefault, 0); -// -// long long pCoreHz = getFrequency(pCoreRef); -// long long 
eCoreHz = getFrequency(eCoreRef); -// -// global_pCoreHz = pCoreHz; -// global_eCoreHz = eCoreHz; -// return; -// } -// } -// } -// -// UInt64 eCoreHz() { -// return global_eCoreHz; -// } -// -// UInt64 pCoreHz() { -// return global_pCoreHz; -// } -// -// Float64 eCoreGHz() { -// return HzToGHz(global_eCoreHz); -// } -// -// Float64 pCoreGHz() { -// return HzToGHz(global_pCoreHz); -// } -// -// int pCoreCount() { -// return global_pCoreCount; -// } -// -// int eCoreCount() { -// return global_eCoreCount; -// } -// -// int pCoreL1InstCacheSize() { -// return global_pCoreL1InstCacheSize; -// } -// -// int pCoreL1DataCacheSize() { -// return global_pCoreL1DataCacheSize; -// } -// -// int pCoreL2CacheSize() { -// return global_pCoreL2CacheSize; -// } -// -// int eCoreL1InstCacheSize() { -// return global_eCoreL1InstCacheSize; -// } -// -// int eCoreL1DataCacheSize() { -// return global_eCoreL1DataCacheSize; -// } -// -// int eCoreL2CacheSize() { -// return global_eCoreL2CacheSize; -// } -// -// char * modelName() { -// return global_brand; -// } -import "C" - -func init() { - C.initialize() -} - -// IsAppleSilicon returns true on this platform. -func IsAppleSilicon() bool { - return true -} - -// PCoreHZ returns the max frequency in Hertz of the P-Core of an Apple Silicon CPU. -func PCoreHz() uint64 { - return uint64(C.pCoreHz()) -} - -// ECoreHZ returns the max frequency in Hertz of the E-Core of an Apple Silicon CPU. -func ECoreHz() uint64 { - return uint64(C.eCoreHz()) -} - -// PCoreGHz returns the max frequency in Gigahertz of the P-Core of an Apple Silicon CPU. -func PCoreGHz() float64 { - return float64(C.pCoreGHz()) -} - -// ECoreGHz returns the max frequency in Gigahertz of the E-Core of an Apple Silicon CPU. -func ECoreGHz() float64 { - return float64(C.eCoreGHz()) -} - -// PCoreCount returns the number of physical P (performance) cores. -func PCoreCount() int { - return int(C.pCoreCount()) -} - -// ECoreCount returns the number of physical E (efficiency) cores. -func ECoreCount() int { - return int(C.eCoreCount()) -} - -// PCoreCacheSize returns the sizes of the P (performance) core cache sizes -// in the order of -// -// - L1 instruction cache -// - L1 data cache -// - L2 cache -func PCoreCache() (int, int, int) { - return int(C.pCoreL1InstCacheSize()), - int(C.pCoreL1DataCacheSize()), - int(C.pCoreL2CacheSize()) -} - -// ECoreCacheSize returns the sizes of the E (efficiency) core cache sizes -// in the order of -// -// - L1 instruction cache -// - L1 data cache -// - L2 cache -func ECoreCache() (int, int, int) { - return int(C.eCoreL1InstCacheSize()), - int(C.eCoreL1DataCacheSize()), - int(C.eCoreL2CacheSize()) -} - -// ModelName returns the model name of the CPU. -func ModelName() string { - return C.GoString(C.modelName()) -} diff --git a/vendor/github.com/shoenig/go-m1cpu/incompatible.go b/vendor/github.com/shoenig/go-m1cpu/incompatible.go deleted file mode 100644 index d425025a..00000000 --- a/vendor/github.com/shoenig/go-m1cpu/incompatible.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build !darwin || !arm64 || !cgo - -package m1cpu - -// IsAppleSilicon return false on this platform. 
-func IsAppleSilicon() bool { - return false -} - -// PCoreHZ requires darwin/arm64 -func PCoreHz() uint64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreHZ requires darwin/arm64 -func ECoreHz() uint64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreGHz requires darwin/arm64 -func PCoreGHz() float64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreGHz requires darwin/arm64 -func ECoreGHz() float64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreCount requires darwin/arm64 -func PCoreCount() int { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreCount requires darwin/arm64 -func ECoreCount() int { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreCacheSize requires darwin/arm64 -func PCoreCache() (int, int, int) { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreCacheSize requires darwin/arm64 -func ECoreCache() (int, int, int) { - panic("m1cpu: not a darwin/arm64 system") -} - -// ModelName requires darwin/arm64 -func ModelName() string { - panic("m1cpu: not a darwin/arm64 system") -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 7e19eba0..ffb24e8e 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) } // Less asserts that the first element is less than the second @@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) 
} // Positive asserts that the specified element is positive @@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { @@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) } return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 19063416..c592f6ad 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. +// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 21629087..58db9284 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg return NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1d2f7182..2fdf80fd 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 4e91332b..de8de0cb 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/ // of each stack frame leading from the current test to the assert call that // failed. func CallerInfo() []string { - var pc uintptr - var ok bool var file string var line int var name string + const stackFrameBufferSize = 10 + pcs := make([]uintptr, stackFrameBufferSize) + callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } + offset := 1 - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } + for { + n := runtime.Callers(offset, pcs) - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. 
- if name == "testing.tRunner" { + if n == 0 { break } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + frames := runtime.CallersFrames(pcs[:n]) + + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break } - } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break + } } + + // Next batch + offset += cap(pcs) } return callers @@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. +// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. 
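Editor's note: the assertions.go hunk above introduces IsNotType and switches IsType's failure message to %T-formatted types. A short usage sketch of the pair, under the assumption that the require variants added elsewhere in this diff mirror the assert ones; the concrete types are only for illustration:

package example_test

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// IsType passes when the two values have the same concrete type;
// IsNotType passes when they differ.
func TestTypeAssertions(t *testing.T) {
	var buf interface{} = &bytes.Buffer{}

	assert.IsType(t, &bytes.Buffer{}, buf)    // same concrete type: passes
	assert.IsNotType(t, &bytes.Reader{}, buf) // different type: passes

	// The require variant fails the test immediately on mismatch.
	require.IsType(t, &bytes.Buffer{}, buf)
}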
@@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b if !same { // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) } return true @@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} same, ok := samePointers(expected, actual) if !ok { - //fails when the arguments are not pointers + // fails when the arguments are not pointers return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) } if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) } return true } @@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false, false //not both are pointers + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) @@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) + return isEmptyValue(reflect.ValueOf(object)) +} +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. +func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty switch objValue.Kind() { // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // non-nil pointers are empty if the value they point to is empty case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) + return isEmptyValue(objValue.Elem()) } + return false } -// Empty asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. @@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. @@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
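Editor's note: the Subset/NotSubset hunks above now convert a map subset into its keys before searching the list, matching the new doc wording "Map elements are key-value pairs unless compared with an array or slice where only the map key is evaluated". A sketch of what that means, assumed to reflect the vendored behaviour:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetMapSemantics(t *testing.T) {
	// map vs map: key-value pairs must match.
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})

	// slice list vs map subset: only the map keys are looked up in the list.
	assert.Subset(t, []int{1, 2, 3}, map[int]string{1: "one", 2: "two"})

	// map list vs slice subset: only the map keys are searched.
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, []string{"x"})

	// NotSubset is the negation under the same rules.
	assert.NotSubset(t, []int{1, 3, 4}, map[int]string{2: "two"})
}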
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { default: return r.MatchString(fmt.Sprint(v)) } - } // Regexp asserts that a specified regexp matches a string. @@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } @@ -1964,6 +2028,9 @@ type CollectT struct { errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) @@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time var lastFinishedTickErrs []error ch := make(chan *CollectT, 1) + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } + timer := time.NewTimer(waitFor) defer timer.Stop() ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) 
- case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect - }() - condition(collect) - }() + case <-tickC: + tickC = nil + go checkCond() case collect := <-ch: if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. lastFinishedTickErrs = collect.errors - tick = ticker.C + tickC = ticker.C } } } @@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) + } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } @@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa return true } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", target, chain, + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) 
+ } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d..a0b953aa 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. +// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7..5a6bb75f 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go index baa0cc7d..5a74c4f4 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -1,5 +1,4 @@ //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default -// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default // Package yaml is an implementation of YAML functions that calls a pluggable implementation. // diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go index b83c6cf6..0bae80e3 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -1,5 +1,4 @@ //go:build !testify_yaml_fail && !testify_yaml_custom -// +build !testify_yaml_fail,!testify_yaml_custom // Package yaml is just an indirection to handle YAML deserialization. 
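Editor's note: earlier in this diff, buildErrorChainString gains an unwrapAll helper that also flattens multi-errors (Unwrap() []error), and ErrorIs/ErrorAs failures now print each error's type when useful. A small sketch of the assertions those messages serve, using errors.Join from the standard library; the error values are illustrative only:

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

type timeoutErr struct{ msg string }

func (e *timeoutErr) Error() string { return e.msg }

func TestErrorChain(t *testing.T) {
	base := &timeoutErr{msg: "deadline exceeded"}
	wrapped := fmt.Errorf("query failed: %w", base)
	joined := errors.Join(wrapped, errors.New("rollback failed"))

	// Found anywhere in the chain, including branches created by errors.Join.
	assert.ErrorIs(t, joined, base)

	// On failure, the reported chain now includes each error's concrete type.
	var target *timeoutErr
	assert.ErrorAs(t, joined, &target)
}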
// diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go index e78f7dfe..8041803f 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -1,5 +1,4 @@ //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default -// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default // Package yaml is an implementation of YAML functions that always fail. // diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index 96843472..c8e3f94a 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -23,6 +23,8 @@ // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. +// A consequence of this is that it must be called from the goroutine running +// the test function, not from other goroutines created during the test. // // Every assertion function also takes an optional string message as the final argument, // allowing custom error messages to be appended to the message the assertion method outputs. diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index d8921950..2d02f9bc 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string t.FailNow() } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). 
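Editor's note: the require/doc.go addition above stresses that require calls t.FailNow and therefore must run on the goroutine executing the test function. A minimal sketch of the safe pattern for background goroutines, with doWork as a hypothetical placeholder for the code under test:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Background goroutines report results over a channel; the require call
// itself happens on the test goroutine.
func TestRequireFromTestGoroutine(t *testing.T) {
	errCh := make(chan error, 1)

	go func() {
		// Do not call require.* here; send the outcome back instead.
		errCh <- doWork()
	}()

	require.NoError(t, <-errCh) // checked on the test goroutine
}

// doWork stands in for whatever the background goroutine actually does.
func doWork() error { return nil }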
// -// actualObj, err := SomeFunction() -// if require.Error(t, err) { -// require.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// require.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if require.Errorf(t, err, "error message %s", "formatted") { -// require.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// require.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf t.FailNow() } +// IsNotType asserts that the specified objects are not of the same type. +// +// require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotType(t, theType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotTypef(t, theType, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. +// +// require.IsType(t, &MyStruct{}, &MyStruct{}) func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs } // IsTypef asserts that the specified objects are of the same type. +// +// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str t.FailNow() } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if require.NotEmpty(t, obj) { // require.Equal(t, "two", obj[1]) @@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if require.NotEmptyf(t, obj, "error message %s", "formatted") { // require.Equal(t, "two", obj[1]) @@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// require.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// require.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 1bd87304..e6f7e944 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in IsNonIncreasingf(a.t, object, msg, args...) 
} +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml index 33e6595c..61724abe 100644 --- a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -1,10 +1,10 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.22.2 + GO_VERSION: go1.25.0 freebsd_13_task: freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-13-5 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest @@ -14,7 +14,7 @@ freebsd_13_task: freebsd_14_task: freebsd_instance: - image_family: freebsd-14-0 + image_family: freebsd-14-2 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go index 40f6c345..87cf6a10 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go @@ -25,10 +25,13 @@ const ( _POSIX2_UPE = -1 ) -var clktck struct { - sync.Once - v int64 -} +var clktck = sync.OnceValue(func() int64 { + ci, err := unix.SysctlClockinfo("kern.clockrate") + if err != nil { + return -1 + } + return int64(ci.Hz) +}) func sysconfPOSIX(name int) (int64, error) { // NetBSD does not define all _POSIX_* values used in sysconf_posix.go @@ -54,14 +57,7 @@ func sysconf(name int) (int64, error) { } return -1, nil case SC_CLK_TCK: - // TODO: use sync.OnceValue once Go 1.21 is the minimal supported version - clktck.Do(func() { - clktck.v = -1 - if ci, err := unix.SysctlClockinfo("kern.clockrate"); err == nil { - clktck.v = int64(ci.Hz) - } - }) - return clktck.v, nil + return clktck(), nil case SC_NGROUPS_MAX: return sysctl32("kern.ngroups"), nil case SC_JOB_CONTROL: diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml index b3091efd..61724abe 100644 --- a/vendor/github.com/tklauser/numcpus/.cirrus.yml +++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml @@ -1,10 +1,10 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.23.0 + GO_VERSION: go1.25.0 freebsd_13_task: freebsd_instance: - image_family: freebsd-13-3 + image_family: freebsd-13-5 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest @@ -14,7 +14,7 @@ freebsd_13_task: freebsd_14_task: freebsd_instance: - image_family: freebsd-14-0 + image_family: freebsd-14-2 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest diff --git a/vendor/github.com/tklauser/numcpus/numcpus_linux.go b/vendor/github.com/tklauser/numcpus/numcpus_linux.go index 7b991da4..d05ee982 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_linux.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_linux.go @@ -47,10 +47,12 @@ func readCPURangeWith[T any](file string, f func(cpus string) (T, error)) (T, er if err != nil { return zero, err } - return f(strings.Trim(string(buf), "\n ")) + return f(string(buf)) } func countCPURange(cpus string) (int, 
error) { + cpus = strings.Trim(cpus, "\n ") + // Treat empty file as valid. This might be the case if there are no offline CPUs in which // case /sys/devices/system/cpu/offline is empty. if cpus == "" { @@ -58,7 +60,7 @@ func countCPURange(cpus string) (int, error) { } n := int(0) - for _, cpuRange := range strings.Split(cpus, ",") { + for cpuRange := range strings.SplitSeq(cpus, ",") { if cpuRange == "" { return 0, fmt.Errorf("empty CPU range in CPU string %q", cpus) } @@ -84,13 +86,15 @@ func countCPURange(cpus string) (int, error) { } func listCPURange(cpus string) ([]int, error) { + cpus = strings.Trim(cpus, "\n ") + // See comment in countCPURange. if cpus == "" { return []int{}, nil } list := []int{} - for _, cpuRange := range strings.Split(cpus, ",") { + for cpuRange := range strings.SplitSeq(cpus, ",") { if cpuRange == "" { return nil, fmt.Errorf("empty CPU range in CPU string %q", cpus) } diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 63541994..34c9ae76 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -92,6 +92,9 @@ var ARM64 struct { HasSHA2 bool // SHA2 hardware implementation HasCRC32 bool // CRC32 hardware implementation HasATOMICS bool // Atomic memory operation instruction set + HasHPDS bool // Hierarchical permission disables in translations tables + HasLOR bool // Limited ordering regions + HasPAN bool // Privileged access never HasFPHP bool // Half precision floating-point instruction set HasASIMDHP bool // Advanced SIMD half precision instruction set HasCPUID bool // CPUID identification scheme registers diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index af2aa99f..f449c679 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -65,10 +65,10 @@ func setMinimalFeatures() { func readARM64Registers() { Initialized = true - parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) + parseARM64SystemRegisters(getisar0(), getisar1(), getmmfr1(), getpfr0()) } -func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { +func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { // ID_AA64ISAR0_EL1 switch extractBits(isar0, 4, 7) { case 1: @@ -152,6 +152,22 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { ARM64.HasI8MM = true } + // ID_AA64MMFR1_EL1 + switch extractBits(mmfr1, 12, 15) { + case 1, 2: + ARM64.HasHPDS = true + } + + switch extractBits(mmfr1, 16, 19) { + case 1: + ARM64.HasLOR = true + } + + switch extractBits(mmfr1, 20, 23) { + case 1, 2, 3: + ARM64.HasPAN = true + } + // ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index 22cc9984..a4f24b3b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -9,31 +9,34 @@ // func getisar0() uint64 TEXT ·getisar0(SB),NOSPLIT,$0-8 // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 + MRS ID_AA64ISAR0_EL1, R0 MOVD R0, ret+0(FP) RET // func getisar1() uint64 TEXT ·getisar1(SB),NOSPLIT,$0-8 // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 + MRS ID_AA64ISAR1_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getmmfr1() uint64 +TEXT ·getmmfr1(SB),NOSPLIT,$0-8 + // get Memory Model Feature Register 1 into x0 + MRS ID_AA64MMFR1_EL1, R0 MOVD R0, ret+0(FP) RET // func getpfr0() 
uint64 TEXT ·getpfr0(SB),NOSPLIT,$0-8 // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 + MRS ID_AA64PFR0_EL1, R0 MOVD R0, ret+0(FP) RET // func getzfr0() uint64 TEXT ·getzfr0(SB),NOSPLIT,$0-8 // get SVE Feature Register 0 into x0 - // mrs x0, ID_AA64ZFR0_EL1 = d5380480 - WORD $0xd5380480 + MRS ID_AA64ZFR0_EL1, R0 MOVD R0, ret+0(FP) RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index 6ac6e1ef..e3fc5a8d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -8,5 +8,6 @@ package cpu func getisar0() uint64 func getisar1() uint64 +func getmmfr1() uint64 func getpfr0() uint64 func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 7f194678..8df2079e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -8,4 +8,5 @@ package cpu func getisar0() uint64 { return 0 } func getisar1() uint64 { return 0 } +func getmmfr1() uint64 { return 0 } func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go index ebfb3fc8..19aea063 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -167,7 +167,7 @@ func doinit() { setMinimalFeatures() return } - parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64mmfr1, cpuid.aa64pfr0) Initialized = true } diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go index 85b64d5c..87fd3a77 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -59,7 +59,7 @@ func doinit() { if !ok { return } - parseARM64SystemRegisters(isar0, isar1, 0) + parseARM64SystemRegisters(isar0, isar1, 0, 0) Initialized = true } diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go deleted file mode 100644 index 73687de7..00000000 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.5 - -package plan9 - -import "syscall" - -func fixwd() { - syscall.Fixwd() -} - -func Getwd() (wd string, err error) { - return syscall.Getwd() -} - -func Chdir(path string) error { - return syscall.Chdir(path) -} diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index fb945821..7a76489d 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -2,22 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
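Editor's note: the go-sysconf hunk above (sysconf_netbsd.go) replaces a hand-rolled sync.Once plus cached field with sync.OnceValue, available since Go 1.21. A minimal sketch of that pattern with an illustrative hostname lookup standing in for the sysctl call:

package example

import (
	"os"
	"sync"
)

// The function passed to sync.OnceValue runs exactly once; every call to the
// returned function reuses the cached result.
var hostname = sync.OnceValue(func() string {
	h, err := os.Hostname()
	if err != nil {
		return "unknown"
	}
	return h
})

func Hostname() string {
	return hostname() // first call computes, later calls return the cached value
}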
-//go:build !go1.5 - package plan9 +import "syscall" + func fixwd() { + syscall.Fixwd() } func Getwd() (wd string, err error) { - fd, err := open(".", O_RDONLY) - if err != nil { - return "", err - } - defer Close(fd) - return Fd2path(fd) + return syscall.Getwd() } func Chdir(path string) error { - return chdir(path) + return syscall.Chdir(path) } diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 6e5c81ac..3ea47038 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error { // Zero clears the set s, so that it contains no CPUs. func (s *CPUSet) Zero() { + clear(s[:]) +} + +// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity] +// will silently ignore any invalid CPU bits in [CPUSet] so this is an +// efficient way of resetting the CPU affinity of a process. +func (s *CPUSet) Fill() { for i := range s { - s[i] = 0 + s[i] = ^cpuMask(0) } } diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18c..62ed1264 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool { // Zero clears the set fds. func (fds *FdSet) Zero() { - for i := range fds.Bits { - fds.Bits[i] = 0 - } + clear(fds.Bits[:]) } diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae..309f5a2b 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) { // clear zeroes the ifreq's union field to prevent trailing garbage data from // being sent to the kernel if an ifreq is reused. func (ifr *Ifreq) clear() { - for i := range ifr.raw.Ifru { - ifr.raw.Ifru[i] = 0 - } + clear(ifr.raw.Ifru[:]) } // TODO(mdlayher): export as IfreqData? For now we can provide helpers such as diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index e6f31d37..d0ed6119 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -49,6 +49,7 @@ esac if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) + set -e $cmd docker build --tag generate:$GOOS $GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." 
&& pwd):/build generate:$GOOS exit diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c..42517077 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -226,6 +226,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -349,6 +350,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' @@ -526,6 +530,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad..7838ca5d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b == 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys connectx(fd int, 
endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 4958a657..06c0eea6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } @@ -2645,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099..34a46769 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc39554..18a3d9bd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 9e7a6c5a..d0a75da5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -328,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -492,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 
0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -528,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -555,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -844,24 +849,90 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2025-01-17)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x31 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 0xd + DT_FLAGS_1 = 0x6ffffffb + DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE = 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = "\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -937,9 +1008,6 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 ETHTOOL_FAMILY_NAME = "ethtool" @@ -1150,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -1213,6 +1291,7 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 @@ -1231,9 +1310,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1255,6 +1337,7 @@ const ( 
FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1274,6 +1357,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1582,7 +1666,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1633,7 +1716,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1695,7 +1777,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1817,7 +1898,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2267,7 +2352,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = "LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = "LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = "LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + 
NN_VMCOREDD = "LINUX" + NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS = 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d + NT_PPC_TM_CVMX = 0x10a + NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2454,6 +2699,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d + PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ -2493,6 +2791,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 
PR_GET_DUMPABLE = 0x3 @@ -2652,6 +2954,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2732,6 +3038,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2740,6 +3047,23 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -2982,6 +3306,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -3072,6 +3397,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3298,6 +3664,16 @@ const ( STATX_UID = 0x8 STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3336,7 +3712,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xf + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3406,8 +3782,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3530,14 +3904,14 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf @@ -3574,7 +3948,7 @@ const ( WDIOS_TEMPPANIC = 
0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3688,6 +4062,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index a8c421e2..1c37f9fb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 9a88d181..6f54d34a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 7cb6a867..783ec5c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d0ecd2c5..ca83d3ba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 7a2940ae..607e611c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index d14ca8f2..b9cb5bd3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 2da1bac1..65b078a6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 28727514..5298a303 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7f287b54..7bc557c8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 7e5f9e6a..152399bb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 37c87952..1a1ce240 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 52201336..4231a1fb 
100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 4bfe2b5b..21c0e952 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index e3cffb86..f00d1cd7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c219c8db..bc8d539e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 5cc1e8eb..8935d10a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413..b4609c20 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" 
//go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -221,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -371,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff3..aca56ee4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb45069..2ea1ef58 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e50297..d22c8af3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec5..5ee264ae 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a..f9f03ebf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a33..87c2118e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b99622..391ad102 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9..56561577 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc..0482b52e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfb..71806f08 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b4463650..e35a7105 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c1..2aea4767 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 84053916..6c9bb4e5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790..680bc991 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f..620f2710 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8bcac283..c1a46701 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -115,7 +115,9 @@ type Statx_t struct { Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 Dio_read_offset_align uint32 - _ [9]uint64 + Atomic_write_unit_max_opt uint32 + _ [1]uint32 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -629,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -686,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -737,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -2317,6 +2332,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2597,8 +2617,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3044,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 + TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS = 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3078,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { @@ -3541,6 +3590,8 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type NexthopGrp struct { Id uint32 Weight uint8 @@ -3548,6 +3599,8 @@ type NexthopGrp struct { Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -4044,7 +4097,7 @@ const ( ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 ETHTOOL_A_TSINFO_STATS = 0x6 ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 - ETHTOOL_A_TSINFO_MAX = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4130,6 +4183,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4780,7 +4846,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 
0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x150 + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 @@ -5414,7 +5480,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5530,7 +5596,7 @@ const ( NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -6270,3 +6336,30 @@ type SockDiagReq struct { } const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 + MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 62db85f6..485f2d3a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,19 +282,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -330,17 +324,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -348,10 +336,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 7d89d648..ecbd1ad8 
100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -300,16 +300,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -344,27 +338,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 9c0b39ee..02f0463a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,19 +273,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -321,17 +315,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -339,10 +327,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index de9c7ff3..6f4d400d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -279,16 +279,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -323,27 +317,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2336bd2b..cd532cfa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -280,16 +280,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -324,27 +318,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 
4711f0be..41336208 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ab99a34b..eaa37eb7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 04c9866e..98ae6a1e 
100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 60aa69f6..cae19615 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index cb4fad78..6ce3b4e0 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,19 +285,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,17 +327,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -351,10 +339,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 60272cfc..c7429c6a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 3f5b91bc..4bf4baf4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 51550f15..e9709d70 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -307,16 +307,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -351,27 +345,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go 
index 3239e50e..fb44268c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -302,16 +302,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -346,27 +340,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index faf20027..9c38265c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -284,16 +284,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -328,27 +322,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go index fc1835d8..bc1ce436 100644 --- 
a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -52,7 +52,7 @@ var ( ) func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procRegConnectRegistryW.Addr(), uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -60,7 +60,7 @@ func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall } func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + r0, _, _ := syscall.SyscallN(procRegCreateKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -68,7 +68,7 @@ func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class * } func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + r0, _, _ := syscall.SyscallN(procRegDeleteKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -76,7 +76,7 @@ func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { } func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + r0, _, _ := syscall.SyscallN(procRegDeleteValueW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -84,7 +84,7 @@ func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { } func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -92,7 +92,7 @@ func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint3 } func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { - r0, _, _ := 
syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + r0, _, _ := syscall.SyscallN(procRegLoadMUIStringW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -100,7 +100,7 @@ func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint } func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + r0, _, _ := syscall.SyscallN(procRegSetValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -108,7 +108,7 @@ func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype } func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 640f6b15..69439df2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW @@ -890,8 +892,12 @@ const socket_error = uintptr(^uint32(0)) //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx //sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table 
**MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 //sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable //sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 @@ -914,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. +// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. +type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 958bcf47..6e4f50eb 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. +const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 @@ -1976,6 +1992,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 @@ -2298,6 +2320,82 @@ type MibIfRow2 struct { OutQLen uint64 } +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. +type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. 
+const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. +const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. +type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. +func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +} + // MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See // https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. 
type MibUnicastIpAddressRow struct { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index a58bc48b..f25b7308 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -182,13 +182,17 @@ var ( procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2") procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -238,6 +242,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ -284,6 +289,7 @@ var ( procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -546,25 +552,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), 
uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -574,7 +580,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -586,7 +592,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -594,7 +600,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -602,7 +608,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor 
*SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -610,7 +616,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -618,7 +624,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -626,7 +632,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -634,7 +640,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -642,7 +648,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -650,7 +656,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -658,7 +664,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if r1 == 0 { err = errnoErr(e1) } @@ -675,7 +681,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -683,7 +689,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -691,7 +697,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -703,7 +709,7 @@ func CreateProcessAsUser(token 
Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -711,7 +717,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -720,7 +726,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -728,7 +734,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), 
uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -736,7 +742,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -744,7 +750,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -752,7 +758,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -760,7 +766,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -768,7 +774,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -776,7 +782,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -784,7 +790,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := 
syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -792,13 +798,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { err = errnoErr(e1) } @@ -806,7 +812,7 @@ func FreeSid(sid *SID) (err error) { } func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { - r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) if r1 == 0 { err = errnoErr(e1) } @@ -814,7 +820,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { } func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -829,7 +835,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -837,7 +843,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), 
uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -853,7 +859,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -867,7 +873,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -876,7 +882,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -886,7 +892,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau if *ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -895,7 +901,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -911,7 +917,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -921,7 +927,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, 
securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -929,25 +935,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -955,7 +961,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -963,7 +969,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -979,7 +985,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), 
uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -987,7 +993,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -996,25 +1002,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1022,7 +1028,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1030,7 +1036,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname 
*uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1038,7 +1044,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1046,7 +1052,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1054,7 +1060,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1062,7 +1068,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1070,7 +1076,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) 
(handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1079,7 +1085,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1092,7 +1098,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1100,7 +1106,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1108,7 +1114,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1120,7 +1126,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1128,7 +1134,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1136,7 +1142,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1144,7 +1150,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1152,7 +1158,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1160,7 +1166,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1176,7 +1182,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1184,7 +1190,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), 
uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1192,7 +1198,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1200,7 +1206,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1208,7 +1214,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1217,7 +1223,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1226,7 +1232,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, 
numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1234,7 +1240,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1242,7 +1248,7 @@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1250,7 +1256,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1267,7 +1273,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1275,7 +1281,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), 
uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1291,7 +1297,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1303,7 +1309,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1315,7 +1321,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1323,7 +1329,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1336,7 +1342,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1344,7 +1350,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1352,7 +1358,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, 
uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1360,7 +1366,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1368,7 +1374,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1376,7 +1382,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1384,7 +1390,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1392,7 +1398,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1400,7 +1406,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1408,7 +1414,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + 
r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1417,7 +1423,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1425,13 +1431,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1440,7 +1446,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1449,7 +1455,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = (*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1458,18 +1464,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, 
uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1477,7 +1483,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1485,13 +1491,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1500,7 +1506,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1509,7 +1515,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func 
CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1521,7 +1527,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1530,7 +1536,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1538,7 +1544,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1546,7 +1552,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, } func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), 
uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1554,7 +1560,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1562,7 +1568,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1571,7 +1577,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1586,7 +1592,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1594,12 +1600,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, 
uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0)
+	syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype))
 	return
 }
 
 func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
-	r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+	r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -1607,7 +1613,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
 }
 
 func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
-	r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+	r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -1615,15 +1621,20 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
 }
 
 func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
-	r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
 	return
 }
 
+func FreeMibTable(memory unsafe.Pointer) {
+	syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory))
+	return
+}
+
 func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
-	r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
+	r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
@@ -1631,7 +1642,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter
 }
 
 func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
-	r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0)
+	r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
@@ -1639,7 +1650,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
 }
 
 func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) {
-	r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0)
+	r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
@@ -1647,7 +1658,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod
 }
 
 func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
-	r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow)))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
@@ -1655,7 +1666,23 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
 }
 
 func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
-	r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0)
+	r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table)))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
@@ -1663,7 +1690,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
 }
 
 func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
-	r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
@@ -1675,7 +1702,19 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa
 	if initialNotification {
 		_p0 = 1
 	}
-	r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+	r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
@@ -1687,7 +1726,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext
 	if initialNotification {
 		_p0 = 1
 	}
-	r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+	r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
 	if r0 != 0 {
 		errcode = syscall.Errno(r0)
 	}
@@ -1695,7 +1734,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext
 }
 
 func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
-	r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path)))
 	cookie = uintptr(r0)
 	if cookie == 0 {
 		err = errnoErr(e1)
@@ -1704,7 +1743,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
 }
 
 func AssignProcessToJobObject(job Handle, process Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job),
uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1712,7 +1751,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1720,7 +1759,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1728,7 +1767,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1736,7 +1775,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1744,7 +1783,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1752,12 +1791,12 @@ func CloseHandle(handle Handle) (err error) { } func ClosePseudoConsole(console Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console)) return } func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1765,7 +1804,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { } func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa))) if r1 == 0 { err = errnoErr(e1) } @@ -1773,7 +1812,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { } func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), 
uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1782,7 +1821,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d } func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1791,7 +1830,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat } func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1800,7 +1839,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS } func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1809,7 +1848,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes } func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1817,7 +1856,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr } func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1826,7 +1865,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr } func 
CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1835,7 +1874,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, } func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1848,7 +1887,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 if initialOwner { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1857,7 +1896,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 } func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1866,7 +1905,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u } func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -1878,7 +1917,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + 
r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -1886,7 +1925,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA } func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole))) if r0 != 0 { hr = syscall.Errno(r0) } @@ -1894,7 +1933,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons } func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1902,7 +1941,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u } func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1911,7 +1950,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er } func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { err = errnoErr(e1) } @@ -1919,7 +1958,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err } func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -1927,12 +1966,12 @@ func DeleteFile(path *uint16) (err error) { } func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist))) return } func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), 
uintptr(unsafe.Pointer(volumeMountPoint))) if r1 == 0 { err = errnoErr(e1) } @@ -1940,7 +1979,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { } func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1948,7 +1987,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff } func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } @@ -1960,7 +1999,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP if bInheritHandle { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions)) if r1 == 0 { err = errnoErr(e1) } @@ -1968,7 +2007,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP } func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc)) if r1 == 0 { err = errnoErr(e1) } @@ -1976,12 +2015,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { } func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode)) return } func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -1990,7 +2029,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, } func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1998,7 +2037,7 @@ func FindClose(handle Handle) (err error) { } func 
FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2019,7 +2058,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter if watchSubtree { _p1 = 1 } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2028,7 +2067,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter } func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2037,7 +2076,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro } func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2046,7 +2085,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b } func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2055,7 +2094,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er } func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2063,7 +2102,7 @@ func FindNextChangeNotification(handle Handle) (err error) { } func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2071,7 +2110,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) { } func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), 
uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2079,7 +2118,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin } func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2087,7 +2126,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) } func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType)) resInfo = Handle(r0) if resInfo == 0 { err = errnoErr(e1) @@ -2096,7 +2135,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, } func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume)) if r1 == 0 { err = errnoErr(e1) } @@ -2104,7 +2143,15 @@ func FindVolumeClose(findVolume Handle) (err error) { } func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FlushConsoleInputBuffer(console Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console)) if r1 == 0 { err = errnoErr(e1) } @@ -2112,7 +2159,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2120,7 +2167,7 @@ func FlushFileBuffers(handle Handle) (err error) { } func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -2132,7 +2179,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2141,7 +2188,7 @@ 
func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu } func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs))) if r1 == 0 { err = errnoErr(e1) } @@ -2149,7 +2196,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) { } func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2157,7 +2204,7 @@ func FreeLibrary(handle Handle) (err error) { } func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID)) if r1 == 0 { err = errnoErr(e1) } @@ -2165,19 +2212,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro } func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetACP.Addr()) acp = uint32(r0) return } func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat))) if r1 == 0 { err = errnoErr(e1) } @@ -2185,7 +2232,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { } func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -2193,7 +2240,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) { } func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -2201,13 +2248,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr()) cmd = (*uint16)(unsafe.Pointer(r0)) return } func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ 
-2215,7 +2262,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { } func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2223,7 +2270,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { } func GetConsoleCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2232,7 +2279,7 @@ func GetConsoleCP() (cp uint32, err error) { } func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode))) if r1 == 0 { err = errnoErr(e1) } @@ -2240,7 +2287,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { } func GetConsoleOutputCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2249,7 +2296,7 @@ func GetConsoleOutputCP() (cp uint32, err error) { } func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2257,7 +2304,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) ( } func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2266,19 +2313,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { } func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr()) pid = uint32(r0) return } func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr()) id = uint32(r0) return } func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = 
errnoErr(e1) } @@ -2286,13 +2333,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6 } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName))) driveType = uint32(r0) return } func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr()) envs = (*uint16)(unsafe.Pointer(r0)) if envs == nil { err = errnoErr(e1) @@ -2301,7 +2348,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) { } func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2310,7 +2357,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32 } func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode))) if r1 == 0 { err = errnoErr(e1) } @@ -2318,7 +2365,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { } func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2326,7 +2373,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { } func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name))) attrs = uint32(r0) if attrs == INVALID_FILE_ATTRIBUTES { err = errnoErr(e1) @@ -2335,7 +2382,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) { } func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2343,7 +2390,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e } func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), 
uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -2351,7 +2398,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, } func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -2359,7 +2406,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2368,7 +2415,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) { } func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2377,7 +2424,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32 } func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2386,13 +2433,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( } func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr()) size = uintptr(r0) return } func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLastError.Addr()) if r0 != 0 { lasterr = syscall.Errno(r0) } @@ -2400,7 +2447,7 @@ func GetLastError() (lasterr error) { } func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2409,7 +2456,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err } func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr()) drivesBitMask = uint32(r0) if drivesBitMask == 0 { err = 
errnoErr(e1) @@ -2418,7 +2465,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) { } func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2427,13 +2474,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er } func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2442,7 +2489,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, } func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) if r1 == 0 { err = errnoErr(e1) } @@ -2450,7 +2497,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er } func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2458,7 +2505,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro } func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2466,7 +2513,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m } func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, 
uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) if r1 == 0 { err = errnoErr(e1) } @@ -2474,7 +2521,15 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 } func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents))) if r1 == 0 { err = errnoErr(e1) } @@ -2486,7 +2541,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -2494,7 +2549,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa } func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process)) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -2512,7 +2567,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { } func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname))) proc = uintptr(r0) if proc == 0 { err = errnoErr(e1) @@ -2521,7 +2576,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { } func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process)) id = uint32(r0) if id == 0 { err = errnoErr(e1) @@ -2530,7 +2585,7 @@ func GetProcessId(process Handle) (id uint32, err error) { } func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2538,7 +2593,7 @@ func 
getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin } func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -2546,7 +2601,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { } func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) if r1 == 0 { err = errnoErr(e1) } @@ -2554,12 +2609,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, } func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags))) return } func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } @@ -2567,7 +2622,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl } func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2576,12 +2631,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin } func getStartupInfo(startupInfo *StartupInfo) { - syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo))) return } func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), 
uintptr(stdhandle)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2590,7 +2645,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) { } func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2599,7 +2654,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2607,17 +2662,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2626,7 +2681,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro } func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2635,7 +2690,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { } func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2643,13 +2698,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr()) ms = uint64(r0) return } func GetTimeZoneInformation(tzi 
*Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi))) rc = uint32(r0) if rc == 0xffffffff { err = errnoErr(e1) @@ -2658,7 +2713,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { } func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2666,7 +2721,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16 } func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetVersion.Addr()) ver = uint32(r0) if ver == 0 { err = errnoErr(e1) @@ -2675,7 +2730,7 @@ func GetVersion() (ver uint32, err error) { } func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2683,7 +2738,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN } func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) 
} @@ -2691,7 +2746,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } @@ -2699,7 +2754,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint } func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2707,7 +2762,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui } func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -2715,7 +2770,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16 } func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2724,7 +2779,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2736,7 +2791,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) { if *isWow64 { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0))) *isWow64 = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -2749,7 +2804,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1 if err != nil { return } - r1, _, e1 := 
syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { err = errnoErr(e1) } @@ -2766,7 +2821,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e } func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2784,7 +2839,7 @@ func LoadLibrary(libname string) (handle Handle, err error) { } func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2793,7 +2848,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { } func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo)) resData = Handle(r0) if resData == 0 { err = errnoErr(e1) @@ -2802,7 +2857,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { } func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length)) ptr = uintptr(r0) if ptr == 0 { err = errnoErr(e1) @@ -2811,7 +2866,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { } func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem)) handle = Handle(r0) if handle != 0 { err = errnoErr(e1) @@ -2820,7 +2875,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) { } func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2828,7 +2883,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt } func LockResource(resData Handle) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2837,7 +2892,7 @@ func LockResource(resData Handle) (addr uintptr, err error) { } func MapViewOfFile(handle Handle, access uint32, offsetHigh 
uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2846,7 +2901,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui } func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2854,7 +2909,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2862,7 +2917,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -2870,7 +2925,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { } func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to))) if r1 == 0 { err = errnoErr(e1) } @@ -2878,7 +2933,7 @@ func MoveFile(from *uint16, to *uint16) (err error) { } func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) @@ -2891,7 +2946,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2904,7 +2959,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), 
uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2917,7 +2972,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2930,7 +2985,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2939,7 +2994,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand } func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2947,7 +3002,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla } func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2955,7 +3010,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2963,7 +3018,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid))) if r1 == 0 { err = errnoErr(e1) } @@ -2971,7 +3026,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { } func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2979,7 +3034,7 @@ func PulseEvent(event Handle) (err error) { } func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), 
uintptr(handle), uintptr(dwFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -2987,7 +3042,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) { } func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2996,7 +3051,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3 } func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -3004,7 +3059,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size } func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -3012,7 +3067,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -3024,7 +3079,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -3036,7 +3091,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), 
uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3044,7 +3099,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -3052,7 +3107,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -3060,7 +3115,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3068,7 +3123,7 @@ func RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -3076,7 +3131,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3084,7 +3139,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -3092,7 +3147,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -3101,7 +3156,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3109,7 +3164,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := 
syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 == 0 { err = errnoErr(e1) } @@ -3117,7 +3172,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -3125,7 +3180,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -3133,7 +3188,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func SetConsoleCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3141,7 +3196,7 @@ func SetConsoleCP(cp uint32) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3149,7 +3204,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) if r1 == 0 { err = errnoErr(e1) } @@ -3157,7 +3212,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetConsoleOutputCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3165,7 +3220,7 @@ func SetConsoleOutputCP(cp uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3173,7 +3228,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3190,7 +3245,7 @@ func SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3198,7 +3253,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) 
(err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3206,7 +3261,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = errnoErr(e1) } @@ -3214,13 +3269,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3228,7 +3283,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3236,7 +3291,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3244,7 +3299,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3252,7 +3307,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3261,7 +3316,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), 
uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3269,7 +3324,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3277,7 +3332,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3285,7 +3340,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3294,7 +3349,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3302,7 +3357,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3314,7 +3369,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3322,7 +3377,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + 
r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3330,7 +3385,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3338,7 +3393,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3346,7 +3401,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3354,7 +3409,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3362,7 +3417,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3370,7 +3425,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3383,13 +3438,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := 
syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = errnoErr(e1) } @@ -3397,7 +3452,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = errnoErr(e1) } @@ -3405,7 +3460,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3413,7 +3468,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3421,7 +3476,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3429,7 +3484,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3 } func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr)) if r1 == 0 { err = errnoErr(e1) } @@ -3437,7 +3492,7 @@ func UnmapViewOfFile(addr uintptr) (err error) { } func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize))) if r1 == 0 { err = errnoErr(e1) } @@ -3445,7 +3500,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, } func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), 
uintptr(alloctype), uintptr(protect)) value = uintptr(r0) if value == 0 { err = errnoErr(e1) @@ -3454,7 +3509,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3 } func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype)) if r1 == 0 { err = errnoErr(e1) } @@ -3462,7 +3517,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { } func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3470,7 +3525,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) { } func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect))) if r1 == 0 { err = errnoErr(e1) } @@ -3478,7 +3533,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect } func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect))) if r1 == 0 { err = errnoErr(e1) } @@ -3486,7 +3541,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3494,7 +3549,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt } func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3502,7 +3557,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat } func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3510,13 +3565,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err 
error) { } func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr()) sessionID = uint32(r0) return } func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3528,7 +3583,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil if waitAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3537,7 +3592,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil } func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3546,7 +3601,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, } func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved))) if r1 == 0 { err = errnoErr(e1) } @@ -3558,7 +3613,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3566,7 +3621,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) } func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten))) if r1 == 0 { err = errnoErr(e1) } @@ -3574,7 +3629,7 @@ func 
WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size } func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3582,12 +3637,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32 } func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen))) return } func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3595,7 +3650,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint } func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3603,7 +3658,7 @@ func NetApiBufferFree(buf *byte) (neterr error) { } func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3611,7 +3666,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete } func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { - r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), 
uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3619,7 +3674,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr } func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3627,7 +3682,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by } func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3635,7 +3690,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO } func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3643,7 +3698,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i } func NtQueryInformationProcess(proc 
Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) + r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3651,7 +3706,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe } func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3659,7 +3714,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf } func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3667,7 +3722,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, } func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3675,7 +3730,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P } func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) + r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3683,13 +3738,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL } func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) + r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) ret = r0 != 0 return } func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) + r0, _, 
_ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3697,13 +3752,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { } func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable))) ret = r0 != 0 return } func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3711,7 +3766,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile } func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3719,18 +3774,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString } func RtlGetCurrentPeb() (peb *PEB) { - r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr()) peb = (*PEB)(unsafe.Pointer(r0)) return } func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) return } func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3738,23 +3793,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { } func RtlInitString(destinationString *NTString, sourceString *byte) { - syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { - 
syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus)) ret = syscall.Errno(r0) return } func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3762,7 +3817,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { } func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3770,7 +3825,7 @@ func coCreateGuid(pguid *GUID) (ret error) { } func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { - r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3778,7 +3833,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable * } func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { - r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) + r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3786,23 +3841,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { } func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address)) return } func CoUninitialize() { - syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) + syscall.SyscallN(procCoUninitialize.Addr()) return } func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) chars = int32(r0) return } func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded))) if r1 == 0 { err = 
errnoErr(e1) } @@ -3810,7 +3865,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin } func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag)) if r1 == 0 { err = errnoErr(e1) } @@ -3818,7 +3873,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u } func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -3826,7 +3881,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err } func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3834,7 +3889,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin } func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3842,7 +3897,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u } func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3850,7 +3905,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb } func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3862,7 +3917,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb if ret != nil { return } - r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), 
uintptr(unsafe.Pointer(subscription)), 0) + r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3874,12 +3929,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { if err != nil { return } - syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription)) return } func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3887,7 +3942,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er } func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3895,7 +3950,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint } func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3903,7 +3958,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3911,7 +3966,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf } func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3919,7 +3974,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { } func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, 
machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3927,7 +3982,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu } func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3935,7 +3990,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz } func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3944,7 +3999,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN } func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3952,7 +4007,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI } func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3960,7 +4015,7 @@ 
func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { } func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3968,7 +4023,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3976,7 +4031,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo } func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3984,7 +4039,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d } func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3993,7 +4048,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp } func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize))) 
if r1 == 0 { err = errnoErr(e1) } @@ -4001,7 +4056,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData))) if r1 == 0 { err = errnoErr(e1) } @@ -4009,7 +4064,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa } func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4017,7 +4072,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4025,7 +4080,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -4033,7 +4088,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), 
uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4041,7 +4096,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4049,7 +4104,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4057,7 +4112,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4065,7 +4120,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) key = Handle(r0) if key == InvalidHandle { err = errnoErr(e1) @@ -4074,7 +4129,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc } func 
SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4082,7 +4137,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4090,7 +4145,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4098,7 +4153,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4106,7 +4161,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4114,7 +4169,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + r1, _, e1 := 
syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -4122,7 +4177,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er } func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc))) argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) @@ -4131,7 +4186,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { } func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4139,7 +4194,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u } func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) if r1 <= 32 { err = errnoErr(e1) } @@ -4147,12 +4202,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui } func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { - syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param)) return } func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param)) if r1 == 0 { err = errnoErr(e1) } @@ -4160,7 +4215,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { } func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -4168,7 +4223,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { } func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { - r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) copied = int32(r0) if copied == 0 { err = errnoErr(e1) @@ -4177,19 +4232,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) 
(copied int32, e } func GetDesktopWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr()) hwnd = HWND(r0) return } func GetForegroundWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr()) hwnd = HWND(r0) return } func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -4197,19 +4252,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { } func GetKeyboardLayout(tid uint32) (hkl Handle) { - r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid)) hkl = Handle(r0) return } func GetShellWindow() (shellWindow HWND) { - r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr()) shellWindow = HWND(r0) return } func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid))) tid = uint32(r0) if tid == 0 { err = errnoErr(e1) @@ -4218,25 +4273,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { } func IsWindow(hwnd HWND) (isWindow bool) { - r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd)) isWindow = r0 != 0 return } func IsWindowUnicode(hwnd HWND) (isUnicode bool) { - r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd)) isUnicode = r0 != 0 return } func IsWindowVisible(hwnd HWND) (isVisible bool) { - r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd)) isVisible = r0 != 0 return } func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags)) hkl = Handle(r0) if hkl == 0 { err = errnoErr(e1) @@ -4245,7 +4300,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { } func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype)) ret = int32(r0) if ret == 0 { err = errnoErr(e1) @@ -4254,13 +4309,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i } func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff 
int32, flags uint32, hkl Handle) (ret int32) { - r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl)) ret = int32(r0) return } func UnloadKeyboardLayout(hkl Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl)) if r1 == 0 { err = errnoErr(e1) } @@ -4272,7 +4327,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( if inheritExisting { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -4280,7 +4335,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( } func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block))) if r1 == 0 { err = errnoErr(e1) } @@ -4288,7 +4343,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) { } func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) if r1 == 0 { err = errnoErr(e1) } @@ -4305,7 +4360,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32 } func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle))) bufSize = uint32(r0) if bufSize == 0 { err = errnoErr(e1) @@ -4323,7 +4378,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u } func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer)) if r1 == 0 { err = errnoErr(e1) } @@ -4340,7 +4395,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer } func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), 
uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4348,7 +4403,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint } func TimeBeginPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4356,7 +4411,7 @@ func TimeBeginPeriod(period uint32) (err error) { } func TimeEndPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4364,7 +4419,7 @@ func TimeEndPeriod(period uint32) (err error) { } func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { - r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4372,12 +4427,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) } func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo))) return } func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4385,7 +4440,7 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul } func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr()) if r1 == socket_error { err = errnoErr(e1) } @@ -4393,7 +4448,7 @@ func WSACleanup() (err error) { } func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) { - r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) if r1 != 0 { err = errnoErr(e1) } @@ -4401,7 +4456,7 @@ func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err } func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), 
uintptr(unsafe.Pointer(bufferLength))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4414,7 +4469,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -4422,7 +4477,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f } func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == socket_error { err = errnoErr(e1) } @@ -4430,7 +4485,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo } func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) if r1 == socket_error { err = errnoErr(e1) } @@ -4438,7 +4493,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) } func WSALookupServiceEnd(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle)) if r1 == socket_error { err = errnoErr(e1) } @@ -4446,7 +4501,7 @@ func WSALookupServiceEnd(handle Handle) (err error) { } func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet))) if r1 == socket_error { err = errnoErr(e1) } @@ -4454,7 +4509,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS } func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), 
uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4462,7 +4517,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32 } func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4470,7 +4525,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui } func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4478,7 +4533,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, } func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4486,7 +4541,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32 } func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4495,7 +4550,7 @@ func WSASocket(af int32, typ int32, protocol int32, 
protoInfo *WSAProtocolInfo, } func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4503,7 +4558,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { } func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4511,7 +4566,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { } func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s)) if r1 == socket_error { err = errnoErr(e1) } @@ -4519,7 +4574,7 @@ func Closesocket(s Handle) (err error) { } func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4536,7 +4591,7 @@ func GetHostByName(name string) (h *Hostent, err error) { } func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name))) h = (*Hostent)(unsafe.Pointer(r0)) if h == nil { err = errnoErr(e1) @@ -4545,7 +4600,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) { } func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4562,7 +4617,7 @@ func GetProtoByName(name string) (p *Protoent, err error) { } func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name))) p = (*Protoent)(unsafe.Pointer(r0)) if p == nil { err = errnoErr(e1) @@ -4585,7 +4640,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4594,7 +4649,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), 
uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4602,7 +4657,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4610,7 +4665,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4618,7 +4673,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4628,7 +4683,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4641,7 +4696,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4649,7 +4704,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4657,7 +4712,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4665,7 +4720,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, 
uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4674,7 +4729,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4682,12 +4737,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/modules.txt b/vendor/modules.txt index fc9ee79c..b9cff5c8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -9,6 +9,9 @@ filippo.io/age/internal/stream ## explicit; go 1.20 filippo.io/edwards25519 filippo.io/edwards25519/field +# github.com/Masterminds/semver/v3 v3.4.0 +## explicit; go 1.21 +github.com/Masterminds/semver/v3 # github.com/aws/aws-lambda-go v1.47.0 ## explicit; go 1.18 github.com/aws/aws-lambda-go/lambda @@ -156,8 +159,8 @@ github.com/benbjohnson/clock # github.com/cenkalti/backoff/v4 v4.3.0 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 -# github.com/conductorone/baton-sdk v0.4.2 -## explicit; go 1.25 +# github.com/conductorone/baton-sdk v0.6.9 +## explicit; go 1.25.2 github.com/conductorone/baton-sdk/internal/connector github.com/conductorone/baton-sdk/pb/c1/c1z/v1 github.com/conductorone/baton-sdk/pb/c1/config/v1 @@ -168,6 +171,7 @@ github.com/conductorone/baton-sdk/pb/c1/ratelimit/v1 github.com/conductorone/baton-sdk/pb/c1/reader/v2 github.com/conductorone/baton-sdk/pb/c1/transport/v1 github.com/conductorone/baton-sdk/pb/c1/utls/v1 +github.com/conductorone/baton-sdk/pkg/actions github.com/conductorone/baton-sdk/pkg/annotations github.com/conductorone/baton-sdk/pkg/auth github.com/conductorone/baton-sdk/pkg/bid @@ -208,6 +212,7 @@ github.com/conductorone/baton-sdk/pkg/types github.com/conductorone/baton-sdk/pkg/types/entitlement github.com/conductorone/baton-sdk/pkg/types/grant github.com/conductorone/baton-sdk/pkg/types/resource +github.com/conductorone/baton-sdk/pkg/types/sessions github.com/conductorone/baton-sdk/pkg/types/tasks github.com/conductorone/baton-sdk/pkg/types/ticket github.com/conductorone/baton-sdk/pkg/ugrpc @@ -231,9 +236,6 @@ github.com/davecgh/go-spew/spew # github.com/deckarep/golang-set/v2 v2.8.0 ## explicit; go 1.18 github.com/deckarep/golang-set/v2 -# github.com/dolthub/maphash v0.1.0 -## explicit; go 1.18 -github.com/dolthub/maphash # github.com/doug-martin/goqu/v9 v9.19.0 ## explicit; go 1.12 github.com/doug-martin/goqu/v9 @@ -248,6 +250,12 @@ 
github.com/doug-martin/goqu/v9/sqlgen # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize +# github.com/ebitengine/purego v0.9.1 +## explicit; go 1.18 +github.com/ebitengine/purego +github.com/ebitengine/purego/internal/cgo +github.com/ebitengine/purego/internal/fakecgo +github.com/ebitengine/purego/internal/strings # github.com/ennyjfrick/ruleguard-logfatal v0.0.2 ## explicit; go 1.23 github.com/ennyjfrick/ruleguard-logfatal @@ -258,9 +266,6 @@ github.com/envoyproxy/protoc-gen-validate/validate ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal -# github.com/gammazero/deque v1.0.0 -## explicit; go 1.22 -github.com/gammazero/deque # github.com/glebarez/go-sqlite v1.22.0 ## explicit; go 1.17 github.com/glebarez/go-sqlite @@ -355,20 +360,20 @@ github.com/magiconair/properties # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/maypok86/otter v1.2.4 -## explicit; go 1.19 -github.com/maypok86/otter -github.com/maypok86/otter/internal/core -github.com/maypok86/otter/internal/expiry -github.com/maypok86/otter/internal/generated/node -github.com/maypok86/otter/internal/hashtable -github.com/maypok86/otter/internal/lossy -github.com/maypok86/otter/internal/queue -github.com/maypok86/otter/internal/s3fifo -github.com/maypok86/otter/internal/stats -github.com/maypok86/otter/internal/unixtime -github.com/maypok86/otter/internal/xmath -github.com/maypok86/otter/internal/xruntime +# github.com/maypok86/otter/v2 v2.2.1 +## explicit; go 1.24 +github.com/maypok86/otter/v2 +github.com/maypok86/otter/v2/internal/deque +github.com/maypok86/otter/v2/internal/deque/queue +github.com/maypok86/otter/v2/internal/expiration +github.com/maypok86/otter/v2/internal/generated/node +github.com/maypok86/otter/v2/internal/hashmap +github.com/maypok86/otter/v2/internal/lossy +github.com/maypok86/otter/v2/internal/xiter +github.com/maypok86/otter/v2/internal/xmath +github.com/maypok86/otter/v2/internal/xruntime +github.com/maypok86/otter/v2/internal/xsync +github.com/maypok86/otter/v2/stats # github.com/migueleliasweb/go-github-mock v1.1.0 ## explicit; go 1.23 github.com/migueleliasweb/go-github-mock/src/mock @@ -417,18 +422,15 @@ github.com/sagikazarmark/slog-shim # github.com/segmentio/ksuid v1.0.4 ## explicit; go 1.12 github.com/segmentio/ksuid -# github.com/shirou/gopsutil/v3 v3.24.5 -## explicit; go 1.18 -github.com/shirou/gopsutil/v3/common -github.com/shirou/gopsutil/v3/cpu -github.com/shirou/gopsutil/v3/host -github.com/shirou/gopsutil/v3/internal/common -github.com/shirou/gopsutil/v3/mem -github.com/shirou/gopsutil/v3/net -github.com/shirou/gopsutil/v3/process -# github.com/shoenig/go-m1cpu v0.1.6 -## explicit; go 1.20 -github.com/shoenig/go-m1cpu +# github.com/shirou/gopsutil/v4 v4.25.11 +## explicit; go 1.24.0 +github.com/shirou/gopsutil/v4/common +github.com/shirou/gopsutil/v4/cpu +github.com/shirou/gopsutil/v4/host +github.com/shirou/gopsutil/v4/internal/common +github.com/shirou/gopsutil/v4/mem +github.com/shirou/gopsutil/v4/net +github.com/shirou/gopsutil/v4/process # github.com/shurcooL/githubv4 v0.0.0-20240727222349-48295856cce7 ## explicit; go 1.19 github.com/shurcooL/githubv4 @@ -469,7 +471,7 @@ github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml github.com/spf13/viper/internal/features -# github.com/stretchr/testify v1.10.0 +# github.com/stretchr/testify v1.11.1 ## explicit; go 1.17 
github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml @@ -477,11 +479,11 @@ github.com/stretchr/testify/require # github.com/subosito/gotenv v1.6.0 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/tklauser/go-sysconf v0.3.14 -## explicit; go 1.18 +# github.com/tklauser/go-sysconf v0.3.16 +## explicit; go 1.24.0 github.com/tklauser/go-sysconf -# github.com/tklauser/numcpus v0.9.0 -## explicit; go 1.18 +# github.com/tklauser/numcpus v0.11.0 +## explicit; go 1.24.0 github.com/tklauser/numcpus # github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 @@ -622,8 +624,8 @@ golang.org/x/oauth2/jwt ## explicit; go 1.23.0 golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.34.0 -## explicit; go 1.23.0 +# golang.org/x/sys v0.38.0 +## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix From 3845e9561aad0fd909c3bf8aa76fef6192a32224 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Tue, 27 Jan 2026 18:49:07 +0530 Subject: [PATCH 07/19] upgrade baton-sdk --- go.mod | 2 +- go.sum | 4 +- pkg/connector/repository.go | 10 +- pkg/connector/team.go | 17 +- .../v2/annotation_security_insight.pb.go | 495 +++++++++++++-- ...annotation_security_insight.pb.validate.go | 564 +++++++++++++++++- ...otation_security_insight_protoopaque.pb.go | 484 +++++++++++++-- .../pb/c1/connector/v2/connector.pb.go | 60 +- .../connector/v2/connector_protoopaque.pb.go | 60 +- .../pb/c1/connector/v2/entitlement.pb.go | 10 +- .../v2/entitlement_protoopaque.pb.go | 10 +- .../pb/c1/connector/v2/resource.pb.go | 20 +- .../c1/connector/v2/resource.pb.validate.go | 15 + .../connector/v2/resource_protoopaque.pb.go | 20 +- .../pb/c1/connectorapi/baton/v1/baton.pb.go | 22 +- .../baton/v1/baton.pb.validate.go | 15 + .../baton/v1/baton_protoopaque.pb.go | 22 +- .../baton-sdk/pkg/actions/actions.go | 120 +++- .../baton-sdk/pkg/cli/commands.go | 2 + .../baton-sdk/pkg/cli/lambda_server__added.go | 18 +- .../pkg/connectorbuilder/accounts.go | 43 +- .../pkg/connectorbuilder/connectorbuilder.go | 14 +- .../baton-sdk/pkg/connectorrunner/runner.go | 18 +- .../pkg/connectorstore/connectorstore.go | 2 +- .../baton-sdk/pkg/dotc1z/c1file.go | 71 ++- .../baton-sdk/pkg/dotc1z/c1file_attached.go | 84 ++- .../baton-sdk/pkg/dotc1z/clone_sync.go | 4 +- .../baton-sdk/pkg/dotc1z/decoder.go | 1 + .../baton-sdk/pkg/dotc1z/dotc1z.go | 2 +- .../conductorone/baton-sdk/pkg/dotc1z/file.go | 99 ++- .../baton-sdk/pkg/dotc1z/grants.go | 23 - .../baton-sdk/pkg/dotc1z/manager/s3/s3.go | 1 + .../baton-sdk/pkg/dotc1z/sync_runs.go | 39 +- .../baton-sdk/pkg/field/defaults.go | 7 + .../baton-sdk/pkg/provisioner/provisioner.go | 29 +- .../conductorone/baton-sdk/pkg/sdk/version.go | 2 +- .../conductorone/baton-sdk/pkg/sync/syncer.go | 117 +++- .../pkg/synccompactor/attached/attached.go | 68 +-- .../baton-sdk/pkg/synccompactor/compactor.go | 299 +++++----- .../pkg/synccompactor/naive/naive.go | 88 --- .../pkg/synccompactor/naive/naive_unroll.go | 98 --- .../pkg/tasks/c1api/create_account.go | 1 + .../baton-sdk/pkg/tasks/local/accounter.go | 26 +- .../baton-sdk/pkg/tasks/local/deleter.go | 2 +- .../baton-sdk/pkg/tasks/local/differ.go | 2 +- .../pkg/types/entitlement/entitlement.go | 23 + .../types/resource/security_insight_trait.go | 236 ++++---- vendor/modules.txt | 3 +- 48 files changed, 2509 insertions(+), 863 deletions(-) delete mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive.go delete mode 100644 
vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive_unroll.go diff --git a/go.mod b/go.mod index 34ae51f5..ee1d2d76 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/conductorone/baton-github go 1.25.2 require ( - github.com/conductorone/baton-sdk v0.6.9 + github.com/conductorone/baton-sdk v0.6.24 github.com/deckarep/golang-set/v2 v2.8.0 github.com/ennyjfrick/ruleguard-logfatal v0.0.2 github.com/golang-jwt/jwt/v5 v5.2.2 diff --git a/go.sum b/go.sum index 65631f28..6aa1b6e0 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/conductorone/baton-sdk v0.6.9 h1:HckTc+QeoL/K4FAOrvrsTIDb65898ft/m2YIty/YBgk= -github.com/conductorone/baton-sdk v0.6.9/go.mod h1:9S5feBOuIJxlNdGmkv3ObkCNHbVyOHr6foNrIrk+d4Y= +github.com/conductorone/baton-sdk v0.6.24 h1:0Uc0+EyJZx36a6XEoLurqsW2z/2yJVtMYxvMOn1CEf4= +github.com/conductorone/baton-sdk v0.6.24/go.mod h1:9S5feBOuIJxlNdGmkv3ObkCNHbVyOHr6foNrIrk+d4Y= github.com/conductorone/dpop v0.2.3 h1:s91U3845GHQ6P6FWrdNr2SEOy1ES/jcFs1JtKSl2S+o= github.com/conductorone/dpop v0.2.3/go.mod h1:gyo8TtzB9SCFCsjsICH4IaLZ7y64CcrDXMOPBwfq/3s= github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 h1:kLMCNIh0Mo2vbvvkCmJ3ixsPbXEJ6HPcW53Ku9yje3s= diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index 1e7cdb97..88d4ae64 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -470,8 +470,14 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont Name: "parent", DisplayName: "Parent Organization", Description: "The organization to create the repository in", - Field: &config.Field_ResourceIdField{}, - IsRequired: true, + Field: &config.Field_ResourceIdField{ + ResourceIdField: &config.ResourceIdField{ + Rules: &config.ResourceIDRules{ + AllowedResourceTypeIds: []string{resourceTypeOrg.Id}, + }, + }, + }, + IsRequired: true, }, { Name: "description", diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 6fc420d2..92512e76 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -398,7 +398,13 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr Name: "parent", DisplayName: "Parent Organization", Description: "The organization to create the team in", - Field: &config.Field_ResourceIdField{}, + Field: &config.Field_ResourceIdField{ + ResourceIdField: &config.ResourceIdField{ + Rules: &config.ResourceIDRules{ + AllowedResourceTypeIds: []string{resourceTypeOrg.Id}, + }, + }, + }, IsRequired: true, }, { @@ -411,7 +417,14 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr Name: "privacy", DisplayName: "Privacy", Description: "The privacy level: 'secret' or 'closed'", - Field: &config.Field_StringField{}, + Field: &config.Field_StringField{ + StringField: &config.StringField{ + Options: []*config.StringFieldOption{ + {Value: "secret", DisplayName: "Secret (only visible to org owners and team members)"}, + {Value: "closed", DisplayName: "Closed (visible to all org members)"}, + }, + }, + }, }, { Name: "notification_setting", diff --git 
a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.go index 9da76c41..354f27af 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.go @@ -24,15 +24,152 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// RiskScore represents a risk score insight +type RiskScore struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The risk score value (e.g., "85", "High") + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RiskScore) Reset() { + *x = RiskScore{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RiskScore) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RiskScore) ProtoMessage() {} + +func (x *RiskScore) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RiskScore) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *RiskScore) SetValue(v string) { + x.Value = v +} + +type RiskScore_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The risk score value (e.g., "85", "High") + Value string +} + +func (b0 RiskScore_builder) Build() *RiskScore { + m0 := &RiskScore{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + return m0 +} + +// Issue represents a security issue or vulnerability +type Issue struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + // The issue description or severity (e.g., "Critical", "CVE-2024-1234") + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Severity string `protobuf:"bytes,2,opt,name=severity,proto3" json:"severity,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Issue) Reset() { + *x = Issue{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Issue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Issue) ProtoMessage() {} + +func (x *Issue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Issue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *Issue) GetSeverity() string { + if x != nil { + return x.Severity + } + return "" +} + +func (x *Issue) SetValue(v string) { + x.Value = v +} + +func (x *Issue) SetSeverity(v string) { + x.Severity = v +} + +type Issue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // The issue description or severity (e.g., "Critical", "CVE-2024-1234") + Value string + Severity string +} + +func (b0 Issue_builder) Build() *Issue { + m0 := &Issue{} + b, x := &b0, m0 + _, _ = b, x + x.Value = b.Value + x.Severity = b.Severity + return m0 +} + // SecurityInsightTrait is the trait annotation for resources with TRAIT_SECURITY_INSIGHT. // It contains the metadata for the security insight including type, value, observation time, // and the target entity (user or resource) that this insight should be bound to. type SecurityInsightTrait struct { state protoimpl.MessageState `protogen:"hybrid.v1"` - // The type of insight (e.g., "crowdstrike_zta_score", "wiz_critical_vulnerability") - InsightType string `protobuf:"bytes,1,opt,name=insight_type,json=insightType,proto3" json:"insight_type,omitempty"` - // The value of the insight (e.g., "85", "High", "Critical") - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // The type and value of the insight + // + // Types that are valid to be assigned to InsightType: + // + // *SecurityInsightTrait_RiskScore + // *SecurityInsightTrait_Issue + InsightType isSecurityInsightTrait_InsightType `protobuf_oneof:"insight_type"` // When this insight was observed/captured from the source system ObservedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=observed_at,json=observedAt,proto3" json:"observed_at,omitempty"` // The target entity this insight should be bound to @@ -42,6 +179,7 @@ type SecurityInsightTrait struct { // *SecurityInsightTrait_User // *SecurityInsightTrait_ResourceId // *SecurityInsightTrait_ExternalResource + // *SecurityInsightTrait_AppUser Target isSecurityInsightTrait_Target `protobuf_oneof:"target"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -49,7 +187,7 @@ type SecurityInsightTrait struct { func (x *SecurityInsightTrait) Reset() { *x = SecurityInsightTrait{} - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -61,7 +199,7 @@ func (x *SecurityInsightTrait) String() string { func (*SecurityInsightTrait) ProtoMessage() {} func (x *SecurityInsightTrait) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -72,18 +210,29 @@ func (x *SecurityInsightTrait) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -func (x *SecurityInsightTrait) GetInsightType() string { +func (x *SecurityInsightTrait) GetInsightType() isSecurityInsightTrait_InsightType { if x != nil { return x.InsightType } - return "" + return nil } -func (x *SecurityInsightTrait) GetValue() string { +func (x *SecurityInsightTrait) GetRiskScore() *RiskScore { if x != nil { - return x.Value + if x, ok := x.InsightType.(*SecurityInsightTrait_RiskScore); ok { + return x.RiskScore + } } - return "" + return nil +} + +func (x *SecurityInsightTrait) GetIssue() *Issue { + if x != nil { + if x, ok := x.InsightType.(*SecurityInsightTrait_Issue); ok { + return x.Issue + } + } + return nil } func (x *SecurityInsightTrait) GetObservedAt() *timestamppb.Timestamp { @@ -127,12 +276,29 @@ func (x *SecurityInsightTrait) GetExternalResource() *SecurityInsightTrait_Exter return nil } -func 
(x *SecurityInsightTrait) SetInsightType(v string) { - x.InsightType = v +func (x *SecurityInsightTrait) GetAppUser() *SecurityInsightTrait_AppUserTarget { + if x != nil { + if x, ok := x.Target.(*SecurityInsightTrait_AppUser); ok { + return x.AppUser + } + } + return nil } -func (x *SecurityInsightTrait) SetValue(v string) { - x.Value = v +func (x *SecurityInsightTrait) SetRiskScore(v *RiskScore) { + if v == nil { + x.InsightType = nil + return + } + x.InsightType = &SecurityInsightTrait_RiskScore{v} +} + +func (x *SecurityInsightTrait) SetIssue(v *Issue) { + if v == nil { + x.InsightType = nil + return + } + x.InsightType = &SecurityInsightTrait_Issue{v} } func (x *SecurityInsightTrait) SetObservedAt(v *timestamppb.Timestamp) { @@ -163,6 +329,37 @@ func (x *SecurityInsightTrait) SetExternalResource(v *SecurityInsightTrait_Exter x.Target = &SecurityInsightTrait_ExternalResource{v} } +func (x *SecurityInsightTrait) SetAppUser(v *SecurityInsightTrait_AppUserTarget) { + if v == nil { + x.Target = nil + return + } + x.Target = &SecurityInsightTrait_AppUser{v} +} + +func (x *SecurityInsightTrait) HasInsightType() bool { + if x == nil { + return false + } + return x.InsightType != nil +} + +func (x *SecurityInsightTrait) HasRiskScore() bool { + if x == nil { + return false + } + _, ok := x.InsightType.(*SecurityInsightTrait_RiskScore) + return ok +} + +func (x *SecurityInsightTrait) HasIssue() bool { + if x == nil { + return false + } + _, ok := x.InsightType.(*SecurityInsightTrait_Issue) + return ok +} + func (x *SecurityInsightTrait) HasObservedAt() bool { if x == nil { return false @@ -201,6 +398,30 @@ func (x *SecurityInsightTrait) HasExternalResource() bool { return ok } +func (x *SecurityInsightTrait) HasAppUser() bool { + if x == nil { + return false + } + _, ok := x.Target.(*SecurityInsightTrait_AppUser) + return ok +} + +func (x *SecurityInsightTrait) ClearInsightType() { + x.InsightType = nil +} + +func (x *SecurityInsightTrait) ClearRiskScore() { + if _, ok := x.InsightType.(*SecurityInsightTrait_RiskScore); ok { + x.InsightType = nil + } +} + +func (x *SecurityInsightTrait) ClearIssue() { + if _, ok := x.InsightType.(*SecurityInsightTrait_Issue); ok { + x.InsightType = nil + } +} + func (x *SecurityInsightTrait) ClearObservedAt() { x.ObservedAt = nil } @@ -227,10 +448,35 @@ func (x *SecurityInsightTrait) ClearExternalResource() { } } +func (x *SecurityInsightTrait) ClearAppUser() { + if _, ok := x.Target.(*SecurityInsightTrait_AppUser); ok { + x.Target = nil + } +} + +const SecurityInsightTrait_InsightType_not_set_case case_SecurityInsightTrait_InsightType = 0 +const SecurityInsightTrait_RiskScore_case case_SecurityInsightTrait_InsightType = 1 +const SecurityInsightTrait_Issue_case case_SecurityInsightTrait_InsightType = 2 + +func (x *SecurityInsightTrait) WhichInsightType() case_SecurityInsightTrait_InsightType { + if x == nil { + return SecurityInsightTrait_InsightType_not_set_case + } + switch x.InsightType.(type) { + case *SecurityInsightTrait_RiskScore: + return SecurityInsightTrait_RiskScore_case + case *SecurityInsightTrait_Issue: + return SecurityInsightTrait_Issue_case + default: + return SecurityInsightTrait_InsightType_not_set_case + } +} + const SecurityInsightTrait_Target_not_set_case case_SecurityInsightTrait_Target = 0 const SecurityInsightTrait_User_case case_SecurityInsightTrait_Target = 4 const SecurityInsightTrait_ResourceId_case case_SecurityInsightTrait_Target = 5 const SecurityInsightTrait_ExternalResource_case case_SecurityInsightTrait_Target = 6 +const 
SecurityInsightTrait_AppUser_case case_SecurityInsightTrait_Target = 7 func (x *SecurityInsightTrait) WhichTarget() case_SecurityInsightTrait_Target { if x == nil { @@ -243,6 +489,8 @@ func (x *SecurityInsightTrait) WhichTarget() case_SecurityInsightTrait_Target { return SecurityInsightTrait_ResourceId_case case *SecurityInsightTrait_ExternalResource: return SecurityInsightTrait_ExternalResource_case + case *SecurityInsightTrait_AppUser: + return SecurityInsightTrait_AppUser_case default: return SecurityInsightTrait_Target_not_set_case } @@ -251,10 +499,12 @@ func (x *SecurityInsightTrait) WhichTarget() case_SecurityInsightTrait_Target { type SecurityInsightTrait_builder struct { _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. - // The type of insight (e.g., "crowdstrike_zta_score", "wiz_critical_vulnerability") - InsightType string - // The value of the insight (e.g., "85", "High", "Critical") - Value string + // The type and value of the insight + + // Fields of oneof InsightType: + RiskScore *RiskScore + Issue *Issue + // -- end of InsightType // When this insight was observed/captured from the source system ObservedAt *timestamppb.Timestamp // The target entity this insight should be bound to @@ -266,6 +516,8 @@ type SecurityInsightTrait_builder struct { ResourceId *ResourceId // For binding to an AppResource by external ID ExternalResource *SecurityInsightTrait_ExternalResourceTarget + // For binding to an AppUser by email address + AppUser *SecurityInsightTrait_AppUserTarget // -- end of Target } @@ -273,8 +525,12 @@ func (b0 SecurityInsightTrait_builder) Build() *SecurityInsightTrait { m0 := &SecurityInsightTrait{} b, x := &b0, m0 _, _ = b, x - x.InsightType = b.InsightType - x.Value = b.Value + if b.RiskScore != nil { + x.InsightType = &SecurityInsightTrait_RiskScore{b.RiskScore} + } + if b.Issue != nil { + x.InsightType = &SecurityInsightTrait_Issue{b.Issue} + } x.ObservedAt = b.ObservedAt if b.User != nil { x.Target = &SecurityInsightTrait_User{b.User} @@ -285,19 +541,48 @@ func (b0 SecurityInsightTrait_builder) Build() *SecurityInsightTrait { if b.ExternalResource != nil { x.Target = &SecurityInsightTrait_ExternalResource{b.ExternalResource} } + if b.AppUser != nil { + x.Target = &SecurityInsightTrait_AppUser{b.AppUser} + } return m0 } +type case_SecurityInsightTrait_InsightType protoreflect.FieldNumber + +func (x case_SecurityInsightTrait_InsightType) String() string { + md := file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type case_SecurityInsightTrait_Target protoreflect.FieldNumber func (x case_SecurityInsightTrait_Target) String() string { - md := file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0].Descriptor() + md := file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2].Descriptor() if x == 0 { return "not set" } return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) } +type isSecurityInsightTrait_InsightType interface { + isSecurityInsightTrait_InsightType() +} + +type SecurityInsightTrait_RiskScore struct { + RiskScore *RiskScore `protobuf:"bytes,1,opt,name=risk_score,json=riskScore,proto3,oneof"` +} + +type SecurityInsightTrait_Issue struct { + Issue *Issue `protobuf:"bytes,2,opt,name=issue,proto3,oneof"` +} + +func (*SecurityInsightTrait_RiskScore) isSecurityInsightTrait_InsightType() {} + +func (*SecurityInsightTrait_Issue) 
isSecurityInsightTrait_InsightType() {} + type isSecurityInsightTrait_Target interface { isSecurityInsightTrait_Target() } @@ -317,12 +602,19 @@ type SecurityInsightTrait_ExternalResource struct { ExternalResource *SecurityInsightTrait_ExternalResourceTarget `protobuf:"bytes,6,opt,name=external_resource,json=externalResource,proto3,oneof"` } +type SecurityInsightTrait_AppUser struct { + // For binding to an AppUser by email address + AppUser *SecurityInsightTrait_AppUserTarget `protobuf:"bytes,7,opt,name=app_user,json=appUser,proto3,oneof"` +} + func (*SecurityInsightTrait_User) isSecurityInsightTrait_Target() {} func (*SecurityInsightTrait_ResourceId) isSecurityInsightTrait_Target() {} func (*SecurityInsightTrait_ExternalResource) isSecurityInsightTrait_Target() {} +func (*SecurityInsightTrait_AppUser) isSecurityInsightTrait_Target() {} + // UserTarget identifies a user by email for resolution to a C1 User type SecurityInsightTrait_UserTarget struct { state protoimpl.MessageState `protogen:"hybrid.v1"` @@ -333,7 +625,7 @@ type SecurityInsightTrait_UserTarget struct { func (x *SecurityInsightTrait_UserTarget) Reset() { *x = SecurityInsightTrait_UserTarget{} - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -345,7 +637,7 @@ func (x *SecurityInsightTrait_UserTarget) String() string { func (*SecurityInsightTrait_UserTarget) ProtoMessage() {} func (x *SecurityInsightTrait_UserTarget) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -381,6 +673,80 @@ func (b0 SecurityInsightTrait_UserTarget_builder) Build() *SecurityInsightTrait_ return m0 } +// AppUserTarget identifies a user by email for resolution to an AppUser. +type SecurityInsightTrait_AppUserTarget struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` + // The external identifier of the user (e.g., ID, GUID, etc.) 
+ ExternalId string `protobuf:"bytes,2,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityInsightTrait_AppUserTarget) Reset() { + *x = SecurityInsightTrait_AppUserTarget{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityInsightTrait_AppUserTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityInsightTrait_AppUserTarget) ProtoMessage() {} + +func (x *SecurityInsightTrait_AppUserTarget) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityInsightTrait_AppUserTarget) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *SecurityInsightTrait_AppUserTarget) GetExternalId() string { + if x != nil { + return x.ExternalId + } + return "" +} + +func (x *SecurityInsightTrait_AppUserTarget) SetEmail(v string) { + x.Email = v +} + +func (x *SecurityInsightTrait_AppUserTarget) SetExternalId(v string) { + x.ExternalId = v +} + +type SecurityInsightTrait_AppUserTarget_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Email string + // The external identifier of the user (e.g., ID, GUID, etc.) + ExternalId string +} + +func (b0 SecurityInsightTrait_AppUserTarget_builder) Build() *SecurityInsightTrait_AppUserTarget { + m0 := &SecurityInsightTrait_AppUserTarget{} + b, x := &b0, m0 + _, _ = b, x + x.Email = b.Email + x.ExternalId = b.ExternalId + return m0 +} + // ExternalResourceTarget identifies a resource by external ID for resolution to an AppResource. // Use this when the connector doesn't sync the target resource itself. 
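The AppUserTarget above, together with the RiskScore/Issue oneof added earlier in this file, replaces the former insight_type/value string pair on SecurityInsightTrait. A minimal sketch of how a connector might assemble the new shape using the generated builders defined in this file; the email and external ID are placeholder values, and the v2 alias refers to this generated package.

package example

import (
    v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
    "google.golang.org/protobuf/types/known/timestamppb"
)

// buildInsightSketch assembles a SecurityInsightTrait with the new oneof shape.
func buildInsightSketch() *v2.SecurityInsightTrait {
    return v2.SecurityInsightTrait_builder{
        // insight_type oneof: exactly one of RiskScore or Issue should be set.
        RiskScore: v2.RiskScore_builder{Value: "85"}.Build(),
        // When the insight was observed in the source system.
        ObservedAt: timestamppb.Now(),
        // target oneof: bind the insight to an app user by email/external ID.
        AppUser: v2.SecurityInsightTrait_AppUserTarget_builder{
            Email:      "user@example.com", // placeholder
            ExternalId: "ext-123",          // placeholder
        }.Build(),
    }.Build()
}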
type SecurityInsightTrait_ExternalResourceTarget struct { @@ -395,7 +761,7 @@ type SecurityInsightTrait_ExternalResourceTarget struct { func (x *SecurityInsightTrait_ExternalResourceTarget) Reset() { *x = SecurityInsightTrait_ExternalResourceTarget{} - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -407,7 +773,7 @@ func (x *SecurityInsightTrait_ExternalResourceTarget) String() string { func (*SecurityInsightTrait_ExternalResourceTarget) ProtoMessage() {} func (x *SecurityInsightTrait_ExternalResourceTarget) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -462,46 +828,64 @@ var File_c1_connector_v2_annotation_security_insight_proto protoreflect.FileDesc const file_c1_connector_v2_annotation_security_insight_proto_rawDesc = "" + "\n" + - "1c1/connector/v2/annotation_security_insight.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xc9\x04\n" + - "\x14SecurityInsightTrait\x12-\n" + - "\finsight_type\x18\x01 \x01(\tB\n" + - "\xfaB\ar\x05 \x01(\x80\bR\vinsightType\x12 \n" + - "\x05value\x18\x02 \x01(\tB\n" + - "\xfaB\ar\x05 \x01(\x80\bR\x05value\x12;\n" + + "1c1/connector/v2/annotation_security_insight.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"!\n" + + "\tRiskScore\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\"E\n" + + "\x05Issue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\x12&\n" + + "\bseverity\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x00(\x80\bR\bseverity\"\xae\x06\n" + + "\x14SecurityInsightTrait\x12;\n" + + "\n" + + "risk_score\x18\x01 \x01(\v2\x1a.c1.connector.v2.RiskScoreH\x00R\triskScore\x12.\n" + + "\x05issue\x18\x02 \x01(\v2\x16.c1.connector.v2.IssueH\x00R\x05issue\x12;\n" + "\vobserved_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" + "observedAt\x12F\n" + - "\x04user\x18\x04 \x01(\v20.c1.connector.v2.SecurityInsightTrait.UserTargetH\x00R\x04user\x12>\n" + - "\vresource_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdH\x00R\n" + + "\x04user\x18\x04 \x01(\v20.c1.connector.v2.SecurityInsightTrait.UserTargetH\x01R\x04user\x12>\n" + + "\vresource_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdH\x01R\n" + "resourceId\x12k\n" + - "\x11external_resource\x18\x06 \x01(\v2<.c1.connector.v2.SecurityInsightTrait.ExternalResourceTargetH\x00R\x10externalResource\x1a0\n" + + "\x11external_resource\x18\x06 \x01(\v2<.c1.connector.v2.SecurityInsightTrait.ExternalResourceTargetH\x01R\x10externalResource\x12P\n" + + "\bapp_user\x18\a \x01(\v23.c1.connector.v2.SecurityInsightTrait.AppUserTargetH\x01R\aappUser\x1a0\n" + "\n" + "UserTarget\x12\"\n" + - "\x05email\x18\x01 \x01(\tB\f\xfaB\tr\a \x01(\x80\b`\x01R\x05email\x1am\n" + + "\x05email\x18\x01 \x01(\tB\f\xfaB\tr\a \x01(\x80\b`\x01R\x05email\x1a`\n" + + "\rAppUserTarget\x12\"\n" + + "\x05email\x18\x01 \x01(\tB\f\xfaB\tr\a \x01(\x80\b`\x01R\x05email\x12+\n" + + "\vexternal_id\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80 R\n" + + "externalId\x1am\n" + "\x16ExternalResourceTarget\x12+\n" + "\vexternal_id\x18\x01 
\x01(\tB\n" + "\xfaB\ar\x05 \x01(\x80 R\n" + "externalId\x12&\n" + - "\bapp_hint\x18\x02 \x01(\tB\v\xfaB\br\x06(\x80\b\xd0\x01\x01R\aappHintB\r\n" + + "\bapp_hint\x18\x02 \x01(\tB\v\xfaB\br\x06(\x80\b\xd0\x01\x01R\aappHintB\x13\n" + + "\finsight_type\x12\x03\xf8B\x01B\r\n" + "\x06target\x12\x03\xf8B\x01B6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" -var file_c1_connector_v2_annotation_security_insight_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_c1_connector_v2_annotation_security_insight_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_c1_connector_v2_annotation_security_insight_proto_goTypes = []any{ - (*SecurityInsightTrait)(nil), // 0: c1.connector.v2.SecurityInsightTrait - (*SecurityInsightTrait_UserTarget)(nil), // 1: c1.connector.v2.SecurityInsightTrait.UserTarget - (*SecurityInsightTrait_ExternalResourceTarget)(nil), // 2: c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget - (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp - (*ResourceId)(nil), // 4: c1.connector.v2.ResourceId + (*RiskScore)(nil), // 0: c1.connector.v2.RiskScore + (*Issue)(nil), // 1: c1.connector.v2.Issue + (*SecurityInsightTrait)(nil), // 2: c1.connector.v2.SecurityInsightTrait + (*SecurityInsightTrait_UserTarget)(nil), // 3: c1.connector.v2.SecurityInsightTrait.UserTarget + (*SecurityInsightTrait_AppUserTarget)(nil), // 4: c1.connector.v2.SecurityInsightTrait.AppUserTarget + (*SecurityInsightTrait_ExternalResourceTarget)(nil), // 5: c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp + (*ResourceId)(nil), // 7: c1.connector.v2.ResourceId } var file_c1_connector_v2_annotation_security_insight_proto_depIdxs = []int32{ - 3, // 0: c1.connector.v2.SecurityInsightTrait.observed_at:type_name -> google.protobuf.Timestamp - 1, // 1: c1.connector.v2.SecurityInsightTrait.user:type_name -> c1.connector.v2.SecurityInsightTrait.UserTarget - 4, // 2: c1.connector.v2.SecurityInsightTrait.resource_id:type_name -> c1.connector.v2.ResourceId - 2, // 3: c1.connector.v2.SecurityInsightTrait.external_resource:type_name -> c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 0, // 0: c1.connector.v2.SecurityInsightTrait.risk_score:type_name -> c1.connector.v2.RiskScore + 1, // 1: c1.connector.v2.SecurityInsightTrait.issue:type_name -> c1.connector.v2.Issue + 6, // 2: c1.connector.v2.SecurityInsightTrait.observed_at:type_name -> google.protobuf.Timestamp + 3, // 3: c1.connector.v2.SecurityInsightTrait.user:type_name -> c1.connector.v2.SecurityInsightTrait.UserTarget + 7, // 4: c1.connector.v2.SecurityInsightTrait.resource_id:type_name -> c1.connector.v2.ResourceId + 5, // 5: c1.connector.v2.SecurityInsightTrait.external_resource:type_name -> c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget + 4, // 6: c1.connector.v2.SecurityInsightTrait.app_user:type_name -> c1.connector.v2.SecurityInsightTrait.AppUserTarget + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { 
file_c1_connector_v2_annotation_security_insight_proto_init() } @@ -510,10 +894,13 @@ func file_c1_connector_v2_annotation_security_insight_proto_init() { return } file_c1_connector_v2_resource_proto_init() - file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0].OneofWrappers = []any{ + file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2].OneofWrappers = []any{ + (*SecurityInsightTrait_RiskScore)(nil), + (*SecurityInsightTrait_Issue)(nil), (*SecurityInsightTrait_User)(nil), (*SecurityInsightTrait_ResourceId)(nil), (*SecurityInsightTrait_ExternalResource)(nil), + (*SecurityInsightTrait_AppUser)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -521,7 +908,7 @@ func file_c1_connector_v2_annotation_security_insight_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_security_insight_proto_rawDesc), len(file_c1_connector_v2_annotation_security_insight_proto_rawDesc)), NumEnums: 0, - NumMessages: 3, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.validate.go index b550d79e..6b83aee6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight.pb.validate.go @@ -35,6 +35,218 @@ var ( _ = sort.Sort ) +// Validate checks the field values on RiskScore with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RiskScore) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RiskScore with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RiskScoreMultiError, or nil +// if none found. +func (m *RiskScore) ValidateAll() error { + return m.validate(true) +} + +func (m *RiskScore) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Value + + if len(errors) > 0 { + return RiskScoreMultiError(errors) + } + + return nil +} + +// RiskScoreMultiError is an error wrapping multiple validation errors returned +// by RiskScore.ValidateAll() if the designated constraints aren't met. +type RiskScoreMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RiskScoreMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RiskScoreMultiError) AllErrors() []error { return m } + +// RiskScoreValidationError is the validation error returned by +// RiskScore.Validate if the designated constraints aren't met. +type RiskScoreValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RiskScoreValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RiskScoreValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e RiskScoreValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RiskScoreValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RiskScoreValidationError) ErrorName() string { return "RiskScoreValidationError" } + +// Error satisfies the builtin error interface +func (e RiskScoreValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRiskScore.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RiskScoreValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RiskScoreValidationError{} + +// Validate checks the field values on Issue with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Issue) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Issue with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in IssueMultiError, or nil if none found. +func (m *Issue) ValidateAll() error { + return m.validate(true) +} + +func (m *Issue) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Value + + if l := len(m.GetSeverity()); l < 0 || l > 1024 { + err := IssueValidationError{ + field: "Severity", + reason: "value length must be between 0 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return IssueMultiError(errors) + } + + return nil +} + +// IssueMultiError is an error wrapping multiple validation errors returned by +// Issue.ValidateAll() if the designated constraints aren't met. +type IssueMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m IssueMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m IssueMultiError) AllErrors() []error { return m } + +// IssueValidationError is the validation error returned by Issue.Validate if +// the designated constraints aren't met. +type IssueValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e IssueValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e IssueValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e IssueValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e IssueValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e IssueValidationError) ErrorName() string { return "IssueValidationError" } + +// Error satisfies the builtin error interface +func (e IssueValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sIssue.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = IssueValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = IssueValidationError{} + // Validate checks the field values on SecurityInsightTrait with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. @@ -57,28 +269,6 @@ func (m *SecurityInsightTrait) validate(all bool) error { var errors []error - if l := len(m.GetInsightType()); l < 1 || l > 1024 { - err := SecurityInsightTraitValidationError{ - field: "InsightType", - reason: "value length must be between 1 and 1024 bytes, inclusive", - } - if !all { - return err - } - errors = append(errors, err) - } - - if l := len(m.GetValue()); l < 1 || l > 1024 { - err := SecurityInsightTraitValidationError{ - field: "Value", - reason: "value length must be between 1 and 1024 bytes, inclusive", - } - if !all { - return err - } - errors = append(errors, err) - } - if all { switch v := interface{}(m.GetObservedAt()).(type) { case interface{ ValidateAll() error }: @@ -108,6 +298,105 @@ func (m *SecurityInsightTrait) validate(all bool) error { } } + oneofInsightTypePresent := false + switch v := m.InsightType.(type) { + case *SecurityInsightTrait_RiskScore: + if v == nil { + err := SecurityInsightTraitValidationError{ + field: "InsightType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofInsightTypePresent = true + + if all { + switch v := interface{}(m.GetRiskScore()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "RiskScore", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "RiskScore", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRiskScore()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecurityInsightTraitValidationError{ + field: "RiskScore", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *SecurityInsightTrait_Issue: + if v == nil { + err := SecurityInsightTraitValidationError{ + field: "InsightType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofInsightTypePresent = true + + if all { + switch v := interface{}(m.GetIssue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "Issue", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "Issue", + reason: "embedded message failed 
validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIssue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecurityInsightTraitValidationError{ + field: "Issue", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if !oneofInsightTypePresent { + err := SecurityInsightTraitValidationError{ + field: "InsightType", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } oneofTargetPresent := false switch v := m.Target.(type) { case *SecurityInsightTrait_User: @@ -236,6 +525,48 @@ func (m *SecurityInsightTrait) validate(all bool) error { } } + case *SecurityInsightTrait_AppUser: + if v == nil { + err := SecurityInsightTraitValidationError{ + field: "Target", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + oneofTargetPresent = true + + if all { + switch v := interface{}(m.GetAppUser()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "AppUser", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, SecurityInsightTraitValidationError{ + field: "AppUser", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetAppUser()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return SecurityInsightTraitValidationError{ + field: "AppUser", + reason: "embedded message failed validation", + cause: err, + } + } + } + default: _ = v // ensures v is used } @@ -506,6 +837,195 @@ var _ interface { ErrorName() string } = SecurityInsightTrait_UserTargetValidationError{} +// Validate checks the field values on SecurityInsightTrait_AppUserTarget with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *SecurityInsightTrait_AppUserTarget) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SecurityInsightTrait_AppUserTarget +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// SecurityInsightTrait_AppUserTargetMultiError, or nil if none found. 
+func (m *SecurityInsightTrait_AppUserTarget) ValidateAll() error { + return m.validate(true) +} + +func (m *SecurityInsightTrait_AppUserTarget) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if l := len(m.GetEmail()); l < 1 || l > 1024 { + err := SecurityInsightTrait_AppUserTargetValidationError{ + field: "Email", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if err := m._validateEmail(m.GetEmail()); err != nil { + err = SecurityInsightTrait_AppUserTargetValidationError{ + field: "Email", + reason: "value must be a valid email address", + cause: err, + } + if !all { + return err + } + errors = append(errors, err) + } + + if l := len(m.GetExternalId()); l < 1 || l > 4096 { + err := SecurityInsightTrait_AppUserTargetValidationError{ + field: "ExternalId", + reason: "value length must be between 1 and 4096 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + if len(errors) > 0 { + return SecurityInsightTrait_AppUserTargetMultiError(errors) + } + + return nil +} + +func (m *SecurityInsightTrait_AppUserTarget) _validateHostname(host string) error { + s := strings.ToLower(strings.TrimSuffix(host, ".")) + + if len(host) > 253 { + return errors.New("hostname cannot exceed 253 characters") + } + + for _, part := range strings.Split(s, ".") { + if l := len(part); l == 0 || l > 63 { + return errors.New("hostname part must be non-empty and cannot exceed 63 characters") + } + + if part[0] == '-' { + return errors.New("hostname parts cannot begin with hyphens") + } + + if part[len(part)-1] == '-' { + return errors.New("hostname parts cannot end with hyphens") + } + + for _, r := range part { + if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { + return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) + } + } + } + + return nil +} + +func (m *SecurityInsightTrait_AppUserTarget) _validateEmail(addr string) error { + a, err := mail.ParseAddress(addr) + if err != nil { + return err + } + addr = a.Address + + if len(addr) > 254 { + return errors.New("email addresses cannot exceed 254 characters") + } + + parts := strings.SplitN(addr, "@", 2) + + if len(parts[0]) > 64 { + return errors.New("email address local phrase cannot exceed 64 characters") + } + + return m._validateHostname(parts[1]) +} + +// SecurityInsightTrait_AppUserTargetMultiError is an error wrapping multiple +// validation errors returned by +// SecurityInsightTrait_AppUserTarget.ValidateAll() if the designated +// constraints aren't met. +type SecurityInsightTrait_AppUserTargetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SecurityInsightTrait_AppUserTargetMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SecurityInsightTrait_AppUserTargetMultiError) AllErrors() []error { return m } + +// SecurityInsightTrait_AppUserTargetValidationError is the validation error +// returned by SecurityInsightTrait_AppUserTarget.Validate if the designated +// constraints aren't met. +type SecurityInsightTrait_AppUserTargetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
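Because the regenerated validation marks the insight_type oneof as required (the oneofInsightTypePresent check above) and constrains AppUserTarget email and external ID lengths, callers can surface every violation at once rather than stopping at the first. A short sketch, assuming a trait built elsewhere:

package example

import (
    "fmt"

    v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// checkInsightSketch runs the generated validators and wraps any violations.
func checkInsightSketch(trait *v2.SecurityInsightTrait) error {
    // ValidateAll collects all violations (missing insight_type, invalid
    // AppUserTarget email, over-long Issue severity, and so on).
    if err := trait.ValidateAll(); err != nil {
        return fmt.Errorf("invalid security insight: %w", err)
    }
    return nil
}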
+func (e SecurityInsightTrait_AppUserTargetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SecurityInsightTrait_AppUserTargetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SecurityInsightTrait_AppUserTargetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SecurityInsightTrait_AppUserTargetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SecurityInsightTrait_AppUserTargetValidationError) ErrorName() string { + return "SecurityInsightTrait_AppUserTargetValidationError" +} + +// Error satisfies the builtin error interface +func (e SecurityInsightTrait_AppUserTargetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSecurityInsightTrait_AppUserTarget.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SecurityInsightTrait_AppUserTargetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SecurityInsightTrait_AppUserTargetValidationError{} + // Validate checks the field values on // SecurityInsightTrait_ExternalResourceTarget with the rules defined in the // proto definition for this message. If any rules are violated, the first diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight_protoopaque.pb.go index 184346c1..394575c8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_security_insight_protoopaque.pb.go @@ -24,22 +24,153 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// RiskScore represents a risk score insight +type RiskScore struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value string `protobuf:"bytes,1,opt,name=value,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RiskScore) Reset() { + *x = RiskScore{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RiskScore) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RiskScore) ProtoMessage() {} + +func (x *RiskScore) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RiskScore) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *RiskScore) SetValue(v string) { + x.xxx_hidden_Value = v +} + +type RiskScore_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + // The risk score value (e.g., "85", "High") + Value string +} + +func (b0 RiskScore_builder) Build() *RiskScore { + m0 := &RiskScore{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + return m0 +} + +// Issue represents a security issue or vulnerability +type Issue struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Value string `protobuf:"bytes,1,opt,name=value,proto3"` + xxx_hidden_Severity string `protobuf:"bytes,2,opt,name=severity,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Issue) Reset() { + *x = Issue{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Issue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Issue) ProtoMessage() {} + +func (x *Issue) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Issue) GetValue() string { + if x != nil { + return x.xxx_hidden_Value + } + return "" +} + +func (x *Issue) GetSeverity() string { + if x != nil { + return x.xxx_hidden_Severity + } + return "" +} + +func (x *Issue) SetValue(v string) { + x.xxx_hidden_Value = v +} + +func (x *Issue) SetSeverity(v string) { + x.xxx_hidden_Severity = v +} + +type Issue_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + // The issue description or severity (e.g., "Critical", "CVE-2024-1234") + Value string + Severity string +} + +func (b0 Issue_builder) Build() *Issue { + m0 := &Issue{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Value = b.Value + x.xxx_hidden_Severity = b.Severity + return m0 +} + // SecurityInsightTrait is the trait annotation for resources with TRAIT_SECURITY_INSIGHT. // It contains the metadata for the security insight including type, value, observation time, // and the target entity (user or resource) that this insight should be bound to. 
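The opaque flavor below mirrors the accessor surface of the hybrid file earlier in this diff (Get/Set/Has/Clear plus WhichInsightType and WhichTarget), so consumers never touch the hidden fields directly. A brief read-side sketch using only accessors defined by this package:

package example

import v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"

// describeInsightSketch summarizes a trait's insight via the Which/Get accessors.
func describeInsightSketch(trait *v2.SecurityInsightTrait) string {
    switch trait.WhichInsightType() {
    case v2.SecurityInsightTrait_RiskScore_case:
        return "risk score: " + trait.GetRiskScore().GetValue()
    case v2.SecurityInsightTrait_Issue_case:
        return "issue: " + trait.GetIssue().GetValue() + " (" + trait.GetIssue().GetSeverity() + ")"
    default:
        return "no insight set"
    }
}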
type SecurityInsightTrait struct { - state protoimpl.MessageState `protogen:"opaque.v1"` - xxx_hidden_InsightType string `protobuf:"bytes,1,opt,name=insight_type,json=insightType,proto3"` - xxx_hidden_Value string `protobuf:"bytes,2,opt,name=value,proto3"` - xxx_hidden_ObservedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=observed_at,json=observedAt,proto3"` - xxx_hidden_Target isSecurityInsightTrait_Target `protobuf_oneof:"target"` + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_InsightType isSecurityInsightTrait_InsightType `protobuf_oneof:"insight_type"` + xxx_hidden_ObservedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=observed_at,json=observedAt,proto3"` + xxx_hidden_Target isSecurityInsightTrait_Target `protobuf_oneof:"target"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *SecurityInsightTrait) Reset() { *x = SecurityInsightTrait{} - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -51,7 +182,7 @@ func (x *SecurityInsightTrait) String() string { func (*SecurityInsightTrait) ProtoMessage() {} func (x *SecurityInsightTrait) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -62,18 +193,22 @@ func (x *SecurityInsightTrait) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -func (x *SecurityInsightTrait) GetInsightType() string { +func (x *SecurityInsightTrait) GetRiskScore() *RiskScore { if x != nil { - return x.xxx_hidden_InsightType + if x, ok := x.xxx_hidden_InsightType.(*securityInsightTrait_RiskScore); ok { + return x.RiskScore + } } - return "" + return nil } -func (x *SecurityInsightTrait) GetValue() string { +func (x *SecurityInsightTrait) GetIssue() *Issue { if x != nil { - return x.xxx_hidden_Value + if x, ok := x.xxx_hidden_InsightType.(*securityInsightTrait_Issue); ok { + return x.Issue + } } - return "" + return nil } func (x *SecurityInsightTrait) GetObservedAt() *timestamppb.Timestamp { @@ -110,12 +245,29 @@ func (x *SecurityInsightTrait) GetExternalResource() *SecurityInsightTrait_Exter return nil } -func (x *SecurityInsightTrait) SetInsightType(v string) { - x.xxx_hidden_InsightType = v +func (x *SecurityInsightTrait) GetAppUser() *SecurityInsightTrait_AppUserTarget { + if x != nil { + if x, ok := x.xxx_hidden_Target.(*securityInsightTrait_AppUser); ok { + return x.AppUser + } + } + return nil } -func (x *SecurityInsightTrait) SetValue(v string) { - x.xxx_hidden_Value = v +func (x *SecurityInsightTrait) SetRiskScore(v *RiskScore) { + if v == nil { + x.xxx_hidden_InsightType = nil + return + } + x.xxx_hidden_InsightType = &securityInsightTrait_RiskScore{v} +} + +func (x *SecurityInsightTrait) SetIssue(v *Issue) { + if v == nil { + x.xxx_hidden_InsightType = nil + return + } + x.xxx_hidden_InsightType = &securityInsightTrait_Issue{v} } func (x *SecurityInsightTrait) SetObservedAt(v *timestamppb.Timestamp) { @@ -146,6 +298,37 @@ func (x *SecurityInsightTrait) SetExternalResource(v *SecurityInsightTrait_Exter x.xxx_hidden_Target = &securityInsightTrait_ExternalResource{v} } +func (x *SecurityInsightTrait) SetAppUser(v *SecurityInsightTrait_AppUserTarget) { + if v == nil { + 
x.xxx_hidden_Target = nil + return + } + x.xxx_hidden_Target = &securityInsightTrait_AppUser{v} +} + +func (x *SecurityInsightTrait) HasInsightType() bool { + if x == nil { + return false + } + return x.xxx_hidden_InsightType != nil +} + +func (x *SecurityInsightTrait) HasRiskScore() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_InsightType.(*securityInsightTrait_RiskScore) + return ok +} + +func (x *SecurityInsightTrait) HasIssue() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_InsightType.(*securityInsightTrait_Issue) + return ok +} + func (x *SecurityInsightTrait) HasObservedAt() bool { if x == nil { return false @@ -184,6 +367,30 @@ func (x *SecurityInsightTrait) HasExternalResource() bool { return ok } +func (x *SecurityInsightTrait) HasAppUser() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Target.(*securityInsightTrait_AppUser) + return ok +} + +func (x *SecurityInsightTrait) ClearInsightType() { + x.xxx_hidden_InsightType = nil +} + +func (x *SecurityInsightTrait) ClearRiskScore() { + if _, ok := x.xxx_hidden_InsightType.(*securityInsightTrait_RiskScore); ok { + x.xxx_hidden_InsightType = nil + } +} + +func (x *SecurityInsightTrait) ClearIssue() { + if _, ok := x.xxx_hidden_InsightType.(*securityInsightTrait_Issue); ok { + x.xxx_hidden_InsightType = nil + } +} + func (x *SecurityInsightTrait) ClearObservedAt() { x.xxx_hidden_ObservedAt = nil } @@ -210,10 +417,35 @@ func (x *SecurityInsightTrait) ClearExternalResource() { } } +func (x *SecurityInsightTrait) ClearAppUser() { + if _, ok := x.xxx_hidden_Target.(*securityInsightTrait_AppUser); ok { + x.xxx_hidden_Target = nil + } +} + +const SecurityInsightTrait_InsightType_not_set_case case_SecurityInsightTrait_InsightType = 0 +const SecurityInsightTrait_RiskScore_case case_SecurityInsightTrait_InsightType = 1 +const SecurityInsightTrait_Issue_case case_SecurityInsightTrait_InsightType = 2 + +func (x *SecurityInsightTrait) WhichInsightType() case_SecurityInsightTrait_InsightType { + if x == nil { + return SecurityInsightTrait_InsightType_not_set_case + } + switch x.xxx_hidden_InsightType.(type) { + case *securityInsightTrait_RiskScore: + return SecurityInsightTrait_RiskScore_case + case *securityInsightTrait_Issue: + return SecurityInsightTrait_Issue_case + default: + return SecurityInsightTrait_InsightType_not_set_case + } +} + const SecurityInsightTrait_Target_not_set_case case_SecurityInsightTrait_Target = 0 const SecurityInsightTrait_User_case case_SecurityInsightTrait_Target = 4 const SecurityInsightTrait_ResourceId_case case_SecurityInsightTrait_Target = 5 const SecurityInsightTrait_ExternalResource_case case_SecurityInsightTrait_Target = 6 +const SecurityInsightTrait_AppUser_case case_SecurityInsightTrait_Target = 7 func (x *SecurityInsightTrait) WhichTarget() case_SecurityInsightTrait_Target { if x == nil { @@ -226,6 +458,8 @@ func (x *SecurityInsightTrait) WhichTarget() case_SecurityInsightTrait_Target { return SecurityInsightTrait_ResourceId_case case *securityInsightTrait_ExternalResource: return SecurityInsightTrait_ExternalResource_case + case *securityInsightTrait_AppUser: + return SecurityInsightTrait_AppUser_case default: return SecurityInsightTrait_Target_not_set_case } @@ -234,10 +468,12 @@ func (x *SecurityInsightTrait) WhichTarget() case_SecurityInsightTrait_Target { type SecurityInsightTrait_builder struct { _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
- // The type of insight (e.g., "crowdstrike_zta_score", "wiz_critical_vulnerability") - InsightType string - // The value of the insight (e.g., "85", "High", "Critical") - Value string + // The type and value of the insight + + // Fields of oneof xxx_hidden_InsightType: + RiskScore *RiskScore + Issue *Issue + // -- end of xxx_hidden_InsightType // When this insight was observed/captured from the source system ObservedAt *timestamppb.Timestamp // The target entity this insight should be bound to @@ -249,6 +485,8 @@ type SecurityInsightTrait_builder struct { ResourceId *ResourceId // For binding to an AppResource by external ID ExternalResource *SecurityInsightTrait_ExternalResourceTarget + // For binding to an AppUser by email address + AppUser *SecurityInsightTrait_AppUserTarget // -- end of xxx_hidden_Target } @@ -256,8 +494,12 @@ func (b0 SecurityInsightTrait_builder) Build() *SecurityInsightTrait { m0 := &SecurityInsightTrait{} b, x := &b0, m0 _, _ = b, x - x.xxx_hidden_InsightType = b.InsightType - x.xxx_hidden_Value = b.Value + if b.RiskScore != nil { + x.xxx_hidden_InsightType = &securityInsightTrait_RiskScore{b.RiskScore} + } + if b.Issue != nil { + x.xxx_hidden_InsightType = &securityInsightTrait_Issue{b.Issue} + } x.xxx_hidden_ObservedAt = b.ObservedAt if b.User != nil { x.xxx_hidden_Target = &securityInsightTrait_User{b.User} @@ -268,19 +510,48 @@ func (b0 SecurityInsightTrait_builder) Build() *SecurityInsightTrait { if b.ExternalResource != nil { x.xxx_hidden_Target = &securityInsightTrait_ExternalResource{b.ExternalResource} } + if b.AppUser != nil { + x.xxx_hidden_Target = &securityInsightTrait_AppUser{b.AppUser} + } return m0 } +type case_SecurityInsightTrait_InsightType protoreflect.FieldNumber + +func (x case_SecurityInsightTrait_InsightType) String() string { + md := file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2].Descriptor() + if x == 0 { + return "not set" + } + return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) +} + type case_SecurityInsightTrait_Target protoreflect.FieldNumber func (x case_SecurityInsightTrait_Target) String() string { - md := file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0].Descriptor() + md := file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2].Descriptor() if x == 0 { return "not set" } return protoimpl.X.MessageFieldStringOf(md, protoreflect.FieldNumber(x)) } +type isSecurityInsightTrait_InsightType interface { + isSecurityInsightTrait_InsightType() +} + +type securityInsightTrait_RiskScore struct { + RiskScore *RiskScore `protobuf:"bytes,1,opt,name=risk_score,json=riskScore,proto3,oneof"` +} + +type securityInsightTrait_Issue struct { + Issue *Issue `protobuf:"bytes,2,opt,name=issue,proto3,oneof"` +} + +func (*securityInsightTrait_RiskScore) isSecurityInsightTrait_InsightType() {} + +func (*securityInsightTrait_Issue) isSecurityInsightTrait_InsightType() {} + type isSecurityInsightTrait_Target interface { isSecurityInsightTrait_Target() } @@ -300,12 +571,19 @@ type securityInsightTrait_ExternalResource struct { ExternalResource *SecurityInsightTrait_ExternalResourceTarget `protobuf:"bytes,6,opt,name=external_resource,json=externalResource,proto3,oneof"` } +type securityInsightTrait_AppUser struct { + // For binding to an AppUser by email address + AppUser *SecurityInsightTrait_AppUserTarget `protobuf:"bytes,7,opt,name=app_user,json=appUser,proto3,oneof"` +} + func (*securityInsightTrait_User) isSecurityInsightTrait_Target() {} func (*securityInsightTrait_ResourceId) 
isSecurityInsightTrait_Target() {} func (*securityInsightTrait_ExternalResource) isSecurityInsightTrait_Target() {} +func (*securityInsightTrait_AppUser) isSecurityInsightTrait_Target() {} + // UserTarget identifies a user by email for resolution to a C1 User type SecurityInsightTrait_UserTarget struct { state protoimpl.MessageState `protogen:"opaque.v1"` @@ -316,7 +594,7 @@ type SecurityInsightTrait_UserTarget struct { func (x *SecurityInsightTrait_UserTarget) Reset() { *x = SecurityInsightTrait_UserTarget{} - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -328,7 +606,7 @@ func (x *SecurityInsightTrait_UserTarget) String() string { func (*SecurityInsightTrait_UserTarget) ProtoMessage() {} func (x *SecurityInsightTrait_UserTarget) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[1] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -364,6 +642,79 @@ func (b0 SecurityInsightTrait_UserTarget_builder) Build() *SecurityInsightTrait_ return m0 } +// AppUserTarget identifies a user by email for resolution to an AppUser. +type SecurityInsightTrait_AppUserTarget struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Email string `protobuf:"bytes,1,opt,name=email,proto3"` + xxx_hidden_ExternalId string `protobuf:"bytes,2,opt,name=external_id,json=externalId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SecurityInsightTrait_AppUserTarget) Reset() { + *x = SecurityInsightTrait_AppUserTarget{} + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SecurityInsightTrait_AppUserTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityInsightTrait_AppUserTarget) ProtoMessage() {} + +func (x *SecurityInsightTrait_AppUserTarget) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *SecurityInsightTrait_AppUserTarget) GetEmail() string { + if x != nil { + return x.xxx_hidden_Email + } + return "" +} + +func (x *SecurityInsightTrait_AppUserTarget) GetExternalId() string { + if x != nil { + return x.xxx_hidden_ExternalId + } + return "" +} + +func (x *SecurityInsightTrait_AppUserTarget) SetEmail(v string) { + x.xxx_hidden_Email = v +} + +func (x *SecurityInsightTrait_AppUserTarget) SetExternalId(v string) { + x.xxx_hidden_ExternalId = v +} + +type SecurityInsightTrait_AppUserTarget_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Email string + // The external identifier of the user (e.g., ID, GUID, etc.) 
+ ExternalId string +} + +func (b0 SecurityInsightTrait_AppUserTarget_builder) Build() *SecurityInsightTrait_AppUserTarget { + m0 := &SecurityInsightTrait_AppUserTarget{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Email = b.Email + x.xxx_hidden_ExternalId = b.ExternalId + return m0 +} + // ExternalResourceTarget identifies a resource by external ID for resolution to an AppResource. // Use this when the connector doesn't sync the target resource itself. type SecurityInsightTrait_ExternalResourceTarget struct { @@ -376,7 +727,7 @@ type SecurityInsightTrait_ExternalResourceTarget struct { func (x *SecurityInsightTrait_ExternalResourceTarget) Reset() { *x = SecurityInsightTrait_ExternalResourceTarget{} - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -388,7 +739,7 @@ func (x *SecurityInsightTrait_ExternalResourceTarget) String() string { func (*SecurityInsightTrait_ExternalResourceTarget) ProtoMessage() {} func (x *SecurityInsightTrait_ExternalResourceTarget) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2] + mi := &file_c1_connector_v2_annotation_security_insight_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -443,46 +794,64 @@ var File_c1_connector_v2_annotation_security_insight_proto protoreflect.FileDesc const file_c1_connector_v2_annotation_security_insight_proto_rawDesc = "" + "\n" + - "1c1/connector/v2/annotation_security_insight.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xc9\x04\n" + - "\x14SecurityInsightTrait\x12-\n" + - "\finsight_type\x18\x01 \x01(\tB\n" + - "\xfaB\ar\x05 \x01(\x80\bR\vinsightType\x12 \n" + - "\x05value\x18\x02 \x01(\tB\n" + - "\xfaB\ar\x05 \x01(\x80\bR\x05value\x12;\n" + + "1c1/connector/v2/annotation_security_insight.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"!\n" + + "\tRiskScore\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\"E\n" + + "\x05Issue\x12\x14\n" + + "\x05value\x18\x01 \x01(\tR\x05value\x12&\n" + + "\bseverity\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x00(\x80\bR\bseverity\"\xae\x06\n" + + "\x14SecurityInsightTrait\x12;\n" + + "\n" + + "risk_score\x18\x01 \x01(\v2\x1a.c1.connector.v2.RiskScoreH\x00R\triskScore\x12.\n" + + "\x05issue\x18\x02 \x01(\v2\x16.c1.connector.v2.IssueH\x00R\x05issue\x12;\n" + "\vobserved_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" + "observedAt\x12F\n" + - "\x04user\x18\x04 \x01(\v20.c1.connector.v2.SecurityInsightTrait.UserTargetH\x00R\x04user\x12>\n" + - "\vresource_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdH\x00R\n" + + "\x04user\x18\x04 \x01(\v20.c1.connector.v2.SecurityInsightTrait.UserTargetH\x01R\x04user\x12>\n" + + "\vresource_id\x18\x05 \x01(\v2\x1b.c1.connector.v2.ResourceIdH\x01R\n" + "resourceId\x12k\n" + - "\x11external_resource\x18\x06 \x01(\v2<.c1.connector.v2.SecurityInsightTrait.ExternalResourceTargetH\x00R\x10externalResource\x1a0\n" + + "\x11external_resource\x18\x06 \x01(\v2<.c1.connector.v2.SecurityInsightTrait.ExternalResourceTargetH\x01R\x10externalResource\x12P\n" + + "\bapp_user\x18\a \x01(\v23.c1.connector.v2.SecurityInsightTrait.AppUserTargetH\x01R\aappUser\x1a0\n" + 
"\n" + "UserTarget\x12\"\n" + - "\x05email\x18\x01 \x01(\tB\f\xfaB\tr\a \x01(\x80\b`\x01R\x05email\x1am\n" + + "\x05email\x18\x01 \x01(\tB\f\xfaB\tr\a \x01(\x80\b`\x01R\x05email\x1a`\n" + + "\rAppUserTarget\x12\"\n" + + "\x05email\x18\x01 \x01(\tB\f\xfaB\tr\a \x01(\x80\b`\x01R\x05email\x12+\n" + + "\vexternal_id\x18\x02 \x01(\tB\n" + + "\xfaB\ar\x05 \x01(\x80 R\n" + + "externalId\x1am\n" + "\x16ExternalResourceTarget\x12+\n" + "\vexternal_id\x18\x01 \x01(\tB\n" + "\xfaB\ar\x05 \x01(\x80 R\n" + "externalId\x12&\n" + - "\bapp_hint\x18\x02 \x01(\tB\v\xfaB\br\x06(\x80\b\xd0\x01\x01R\aappHintB\r\n" + + "\bapp_hint\x18\x02 \x01(\tB\v\xfaB\br\x06(\x80\b\xd0\x01\x01R\aappHintB\x13\n" + + "\finsight_type\x12\x03\xf8B\x01B\r\n" + "\x06target\x12\x03\xf8B\x01B6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" -var file_c1_connector_v2_annotation_security_insight_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_c1_connector_v2_annotation_security_insight_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_c1_connector_v2_annotation_security_insight_proto_goTypes = []any{ - (*SecurityInsightTrait)(nil), // 0: c1.connector.v2.SecurityInsightTrait - (*SecurityInsightTrait_UserTarget)(nil), // 1: c1.connector.v2.SecurityInsightTrait.UserTarget - (*SecurityInsightTrait_ExternalResourceTarget)(nil), // 2: c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget - (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp - (*ResourceId)(nil), // 4: c1.connector.v2.ResourceId + (*RiskScore)(nil), // 0: c1.connector.v2.RiskScore + (*Issue)(nil), // 1: c1.connector.v2.Issue + (*SecurityInsightTrait)(nil), // 2: c1.connector.v2.SecurityInsightTrait + (*SecurityInsightTrait_UserTarget)(nil), // 3: c1.connector.v2.SecurityInsightTrait.UserTarget + (*SecurityInsightTrait_AppUserTarget)(nil), // 4: c1.connector.v2.SecurityInsightTrait.AppUserTarget + (*SecurityInsightTrait_ExternalResourceTarget)(nil), // 5: c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp + (*ResourceId)(nil), // 7: c1.connector.v2.ResourceId } var file_c1_connector_v2_annotation_security_insight_proto_depIdxs = []int32{ - 3, // 0: c1.connector.v2.SecurityInsightTrait.observed_at:type_name -> google.protobuf.Timestamp - 1, // 1: c1.connector.v2.SecurityInsightTrait.user:type_name -> c1.connector.v2.SecurityInsightTrait.UserTarget - 4, // 2: c1.connector.v2.SecurityInsightTrait.resource_id:type_name -> c1.connector.v2.ResourceId - 2, // 3: c1.connector.v2.SecurityInsightTrait.external_resource:type_name -> c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 0, // 0: c1.connector.v2.SecurityInsightTrait.risk_score:type_name -> c1.connector.v2.RiskScore + 1, // 1: c1.connector.v2.SecurityInsightTrait.issue:type_name -> c1.connector.v2.Issue + 6, // 2: c1.connector.v2.SecurityInsightTrait.observed_at:type_name -> google.protobuf.Timestamp + 3, // 3: c1.connector.v2.SecurityInsightTrait.user:type_name -> c1.connector.v2.SecurityInsightTrait.UserTarget + 7, // 4: c1.connector.v2.SecurityInsightTrait.resource_id:type_name -> c1.connector.v2.ResourceId + 5, // 5: c1.connector.v2.SecurityInsightTrait.external_resource:type_name -> 
c1.connector.v2.SecurityInsightTrait.ExternalResourceTarget + 4, // 6: c1.connector.v2.SecurityInsightTrait.app_user:type_name -> c1.connector.v2.SecurityInsightTrait.AppUserTarget + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_c1_connector_v2_annotation_security_insight_proto_init() } @@ -491,10 +860,13 @@ func file_c1_connector_v2_annotation_security_insight_proto_init() { return } file_c1_connector_v2_resource_proto_init() - file_c1_connector_v2_annotation_security_insight_proto_msgTypes[0].OneofWrappers = []any{ + file_c1_connector_v2_annotation_security_insight_proto_msgTypes[2].OneofWrappers = []any{ + (*securityInsightTrait_RiskScore)(nil), + (*securityInsightTrait_Issue)(nil), (*securityInsightTrait_User)(nil), (*securityInsightTrait_ResourceId)(nil), (*securityInsightTrait_ExternalResource)(nil), + (*securityInsightTrait_AppUser)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -502,7 +874,7 @@ func file_c1_connector_v2_annotation_security_insight_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_security_insight_proto_rawDesc), len(file_c1_connector_v2_annotation_security_insight_proto_rawDesc)), NumEnums: 0, - NumMessages: 3, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go index 21ac6d98..8795297d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go @@ -28,19 +28,20 @@ const ( type Capability int32 const ( - Capability_CAPABILITY_UNSPECIFIED Capability = 0 - Capability_CAPABILITY_PROVISION Capability = 1 - Capability_CAPABILITY_SYNC Capability = 2 - Capability_CAPABILITY_EVENT_FEED Capability = 3 - Capability_CAPABILITY_TICKETING Capability = 4 - Capability_CAPABILITY_ACCOUNT_PROVISIONING Capability = 5 - Capability_CAPABILITY_CREDENTIAL_ROTATION Capability = 6 - Capability_CAPABILITY_RESOURCE_CREATE Capability = 7 - Capability_CAPABILITY_RESOURCE_DELETE Capability = 8 - Capability_CAPABILITY_SYNC_SECRETS Capability = 9 - Capability_CAPABILITY_ACTIONS Capability = 10 - Capability_CAPABILITY_TARGETED_SYNC Capability = 11 - Capability_CAPABILITY_EVENT_FEED_V2 Capability = 12 + Capability_CAPABILITY_UNSPECIFIED Capability = 0 + Capability_CAPABILITY_PROVISION Capability = 1 + Capability_CAPABILITY_SYNC Capability = 2 + Capability_CAPABILITY_EVENT_FEED Capability = 3 + Capability_CAPABILITY_TICKETING Capability = 4 + Capability_CAPABILITY_ACCOUNT_PROVISIONING Capability = 5 + Capability_CAPABILITY_CREDENTIAL_ROTATION Capability = 6 + Capability_CAPABILITY_RESOURCE_CREATE Capability = 7 + Capability_CAPABILITY_RESOURCE_DELETE Capability = 8 + Capability_CAPABILITY_SYNC_SECRETS Capability = 9 + Capability_CAPABILITY_ACTIONS Capability = 10 + Capability_CAPABILITY_TARGETED_SYNC Capability = 11 + Capability_CAPABILITY_EVENT_FEED_V2 Capability = 12 + Capability_CAPABILITY_SERVICE_MODE_TARGETED_SYNC Capability = 13 ) // Enum value maps for Capability. 
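The regenerated SecurityInsightTrait above replaces the flat insight_type/value strings with a RiskScore/Issue oneof and adds an AppUserTarget binding. A minimal sketch of emitting the reworked trait through the generated builders shown in this diff; RiskScore_builder is assumed to follow the same builder pattern (its Go type is generated elsewhere in this patch), and the score, email, and external ID are placeholder values.

```go
package example

import (
	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// buildInsight sketches the reworked trait: the insight_type oneof carries a
// RiskScore (an Issue would be set instead via the Issue builder field), and
// the new AppUser target binds the insight to an AppUser by email/external ID.
// RiskScore_builder is an assumption; the values are placeholders.
func buildInsight() *v2.SecurityInsightTrait {
	return v2.SecurityInsightTrait_builder{
		RiskScore:  v2.RiskScore_builder{Value: "85"}.Build(),
		ObservedAt: timestamppb.Now(),
		AppUser: v2.SecurityInsightTrait_AppUserTarget_builder{
			Email:      "user@example.com",
			ExternalId: "ext-123",
		}.Build(),
	}.Build()
}
```

Set exactly one of RiskScore/Issue; Build() assigns the oneof from whichever builder field is non-nil, mirroring how the Target oneof is populated.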
@@ -59,21 +60,23 @@ var ( 10: "CAPABILITY_ACTIONS", 11: "CAPABILITY_TARGETED_SYNC", 12: "CAPABILITY_EVENT_FEED_V2", + 13: "CAPABILITY_SERVICE_MODE_TARGETED_SYNC", } Capability_value = map[string]int32{ - "CAPABILITY_UNSPECIFIED": 0, - "CAPABILITY_PROVISION": 1, - "CAPABILITY_SYNC": 2, - "CAPABILITY_EVENT_FEED": 3, - "CAPABILITY_TICKETING": 4, - "CAPABILITY_ACCOUNT_PROVISIONING": 5, - "CAPABILITY_CREDENTIAL_ROTATION": 6, - "CAPABILITY_RESOURCE_CREATE": 7, - "CAPABILITY_RESOURCE_DELETE": 8, - "CAPABILITY_SYNC_SECRETS": 9, - "CAPABILITY_ACTIONS": 10, - "CAPABILITY_TARGETED_SYNC": 11, - "CAPABILITY_EVENT_FEED_V2": 12, + "CAPABILITY_UNSPECIFIED": 0, + "CAPABILITY_PROVISION": 1, + "CAPABILITY_SYNC": 2, + "CAPABILITY_EVENT_FEED": 3, + "CAPABILITY_TICKETING": 4, + "CAPABILITY_ACCOUNT_PROVISIONING": 5, + "CAPABILITY_CREDENTIAL_ROTATION": 6, + "CAPABILITY_RESOURCE_CREATE": 7, + "CAPABILITY_RESOURCE_DELETE": 8, + "CAPABILITY_SYNC_SECRETS": 9, + "CAPABILITY_ACTIONS": 10, + "CAPABILITY_TARGETED_SYNC": 11, + "CAPABILITY_EVENT_FEED_V2": 12, + "CAPABILITY_SERVICE_MODE_TARGETED_SYNC": 13, } ) @@ -2151,7 +2154,7 @@ const file_c1_connector_v2_connector_proto_rawDesc = "" + "\rdefault_value\x18\x01 \x03(\v2J.c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntryR\fdefaultValue\x1av\n" + "\x11DefaultValueEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12K\n" + - "\x05value\x18\x02 \x01(\v25.c1.connector.v2.ConnectorAccountCreationSchema.FieldR\x05value:\x028\x01*\x86\x03\n" + + "\x05value\x18\x02 \x01(\v25.c1.connector.v2.ConnectorAccountCreationSchema.FieldR\x05value:\x028\x01*\xb1\x03\n" + "\n" + "Capability\x12\x1a\n" + "\x16CAPABILITY_UNSPECIFIED\x10\x00\x12\x18\n" + @@ -2167,7 +2170,8 @@ const file_c1_connector_v2_connector_proto_rawDesc = "" + "\x12CAPABILITY_ACTIONS\x10\n" + "\x12\x1c\n" + "\x18CAPABILITY_TARGETED_SYNC\x10\v\x12\x1c\n" + - "\x18CAPABILITY_EVENT_FEED_V2\x10\f*\xae\x02\n" + + "\x18CAPABILITY_EVENT_FEED_V2\x10\f\x12)\n" + + "%CAPABILITY_SERVICE_MODE_TARGETED_SYNC\x10\r*\xae\x02\n" + " CapabilityDetailCredentialOption\x123\n" + "/CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED\x10\x00\x123\n" + "/CAPABILITY_DETAIL_CREDENTIAL_OPTION_NO_PASSWORD\x10\x01\x127\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector_protoopaque.pb.go index e8ec7de5..2025995c 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector_protoopaque.pb.go @@ -28,19 +28,20 @@ const ( type Capability int32 const ( - Capability_CAPABILITY_UNSPECIFIED Capability = 0 - Capability_CAPABILITY_PROVISION Capability = 1 - Capability_CAPABILITY_SYNC Capability = 2 - Capability_CAPABILITY_EVENT_FEED Capability = 3 - Capability_CAPABILITY_TICKETING Capability = 4 - Capability_CAPABILITY_ACCOUNT_PROVISIONING Capability = 5 - Capability_CAPABILITY_CREDENTIAL_ROTATION Capability = 6 - Capability_CAPABILITY_RESOURCE_CREATE Capability = 7 - Capability_CAPABILITY_RESOURCE_DELETE Capability = 8 - Capability_CAPABILITY_SYNC_SECRETS Capability = 9 - Capability_CAPABILITY_ACTIONS Capability = 10 - Capability_CAPABILITY_TARGETED_SYNC Capability = 11 - Capability_CAPABILITY_EVENT_FEED_V2 Capability = 12 + Capability_CAPABILITY_UNSPECIFIED Capability = 0 + Capability_CAPABILITY_PROVISION Capability = 1 + Capability_CAPABILITY_SYNC Capability = 2 + Capability_CAPABILITY_EVENT_FEED 
Capability = 3 + Capability_CAPABILITY_TICKETING Capability = 4 + Capability_CAPABILITY_ACCOUNT_PROVISIONING Capability = 5 + Capability_CAPABILITY_CREDENTIAL_ROTATION Capability = 6 + Capability_CAPABILITY_RESOURCE_CREATE Capability = 7 + Capability_CAPABILITY_RESOURCE_DELETE Capability = 8 + Capability_CAPABILITY_SYNC_SECRETS Capability = 9 + Capability_CAPABILITY_ACTIONS Capability = 10 + Capability_CAPABILITY_TARGETED_SYNC Capability = 11 + Capability_CAPABILITY_EVENT_FEED_V2 Capability = 12 + Capability_CAPABILITY_SERVICE_MODE_TARGETED_SYNC Capability = 13 ) // Enum value maps for Capability. @@ -59,21 +60,23 @@ var ( 10: "CAPABILITY_ACTIONS", 11: "CAPABILITY_TARGETED_SYNC", 12: "CAPABILITY_EVENT_FEED_V2", + 13: "CAPABILITY_SERVICE_MODE_TARGETED_SYNC", } Capability_value = map[string]int32{ - "CAPABILITY_UNSPECIFIED": 0, - "CAPABILITY_PROVISION": 1, - "CAPABILITY_SYNC": 2, - "CAPABILITY_EVENT_FEED": 3, - "CAPABILITY_TICKETING": 4, - "CAPABILITY_ACCOUNT_PROVISIONING": 5, - "CAPABILITY_CREDENTIAL_ROTATION": 6, - "CAPABILITY_RESOURCE_CREATE": 7, - "CAPABILITY_RESOURCE_DELETE": 8, - "CAPABILITY_SYNC_SECRETS": 9, - "CAPABILITY_ACTIONS": 10, - "CAPABILITY_TARGETED_SYNC": 11, - "CAPABILITY_EVENT_FEED_V2": 12, + "CAPABILITY_UNSPECIFIED": 0, + "CAPABILITY_PROVISION": 1, + "CAPABILITY_SYNC": 2, + "CAPABILITY_EVENT_FEED": 3, + "CAPABILITY_TICKETING": 4, + "CAPABILITY_ACCOUNT_PROVISIONING": 5, + "CAPABILITY_CREDENTIAL_ROTATION": 6, + "CAPABILITY_RESOURCE_CREATE": 7, + "CAPABILITY_RESOURCE_DELETE": 8, + "CAPABILITY_SYNC_SECRETS": 9, + "CAPABILITY_ACTIONS": 10, + "CAPABILITY_TARGETED_SYNC": 11, + "CAPABILITY_EVENT_FEED_V2": 12, + "CAPABILITY_SERVICE_MODE_TARGETED_SYNC": 13, } ) @@ -2173,7 +2176,7 @@ const file_c1_connector_v2_connector_proto_rawDesc = "" + "\rdefault_value\x18\x01 \x03(\v2J.c1.connector.v2.ConnectorAccountCreationSchema.MapField.DefaultValueEntryR\fdefaultValue\x1av\n" + "\x11DefaultValueEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12K\n" + - "\x05value\x18\x02 \x01(\v25.c1.connector.v2.ConnectorAccountCreationSchema.FieldR\x05value:\x028\x01*\x86\x03\n" + + "\x05value\x18\x02 \x01(\v25.c1.connector.v2.ConnectorAccountCreationSchema.FieldR\x05value:\x028\x01*\xb1\x03\n" + "\n" + "Capability\x12\x1a\n" + "\x16CAPABILITY_UNSPECIFIED\x10\x00\x12\x18\n" + @@ -2189,7 +2192,8 @@ const file_c1_connector_v2_connector_proto_rawDesc = "" + "\x12CAPABILITY_ACTIONS\x10\n" + "\x12\x1c\n" + "\x18CAPABILITY_TARGETED_SYNC\x10\v\x12\x1c\n" + - "\x18CAPABILITY_EVENT_FEED_V2\x10\f*\xae\x02\n" + + "\x18CAPABILITY_EVENT_FEED_V2\x10\f\x12)\n" + + "%CAPABILITY_SERVICE_MODE_TARGETED_SYNC\x10\r*\xae\x02\n" + " CapabilityDetailCredentialOption\x123\n" + "/CAPABILITY_DETAIL_CREDENTIAL_OPTION_UNSPECIFIED\x10\x00\x123\n" + "/CAPABILITY_DETAIL_CREDENTIAL_OPTION_NO_PASSWORD\x10\x01\x127\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.go index fca3e908..d1926c4a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement.pb.go @@ -30,6 +30,7 @@ const ( Entitlement_PURPOSE_VALUE_UNSPECIFIED Entitlement_PurposeValue = 0 Entitlement_PURPOSE_VALUE_ASSIGNMENT Entitlement_PurposeValue = 1 Entitlement_PURPOSE_VALUE_PERMISSION Entitlement_PurposeValue = 2 + Entitlement_PURPOSE_VALUE_OWNERSHIP Entitlement_PurposeValue = 3 ) // Enum value maps for Entitlement_PurposeValue. 
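PURPOSE_VALUE_OWNERSHIP gives entitlements a third purpose alongside assignment and permission. A hedged sketch of tagging an owner entitlement with it, using the open-struct variant of the generated type (the opaque variant would go through SetPurpose instead); the team resource, ID format, and display name are placeholders.

```go
package example

import (
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// ownerEntitlement sketches an ownership-purpose entitlement for a team
// resource. Field names follow the open-struct generated code; the ID and
// display strings are illustrative only.
func ownerEntitlement(team *v2.Resource) *v2.Entitlement {
	return &v2.Entitlement{
		Resource:    team,
		Id:          fmt.Sprintf("team:%s:owner", team.GetId().GetResource()),
		DisplayName: "Team Owner",
		Slug:        "owner",
		Purpose:     v2.Entitlement_PURPOSE_VALUE_OWNERSHIP,
	}
}
```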
@@ -38,11 +39,13 @@ var ( 0: "PURPOSE_VALUE_UNSPECIFIED", 1: "PURPOSE_VALUE_ASSIGNMENT", 2: "PURPOSE_VALUE_PERMISSION", + 3: "PURPOSE_VALUE_OWNERSHIP", } Entitlement_PurposeValue_value = map[string]int32{ "PURPOSE_VALUE_UNSPECIFIED": 0, "PURPOSE_VALUE_ASSIGNMENT": 1, "PURPOSE_VALUE_PERMISSION": 2, + "PURPOSE_VALUE_OWNERSHIP": 3, } ) @@ -645,7 +648,7 @@ var File_c1_connector_v2_entitlement_proto protoreflect.FileDescriptor const file_c1_connector_v2_entitlement_proto_rawDesc = "" + "\n" + - "!c1/connector/v2/entitlement.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x95\x04\n" + + "!c1/connector/v2/entitlement.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\xb3\x04\n" + "\vEntitlement\x12?\n" + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\bresource\x12\x1a\n" + "\x02id\x18\x02 \x01(\tB\n" + @@ -657,11 +660,12 @@ const file_c1_connector_v2_entitlement_proto_rawDesc = "" + "\fgrantable_to\x18\x05 \x03(\v2\x1d.c1.connector.v2.ResourceTypeR\vgrantableTo\x126\n" + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12M\n" + "\apurpose\x18\a \x01(\x0e2).c1.connector.v2.Entitlement.PurposeValueB\b\xfaB\x05\x82\x01\x02\x10\x01R\apurpose\x12\x12\n" + - "\x04slug\x18\b \x01(\tR\x04slug\"i\n" + + "\x04slug\x18\b \x01(\tR\x04slug\"\x86\x01\n" + "\fPurposeValue\x12\x1d\n" + "\x19PURPOSE_VALUE_UNSPECIFIED\x10\x00\x12\x1c\n" + "\x18PURPOSE_VALUE_ASSIGNMENT\x10\x01\x12\x1c\n" + - "\x18PURPOSE_VALUE_PERMISSION\x10\x02\"\xa8\x02\n" + + "\x18PURPOSE_VALUE_PERMISSION\x10\x02\x12\x1b\n" + + "\x17PURPOSE_VALUE_OWNERSHIP\x10\x03\"\xa8\x02\n" + "*EntitlementsServiceListEntitlementsRequest\x125\n" + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x12'\n" + "\tpage_size\x18\x02 \x01(\rB\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_protoopaque.pb.go index 88a1142b..0fc2b471 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/entitlement_protoopaque.pb.go @@ -30,6 +30,7 @@ const ( Entitlement_PURPOSE_VALUE_UNSPECIFIED Entitlement_PurposeValue = 0 Entitlement_PURPOSE_VALUE_ASSIGNMENT Entitlement_PurposeValue = 1 Entitlement_PURPOSE_VALUE_PERMISSION Entitlement_PurposeValue = 2 + Entitlement_PURPOSE_VALUE_OWNERSHIP Entitlement_PurposeValue = 3 ) // Enum value maps for Entitlement_PurposeValue. 
@@ -38,11 +39,13 @@ var ( 0: "PURPOSE_VALUE_UNSPECIFIED", 1: "PURPOSE_VALUE_ASSIGNMENT", 2: "PURPOSE_VALUE_PERMISSION", + 3: "PURPOSE_VALUE_OWNERSHIP", } Entitlement_PurposeValue_value = map[string]int32{ "PURPOSE_VALUE_UNSPECIFIED": 0, "PURPOSE_VALUE_ASSIGNMENT": 1, "PURPOSE_VALUE_PERMISSION": 2, + "PURPOSE_VALUE_OWNERSHIP": 3, } ) @@ -661,7 +664,7 @@ var File_c1_connector_v2_entitlement_proto protoreflect.FileDescriptor const file_c1_connector_v2_entitlement_proto_rawDesc = "" + "\n" + - "!c1/connector/v2/entitlement.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\x95\x04\n" + + "!c1/connector/v2/entitlement.proto\x12\x0fc1.connector.v2\x1a\x1ec1/connector/v2/resource.proto\x1a\x19google/protobuf/any.proto\x1a\x17validate/validate.proto\"\xb3\x04\n" + "\vEntitlement\x12?\n" + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\bresource\x12\x1a\n" + "\x02id\x18\x02 \x01(\tB\n" + @@ -673,11 +676,12 @@ const file_c1_connector_v2_entitlement_proto_rawDesc = "" + "\fgrantable_to\x18\x05 \x03(\v2\x1d.c1.connector.v2.ResourceTypeR\vgrantableTo\x126\n" + "\vannotations\x18\x06 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12M\n" + "\apurpose\x18\a \x01(\x0e2).c1.connector.v2.Entitlement.PurposeValueB\b\xfaB\x05\x82\x01\x02\x10\x01R\apurpose\x12\x12\n" + - "\x04slug\x18\b \x01(\tR\x04slug\"i\n" + + "\x04slug\x18\b \x01(\tR\x04slug\"\x86\x01\n" + "\fPurposeValue\x12\x1d\n" + "\x19PURPOSE_VALUE_UNSPECIFIED\x10\x00\x12\x1c\n" + "\x18PURPOSE_VALUE_ASSIGNMENT\x10\x01\x12\x1c\n" + - "\x18PURPOSE_VALUE_PERMISSION\x10\x02\"\xa8\x02\n" + + "\x18PURPOSE_VALUE_PERMISSION\x10\x02\x12\x1b\n" + + "\x17PURPOSE_VALUE_OWNERSHIP\x10\x03\"\xa8\x02\n" + "*EntitlementsServiceListEntitlementsRequest\x125\n" + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x12'\n" + "\tpage_size\x18\x02 \x01(\rB\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go index de8fff47..e86d89c8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go @@ -1878,6 +1878,7 @@ type CreateAccountRequest struct { AccountInfo *AccountInfo `protobuf:"bytes,1,opt,name=account_info,json=accountInfo,proto3" json:"account_info,omitempty"` CredentialOptions *CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3" json:"credential_options,omitempty"` EncryptionConfigs []*EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3" json:"encryption_configs,omitempty"` + ResourceTypeId string `protobuf:"bytes,4,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1928,6 +1929,13 @@ func (x *CreateAccountRequest) GetEncryptionConfigs() []*EncryptionConfig { return nil } +func (x *CreateAccountRequest) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + func (x *CreateAccountRequest) SetAccountInfo(v *AccountInfo) { x.AccountInfo = v } @@ -1940,6 +1948,10 @@ func (x *CreateAccountRequest) SetEncryptionConfigs(v []*EncryptionConfig) { x.EncryptionConfigs = v } +func (x *CreateAccountRequest) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + func (x 
*CreateAccountRequest) HasAccountInfo() bool { if x == nil { return false @@ -1968,6 +1980,7 @@ type CreateAccountRequest_builder struct { AccountInfo *AccountInfo CredentialOptions *CredentialOptions EncryptionConfigs []*EncryptionConfig + ResourceTypeId string } func (b0 CreateAccountRequest_builder) Build() *CreateAccountRequest { @@ -1977,6 +1990,7 @@ func (b0 CreateAccountRequest_builder) Build() *CreateAccountRequest { x.AccountInfo = b.AccountInfo x.CredentialOptions = b.CredentialOptions x.EncryptionConfigs = b.EncryptionConfigs + x.ResourceTypeId = b.ResourceTypeId return m0 } @@ -4545,11 +4559,13 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "\aoptions\"L\n" + "\x12PasswordConstraint\x12\x19\n" + "\bchar_set\x18\x01 \x01(\tR\acharSet\x12\x1b\n" + - "\tmin_count\x18\x02 \x01(\rR\bminCount\"\xfc\x01\n" + + "\tmin_count\x18\x02 \x01(\rR\bminCount\"\xb5\x02\n" + "\x14CreateAccountRequest\x12?\n" + "\faccount_info\x18\x01 \x01(\v2\x1c.c1.connector.v2.AccountInfoR\vaccountInfo\x12Q\n" + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + - "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\"\xcc\b\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x127\n" + + "\x10resource_type_id\x18\x04 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\x0eresourceTypeId\"\xcc\b\n" + "\x15CreateAccountResponse\x12P\n" + "\asuccess\x18d \x01(\v24.c1.connector.v2.CreateAccountResponse.SuccessResultH\x00R\asuccess\x12f\n" + "\x0faction_required\x18e \x01(\v2;.c1.connector.v2.CreateAccountResponse.ActionRequiredResultH\x00R\x0eactionRequired\x12c\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go index 691da265..1e2e556a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go @@ -2860,6 +2860,21 @@ func (m *CreateAccountRequest) validate(all bool) error { } + if m.GetResourceTypeId() != "" { + + if l := len(m.GetResourceTypeId()); l < 1 || l > 1024 { + err := CreateAccountRequestValidationError{ + field: "ResourceTypeId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return CreateAccountRequestMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go index 211603ac..c2093c67 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go @@ -1872,6 +1872,7 @@ type CreateAccountRequest struct { xxx_hidden_AccountInfo *AccountInfo `protobuf:"bytes,1,opt,name=account_info,json=accountInfo,proto3"` xxx_hidden_CredentialOptions *CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3"` xxx_hidden_EncryptionConfigs *[]*EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,4,opt,name=resource_type_id,json=resourceTypeId,proto3"` unknownFields 
protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1924,6 +1925,13 @@ func (x *CreateAccountRequest) GetEncryptionConfigs() []*EncryptionConfig { return nil } +func (x *CreateAccountRequest) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + func (x *CreateAccountRequest) SetAccountInfo(v *AccountInfo) { x.xxx_hidden_AccountInfo = v } @@ -1936,6 +1944,10 @@ func (x *CreateAccountRequest) SetEncryptionConfigs(v []*EncryptionConfig) { x.xxx_hidden_EncryptionConfigs = &v } +func (x *CreateAccountRequest) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + func (x *CreateAccountRequest) HasAccountInfo() bool { if x == nil { return false @@ -1964,6 +1976,7 @@ type CreateAccountRequest_builder struct { AccountInfo *AccountInfo CredentialOptions *CredentialOptions EncryptionConfigs []*EncryptionConfig + ResourceTypeId string } func (b0 CreateAccountRequest_builder) Build() *CreateAccountRequest { @@ -1973,6 +1986,7 @@ func (b0 CreateAccountRequest_builder) Build() *CreateAccountRequest { x.xxx_hidden_AccountInfo = b.AccountInfo x.xxx_hidden_CredentialOptions = b.CredentialOptions x.xxx_hidden_EncryptionConfigs = &b.EncryptionConfigs + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId return m0 } @@ -4538,11 +4552,13 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "\aoptions\"L\n" + "\x12PasswordConstraint\x12\x19\n" + "\bchar_set\x18\x01 \x01(\tR\acharSet\x12\x1b\n" + - "\tmin_count\x18\x02 \x01(\rR\bminCount\"\xfc\x01\n" + + "\tmin_count\x18\x02 \x01(\rR\bminCount\"\xb5\x02\n" + "\x14CreateAccountRequest\x12?\n" + "\faccount_info\x18\x01 \x01(\v2\x1c.c1.connector.v2.AccountInfoR\vaccountInfo\x12Q\n" + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + - "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\"\xcc\b\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x127\n" + + "\x10resource_type_id\x18\x04 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\x0eresourceTypeId\"\xcc\b\n" + "\x15CreateAccountResponse\x12P\n" + "\asuccess\x18d \x01(\v24.c1.connector.v2.CreateAccountResponse.SuccessResultH\x00R\asuccess\x12f\n" + "\x0faction_required\x18e \x01(\v2;.c1.connector.v2.CreateAccountResponse.ActionRequiredResultH\x00R\x0eactionRequired\x12c\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go index 9a188337..76d95c61 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go @@ -2936,6 +2936,7 @@ type Task_CreateAccountTask struct { AccountInfo *v2.AccountInfo `protobuf:"bytes,1,opt,name=account_info,json=accountInfo,proto3" json:"account_info,omitempty"` CredentialOptions *v2.CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3" json:"credential_options,omitempty"` EncryptionConfigs []*v2.EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3" json:"encryption_configs,omitempty"` + ResourceTypeId string `protobuf:"bytes,4,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -2986,6 +2987,13 @@ func (x 
*Task_CreateAccountTask) GetEncryptionConfigs() []*v2.EncryptionConfig { return nil } +func (x *Task_CreateAccountTask) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + func (x *Task_CreateAccountTask) SetAccountInfo(v *v2.AccountInfo) { x.AccountInfo = v } @@ -2998,6 +3006,10 @@ func (x *Task_CreateAccountTask) SetEncryptionConfigs(v []*v2.EncryptionConfig) x.EncryptionConfigs = v } +func (x *Task_CreateAccountTask) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + func (x *Task_CreateAccountTask) HasAccountInfo() bool { if x == nil { return false @@ -3026,6 +3038,7 @@ type Task_CreateAccountTask_builder struct { AccountInfo *v2.AccountInfo CredentialOptions *v2.CredentialOptions EncryptionConfigs []*v2.EncryptionConfig + ResourceTypeId string } func (b0 Task_CreateAccountTask_builder) Build() *Task_CreateAccountTask { @@ -3035,6 +3048,7 @@ func (b0 Task_CreateAccountTask_builder) Build() *Task_CreateAccountTask { x.AccountInfo = b.AccountInfo x.CredentialOptions = b.CredentialOptions x.EncryptionConfigs = b.EncryptionConfigs + x.ResourceTypeId = b.ResourceTypeId return m0 } @@ -4872,7 +4886,7 @@ var File_c1_connectorapi_baton_v1_baton_proto protoreflect.FileDescriptor const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\n" + - "$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\x80)\n" + + "$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\xb9)\n" + "\x04Task\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12=\n" + "\x06status\x18\x02 \x01(\x0e2%.c1.connectorapi.baton.v1.Task.StatusR\x06status\x12=\n" + @@ -4920,11 +4934,13 @@ const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\n" + "RevokeTask\x12,\n" + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantR\x05grant\x126\n" + - "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xf9\x01\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xb2\x02\n" + "\x11CreateAccountTask\x12?\n" + "\faccount_info\x18\x01 \x01(\v2\x1c.c1.connector.v2.AccountInfoR\vaccountInfo\x12Q\n" + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + - "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x1aK\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x127\n" + + "\x10resource_type_id\x18\x04 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\x0eresourceTypeId\x1aK\n" + "\x12CreateResourceTask\x125\n" + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x1a\x9d\x01\n" + "\x12DeleteResourceTask\x12<\n" + diff --git 
a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go index 937bae11..a7b9b06d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go @@ -4195,6 +4195,21 @@ func (m *Task_CreateAccountTask) validate(all bool) error { } + if m.GetResourceTypeId() != "" { + + if l := len(m.GetResourceTypeId()); l < 1 || l > 1024 { + err := Task_CreateAccountTaskValidationError{ + field: "ResourceTypeId", + reason: "value length must be between 1 and 1024 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + if len(errors) > 0 { return Task_CreateAccountTaskMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go index bdf204f7..65db2f42 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go @@ -2911,6 +2911,7 @@ type Task_CreateAccountTask struct { xxx_hidden_AccountInfo *v2.AccountInfo `protobuf:"bytes,1,opt,name=account_info,json=accountInfo,proto3"` xxx_hidden_CredentialOptions *v2.CredentialOptions `protobuf:"bytes,2,opt,name=credential_options,json=credentialOptions,proto3"` xxx_hidden_EncryptionConfigs *[]*v2.EncryptionConfig `protobuf:"bytes,3,rep,name=encryption_configs,json=encryptionConfigs,proto3"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,4,opt,name=resource_type_id,json=resourceTypeId,proto3"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -2963,6 +2964,13 @@ func (x *Task_CreateAccountTask) GetEncryptionConfigs() []*v2.EncryptionConfig { return nil } +func (x *Task_CreateAccountTask) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + func (x *Task_CreateAccountTask) SetAccountInfo(v *v2.AccountInfo) { x.xxx_hidden_AccountInfo = v } @@ -2975,6 +2983,10 @@ func (x *Task_CreateAccountTask) SetEncryptionConfigs(v []*v2.EncryptionConfig) x.xxx_hidden_EncryptionConfigs = &v } +func (x *Task_CreateAccountTask) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + func (x *Task_CreateAccountTask) HasAccountInfo() bool { if x == nil { return false @@ -3003,6 +3015,7 @@ type Task_CreateAccountTask_builder struct { AccountInfo *v2.AccountInfo CredentialOptions *v2.CredentialOptions EncryptionConfigs []*v2.EncryptionConfig + ResourceTypeId string } func (b0 Task_CreateAccountTask_builder) Build() *Task_CreateAccountTask { @@ -3012,6 +3025,7 @@ func (b0 Task_CreateAccountTask_builder) Build() *Task_CreateAccountTask { x.xxx_hidden_AccountInfo = b.AccountInfo x.xxx_hidden_CredentialOptions = b.CredentialOptions x.xxx_hidden_EncryptionConfigs = &b.EncryptionConfigs + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId return m0 } @@ -4877,7 +4891,7 @@ var File_c1_connectorapi_baton_v1_baton_proto protoreflect.FileDescriptor const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\n" + - 
"$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\x80)\n" + + "$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\xb9)\n" + "\x04Task\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12=\n" + "\x06status\x18\x02 \x01(\x0e2%.c1.connectorapi.baton.v1.Task.StatusR\x06status\x12=\n" + @@ -4925,11 +4939,13 @@ const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\n" + "RevokeTask\x12,\n" + "\x05grant\x18\x01 \x01(\v2\x16.c1.connector.v2.GrantR\x05grant\x126\n" + - "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xf9\x01\n" + + "\vannotations\x18\x02 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xb2\x02\n" + "\x11CreateAccountTask\x12?\n" + "\faccount_info\x18\x01 \x01(\v2\x1c.c1.connector.v2.AccountInfoR\vaccountInfo\x12Q\n" + "\x12credential_options\x18\x02 \x01(\v2\".c1.connector.v2.CredentialOptionsR\x11credentialOptions\x12P\n" + - "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x1aK\n" + + "\x12encryption_configs\x18\x03 \x03(\v2!.c1.connector.v2.EncryptionConfigR\x11encryptionConfigs\x127\n" + + "\x10resource_type_id\x18\x04 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\x0eresourceTypeId\x1aK\n" + "\x12CreateResourceTask\x125\n" + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x1a\x9d\x01\n" + "\x12DeleteResourceTask\x12<\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go b/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go index ca26d7e4..f63d01ba 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go @@ -8,6 +8,7 @@ import ( "sync" "time" + config "github.com/conductorone/baton-sdk/pb/c1/config/v1" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" "github.com/conductorone/baton-sdk/pkg/annotations" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" @@ -418,11 +419,20 @@ func (a *ActionManager) InvokeAction( func (a *ActionManager) invokeGlobalAction(ctx context.Context, name string, args *structpb.Struct) (string, v2.BatonActionStatus, *structpb.Struct, annotations.Annotations, error) { a.mu.RLock() handler, ok := a.handlers[name] + schema, schemaOk := a.schemas[name] a.mu.RUnlock() if !ok { return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.NotFound, fmt.Sprintf("handler for action %s not found", name)) } + if !schemaOk || schema == nil { + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.Internal, fmt.Sprintf("schema for action %s not found", name)) + } + + // Validate constraints + if err := 
validateActionConstraints(schema.GetConstraints(), args); err != nil { + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.InvalidArgument, err.Error()) + } oa := a.GetNewAction(name) @@ -432,6 +442,7 @@ func (a *ActionManager) invokeGlobalAction(ctx context.Context, name string, arg // If handler takes longer than 1 second, return status pending. // If handler takes longer than an hour, return status failed. go func() { + defer close(done) oa.SetStatus(ctx, v2.BatonActionStatus_BATON_ACTION_STATUS_RUNNING) handlerCtx, cancel := context.WithTimeoutCause(context.Background(), 1*time.Hour, errors.New("action handler timed out")) defer cancel() @@ -442,7 +453,6 @@ func (a *ActionManager) invokeGlobalAction(ctx context.Context, name string, arg } else { oa.SetError(ctx, oaErr) } - done <- struct{}{} }() select { @@ -486,13 +496,31 @@ func (a *ActionManager) invokeResourceAction( nil, status.Error(codes.NotFound, fmt.Sprintf("handler for action %s not found for resource type %s", actionName, resourceTypeID)) } + + schemas, ok := a.resourceSchemas[resourceTypeID] + if !ok { + a.mu.RUnlock() + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.Internal, fmt.Sprintf("schemas not found for resource type %s", resourceTypeID)) + } + + schema, ok := schemas[actionName] + if !ok { + a.mu.RUnlock() + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.Internal, fmt.Sprintf("schema not found for action %s", actionName)) + } a.mu.RUnlock() + // Validate constraints + if err := validateActionConstraints(schema.GetConstraints(), args); err != nil { + return "", v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED, nil, nil, status.Error(codes.InvalidArgument, err.Error()) + } + oa := a.GetNewAction(actionName) done := make(chan struct{}) // Invoke handler in goroutine go func() { + defer close(done) oa.SetStatus(ctx, v2.BatonActionStatus_BATON_ACTION_STATUS_RUNNING) handlerCtx, cancel := context.WithTimeoutCause(context.Background(), 1*time.Hour, errors.New("action handler timed out")) defer cancel() @@ -503,7 +531,6 @@ func (a *ActionManager) invokeResourceAction( } else { oa.SetError(ctx, oaErr) } - done <- struct{}{} }() // Wait for completion or timeout @@ -517,3 +544,92 @@ func (a *ActionManager) invokeResourceAction( return oa.Id, oa.Status, oa.Rv, oa.Annos, ctx.Err() } } + +// validateActionConstraints validates that the provided args satisfy the schema constraints. 
+func validateActionConstraints(constraints []*config.Constraint, args *structpb.Struct) error { + if len(constraints) == 0 { + return nil + } + + // Build map of present fields (non-null values in struct) + present := make(map[string]bool) + if args != nil { + for fieldName, value := range args.GetFields() { + if !isNullValue(value) { + present[fieldName] = true + } + } + } + + // Validate each constraint + for _, constraint := range constraints { + if err := validateConstraint(constraint, present); err != nil { + return err + } + } + return nil +} + +func validateConstraint(c *config.Constraint, present map[string]bool) error { + // Deduplicate field names to handle cases where the same field is listed multiple times + uniqueFieldNames := deduplicateStrings(c.GetFieldNames()) + + // Count how many unique primary fields are present + var primaryPresent int + for _, name := range uniqueFieldNames { + if present[name] { + primaryPresent++ + } + } + + switch c.GetKind() { + case config.ConstraintKind_CONSTRAINT_KIND_REQUIRED_TOGETHER: + if primaryPresent > 0 && primaryPresent < len(uniqueFieldNames) { + return fmt.Errorf("fields required together: %v", uniqueFieldNames) + } + case config.ConstraintKind_CONSTRAINT_KIND_MUTUALLY_EXCLUSIVE: + if primaryPresent > 1 { + return fmt.Errorf("fields are mutually exclusive: %v", uniqueFieldNames) + } + case config.ConstraintKind_CONSTRAINT_KIND_AT_LEAST_ONE: + if primaryPresent == 0 { + return fmt.Errorf("at least one required: %v", uniqueFieldNames) + } + case config.ConstraintKind_CONSTRAINT_KIND_DEPENDENT_ON: + if primaryPresent > 0 { + // Deduplicate secondary field names and check they are all present + uniqueSecondaryFieldNames := deduplicateStrings(c.GetSecondaryFieldNames()) + for _, name := range uniqueSecondaryFieldNames { + if !present[name] { + return fmt.Errorf("fields %v depend on %v which must also be present", uniqueFieldNames, uniqueSecondaryFieldNames) + } + } + } + case config.ConstraintKind_CONSTRAINT_KIND_UNSPECIFIED: + return nil + default: + return fmt.Errorf("unknown constraint kind: %v", c.GetKind()) + } + return nil +} + +// deduplicateStrings returns a new slice with duplicate strings removed, preserving order. +func deduplicateStrings(input []string) []string { + seen := make(map[string]bool) + result := make([]string, 0, len(input)) + for _, s := range input { + if !seen[s] { + seen[s] = true + result = append(result, s) + } + } + return result +} + +func isNullValue(v *structpb.Value) bool { + if v == nil { + return true + } + _, isNull := v.GetKind().(*structpb.Value_NullValue) + return isNull +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go index 93c0e5b8..ae3f324e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go @@ -234,6 +234,7 @@ func MakeMainCommand[T field.Configurable]( login, email, profile, + v.GetString("create-account-resource-type"), )) case v.GetString("create-account-login") != "": // should only be here if no create-account-profile is provided, so lets make one. 
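The validateActionConstraints/validateConstraint helpers added to pkg/actions above enforce the action schema's field constraints before a handler is invoked. An in-package sketch (validateActionConstraints is unexported) of a mutually exclusive constraint rejecting an args struct that sets both fields; the field names are hypothetical, and the exported Kind/FieldNames fields on config.Constraint are assumed to match the getters used above — adjust to setters or builders if that package uses the opaque API.

```go
package actions

import (
	"fmt"

	config "github.com/conductorone/baton-sdk/pb/c1/config/v1"
	"google.golang.org/protobuf/types/known/structpb"
)

// exampleConstraintCheck shows a MUTUALLY_EXCLUSIVE constraint over two
// hypothetical action arguments being violated because both are present.
func exampleConstraintCheck() {
	c := &config.Constraint{
		Kind:       config.ConstraintKind_CONSTRAINT_KIND_MUTUALLY_EXCLUSIVE,
		FieldNames: []string{"team_id", "team_slug"},
	}
	args, _ := structpb.NewStruct(map[string]any{
		"team_id":   123,
		"team_slug": "platform",
	})
	// Both fields are non-null in the struct, so the constraint fails and the
	// caller receives an InvalidArgument error from InvokeAction.
	if err := validateActionConstraints([]*config.Constraint{c}, args); err != nil {
		fmt.Println(err) // fields are mutually exclusive: [team_id team_slug]
	}
}
```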
@@ -251,6 +252,7 @@ func MakeMainCommand[T field.Configurable]( v.GetString("create-account-login"), v.GetString("create-account-email"), profile, + v.GetString("create-account-resource-type"), )) case v.GetString("invoke-action") != "": invokeActionArgsStr := v.GetString("invoke-action-args") diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go index c2fd1848..ae5e0c20 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go @@ -178,12 +178,24 @@ func OptionallyAddLambdaCommand[T field.Configurable]( configStructMap := configStruct.AsMap() - var fieldOptions []field.Option + var ( + fieldOptions []field.Option + schemaFields []field.SchemaField + authMethodStr string + ) if authMethod, ok := configStructMap["auth-method"]; ok { - if authMethodStr, ok := authMethod.(string); ok { + if authMethodStr, ok = authMethod.(string); ok { fieldOptions = append(fieldOptions, field.WithAuthMethod(authMethodStr)) } } + schemaFieldsMap := connectorSchema.FieldGroupFields(authMethodStr) + for _, field := range schemaFieldsMap { + schemaFields = append(schemaFields, field) + } + + if len(schemaFields) == 0 { + schemaFields = connectorSchema.Fields + } if err := field.Validate(connectorSchema, t, fieldOptions...); err != nil { return fmt.Errorf("lambda-run: failed to validate config: %w", err) @@ -216,7 +228,7 @@ func OptionallyAddLambdaCommand[T field.Configurable]( }), } - if hasOauthField(connectorSchema.Fields) { + if hasOauthField(schemaFields) { ops.TokenSource = &lambdaTokenSource{ ctx: runCtx, webKey: webKey, diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go index e9c59e63..ed7f1403 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go @@ -59,12 +59,43 @@ func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRe start := b.nowFunc() tt := tasks.CreateAccountType l := ctxzap.Extract(ctx) - if b.accountManager == nil { + + if len(b.accountManagers) == 0 { l.Error("error: connector does not have account manager configured") b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) return nil, status.Error(codes.Unimplemented, "connector does not have account manager configured") } + var accountManager AccountManagerLimited + if request.GetResourceTypeId() == "" { + if len(b.accountManagers) == 1 { + // If there's only one account manager, use it. + for _, am := range b.accountManagers { + accountManager = am + break + } + } else { + // If there are multiple account managers, default to user resource type. + var ok bool + accountManager, ok = b.accountManagers["user"] + if !ok { + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Error(codes.Unimplemented, "connector has multiple account managers configured, but no resource type specified, and no default account manager configured") + } + } + } + + // If resource type is specified, use the account manager for that resource type. 
+ if accountManager == nil { + var ok bool + accountManager, ok = b.accountManagers[request.GetResourceTypeId()] + if !ok { + l.Error("error: connector does not have account manager configured") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + return nil, status.Errorf(codes.Unimplemented, "connector does not have account manager configured for resource type: %s", request.GetResourceTypeId()) + } + } + opts, err := crypto.ConvertCredentialOptions(ctx, b.clientSecret, request.GetCredentialOptions(), request.GetEncryptionConfigs()) if err != nil { l.Error("error: converting credential options failed", zap.Error(err)) @@ -72,7 +103,7 @@ func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRe return nil, fmt.Errorf("error: converting credential options failed: %w", err) } - result, plaintexts, annos, err := b.accountManager.CreateAccount(ctx, request.GetAccountInfo(), opts) + result, plaintexts, annos, err := accountManager.CreateAccount(ctx, request.GetAccountInfo(), opts) if err != nil { l.Error("error: create account failed", zap.Error(err)) b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) @@ -119,18 +150,16 @@ func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRe return rv, nil } -func (b *builder) addAccountManager(_ context.Context, typeId string, in interface{}) error { +func (b *builder) addAccountManager(_ context.Context, typeId string, in any) error { if _, ok := in.(OldAccountManager); ok { return fmt.Errorf("error: old account manager interface implemented for %s", typeId) } if accountManager, ok := in.(AccountManagerLimited); ok { - // NOTE(kans): currently unused - but these should probably be (resource) typed - b.accountManagers[typeId] = accountManager - if b.accountManager != nil { + if _, ok := b.accountManagers[typeId]; ok { return fmt.Errorf("error: duplicate resource type found for account manager %s", typeId) } - b.accountManager = accountManager + b.accountManagers[typeId] = accountManager } return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go index 40df7dfd..ab092893 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go @@ -68,7 +68,6 @@ type builder struct { metadataProvider MetadataProvider validateProvider ValidateProvider ticketManager TicketManagerLimited - accountManager AccountManagerLimited resourceSyncers map[string]ResourceSyncerV2 resourceProvisioners map[string]ResourceProvisionerV2Limited resourceManagers map[string]ResourceManagerV2Limited @@ -76,8 +75,8 @@ type builder struct { resourceTargetedSyncers map[string]ResourceTargetedSyncerLimited credentialManagers map[string]CredentialManagerLimited eventFeeds map[string]EventFeed - accountManagers map[string]AccountManagerLimited // NOTE(kans): currently unused - actionManager ActionManager // Unified action manager for all actions + accountManagers map[string]AccountManagerLimited + actionManager ActionManager // Unified action manager for all actions } // NewConnector creates a new ConnectorServer for a new resource. 
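With account managers now keyed by resource type, CreateAccount routes on the new resource_type_id field: an empty value falls back to the single registered manager (or the "user" manager when several exist), otherwise the request must name a registered type. A small sketch of a caller building such a request with the CreateAccountRequest_builder shown above; "service_account" is a placeholder resource type ID and the account info is assumed to be built elsewhere.

```go
package example

import (
	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// newCreateAccountRequest routes account creation to a specific account
// manager. "service_account" is a placeholder that would have to match a
// resource type registered via addAccountManager; leaving ResourceTypeId
// empty preserves the previous single-manager behavior.
func newCreateAccountRequest(info *v2.AccountInfo) *v2.CreateAccountRequest {
	return v2.CreateAccountRequest_builder{
		AccountInfo:    info,
		ResourceTypeId: "service_account",
	}.Build()
}
```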
@@ -105,7 +104,6 @@ func NewConnector(ctx context.Context, in interface{}, opts ...Opt) (types.Conne metadataProvider: nil, validateProvider: nil, ticketManager: nil, - accountManager: nil, nowFunc: time.Now, clientSecret: clientSecretJWK, resourceSyncers: make(map[string]ResourceSyncerV2), @@ -345,6 +343,7 @@ func (b *builder) getCapabilities(ctx context.Context) (*v2.ConnectorCapabilitie if _, exists := b.resourceTargetedSyncers[resourceTypeID]; exists { caps = append(caps, v2.Capability_CAPABILITY_TARGETED_SYNC) + connectorCaps[v2.Capability_CAPABILITY_SERVICE_MODE_TARGETED_SYNC] = struct{}{} } if _, exists := b.resourceProvisioners[resourceTypeID]; exists { @@ -386,7 +385,7 @@ func (b *builder) getCapabilities(ctx context.Context) (*v2.ConnectorCapabilitie } // Check for account provisioning capability (global, not per resource type) - if b.accountManager != nil { + if len(b.accountManagers) > 0 { connectorCaps[v2.Capability_CAPABILITY_ACCOUNT_PROVISIONING] = struct{}{} } sort.Slice(resourceTypeCapabilities, func(i, j int) bool { @@ -452,13 +451,14 @@ func getCredentialDetails(ctx context.Context, b *builder) (*v2.CredentialDetail rv := &v2.CredentialDetails{} // Check for account provisioning capability details - if b.accountManager != nil { - accountProvisioningCapabilityDetails, _, err := b.accountManager.CreateAccountCapabilityDetails(ctx) + for _, am := range b.accountManagers { + accountProvisioningCapabilityDetails, _, err := am.CreateAccountCapabilityDetails(ctx) if err != nil { l.Error("error: getting account provisioning details", zap.Error(err)) return nil, fmt.Errorf("error: getting account provisioning details: %w", err) } rv.SetCapabilityAccountProvisioning(accountProvisioningCapabilityDetails) + break // Only need one account manager's details } // Check for credential rotation capability details diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go index 7974ef45..5b7d2148 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go @@ -280,9 +280,10 @@ type revokeConfig struct { } type createAccountConfig struct { - login string - email string - profile *structpb.Struct + login string + email string + profile *structpb.Struct + resourceTypeID string // Optional: if set, creates an account for the specified resource type. 
} type invokeActionConfig struct { @@ -470,14 +471,15 @@ func WithOnDemandRevoke(c1zPath string, grantID string) Option { } } -func WithOnDemandCreateAccount(c1zPath string, login string, email string, profile *structpb.Struct) Option { +func WithOnDemandCreateAccount(c1zPath string, login string, email string, profile *structpb.Struct, resourceTypeId string) Option { return func(ctx context.Context, cfg *runnerConfig) error { cfg.onDemand = true cfg.c1zPath = c1zPath cfg.createAccountConfig = &createAccountConfig{ - login: login, - email: email, - profile: profile, + login: login, + email: email, + profile: profile, + resourceTypeID: resourceTypeId, } return nil } @@ -797,7 +799,7 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op tm = local.NewRevoker(ctx, cfg.c1zPath, cfg.revokeConfig.grantID) case cfg.createAccountConfig != nil: - tm = local.NewCreateAccountManager(ctx, cfg.c1zPath, cfg.createAccountConfig.login, cfg.createAccountConfig.email, cfg.createAccountConfig.profile) + tm = local.NewCreateAccountManager(ctx, cfg.c1zPath, cfg.createAccountConfig.login, cfg.createAccountConfig.email, cfg.createAccountConfig.profile, cfg.createAccountConfig.resourceTypeID) case cfg.invokeActionConfig != nil: tm = local.NewActionInvoker(ctx, cfg.c1zPath, cfg.invokeActionConfig.action, cfg.invokeActionConfig.resourceTypeID, cfg.invokeActionConfig.args) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go index 64e352ec..c557da26 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go @@ -44,7 +44,7 @@ type Reader interface { // the GRPC api, but because this is defined as a streaming RPC, it isn't trivial to implement grpc streaming as part of the c1z format. GetAsset(ctx context.Context, req *v2.AssetServiceGetAssetRequest) (string, io.Reader, error) - Close() error + Close(ctx context.Context) error } // ConnectorStoreWriter defines an implementation for a connector v2 datasource writer. This is used to store sync data from an upstream provider. diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go index 5de57194..911bf095 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go @@ -11,6 +11,8 @@ import ( "time" "github.com/doug-martin/goqu/v9" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -45,11 +47,13 @@ type C1File struct { pragmas []pragma readOnly bool encoderConcurrency int + closed bool + closedMu sync.Mutex // Cached sync run for listConnectorObjects (avoids N+1 queries) - cachedViewSyncRun *syncRun - cachedViewSyncOnce sync.Once - cachedViewSyncErr error + cachedViewSyncRun *syncRun + cachedViewSyncMu sync.Mutex + cachedViewSyncErr error // Slow query tracking slowQueryLogTimes map[string]time.Time @@ -187,7 +191,7 @@ func NewC1ZFile(ctx context.Context, outputFilePath string, opts ...C1ZOption) ( opt(options) } - dbFilePath, err := loadC1z(outputFilePath, options.tmpDir, options.decoderOptions...) + dbFilePath, _, err := decompressC1z(outputFilePath, options.tmpDir, options.decoderOptions...) 
if err != nil { return nil, err } @@ -225,11 +229,45 @@ func cleanupDbDir(dbFilePath string, err error) error { var ErrReadOnly = errors.New("c1z: read only mode") // Close ensures that the sqlite database is flushed to disk, and if any changes were made we update the original database -// with our changes. -func (c *C1File) Close() error { +// with our changes. The provided context is used for the WAL checkpoint operation. +func (c *C1File) Close(ctx context.Context) error { var err error + c.closedMu.Lock() + defer c.closedMu.Unlock() + if c.closed { + l := ctxzap.Extract(ctx) + l.Warn("close called on already-closed c1file", zap.String("db_path", c.dbFilePath)) + return nil + } + if c.rawDb != nil { + // CRITICAL: Force a full WAL checkpoint before closing the database. + // This ensures all WAL data is written back to the main database file + // and the writes are synced to disk. Without this, on filesystems with + // aggressive caching (like ZFS with large ARC), the subsequent saveC1z() + // read could see stale data because the checkpoint writes may still be + // in kernel buffers. + // + // TRUNCATE mode: checkpoint as many frames as possible, then truncate + // the WAL file to zero bytes. This guarantees all data is in the main + // database file before we read it for compression. + if c.dbUpdated && !c.readOnly { + _, err = c.rawDb.ExecContext(ctx, "PRAGMA wal_checkpoint(TRUNCATE)") + if err != nil { + l := ctxzap.Extract(ctx) + // Checkpoint failed - log and continue. The subsequent Close() + // will attempt a passive checkpoint. If that also fails, we'll + // get an error from Close() or saveC1z() will read stale data. + // We log here for debugging but don't fail because: + // 1. Close() will still attempt its own checkpoint + // 2. The error might be transient (busy) + l.Warn("WAL checkpoint failed before close", + zap.Error(err), + zap.String("db_path", c.dbFilePath)) + } + } + err = c.rawDb.Close() if err != nil { return cleanupDbDir(c.dbFilePath, err) @@ -249,7 +287,13 @@ func (c *C1File) Close() error { } } - return cleanupDbDir(c.dbFilePath, err) + err = cleanupDbDir(c.dbFilePath, err) + if err != nil { + return err + } + c.closed = true + + return nil } // init ensures that the database has all of the required schema. @@ -267,6 +311,19 @@ func (c *C1File) init(ctx context.Context) error { return err } + if c.readOnly { + // Disable journaling in read only mode, since we're not writing to the database. + _, err = c.db.ExecContext(ctx, "PRAGMA journal_mode = OFF") + if err != nil { + return err + } + // Disable synchronous writes in read only mode, since we're not writing to the database. 
+ _, err = c.db.ExecContext(ctx, "PRAGMA synchronous = OFF") + if err != nil { + return err + } + } + for _, pragma := range c.pragmas { _, err := c.db.ExecContext(ctx, fmt.Sprintf("PRAGMA %s = %s", pragma.name, pragma.value)) if err != nil { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file_attached.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file_attached.go index 34bf5ea4..d7ee4c6d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file_attached.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file_attached.go @@ -4,6 +4,10 @@ import ( "context" "errors" "fmt" + + reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" + "github.com/conductorone/baton-sdk/pkg/connectorstore" + "github.com/doug-martin/goqu/v9" ) type C1FileAttached struct { @@ -11,7 +15,7 @@ type C1FileAttached struct { file *C1File } -func (c *C1FileAttached) CompactTable(ctx context.Context, destSyncID string, baseSyncID string, appliedSyncID string, tableName string) error { +func (c *C1FileAttached) CompactTable(ctx context.Context, baseSyncID string, appliedSyncID string, tableName string) error { if !c.safe { return errors.New("database has been detached") } @@ -40,20 +44,7 @@ func (c *C1FileAttached) CompactTable(ctx context.Context, destSyncID string, ba } } - // Step 1: Insert ALL records from base sync - insertBaseQuery := fmt.Sprintf(` - INSERT INTO main.%s (%s) - SELECT %s - FROM base.%s - WHERE sync_id = ? - `, tableName, columnList, selectList, tableName) - - _, err = c.file.db.ExecContext(ctx, insertBaseQuery, destSyncID, baseSyncID) - if err != nil { - return fmt.Errorf("failed to copy base records: %w", err) - } - - // Step 2: Insert/replace records from applied sync where applied.discovered_at > main.discovered_at + // Insert/replace records from applied sync where applied.discovered_at > main.discovered_at insertOrReplaceAppliedQuery := fmt.Sprintf(` INSERT OR REPLACE INTO main.%s (%s) SELECT %s @@ -73,7 +64,7 @@ func (c *C1FileAttached) CompactTable(ctx context.Context, destSyncID string, ba ) `, tableName, columnList, selectList, tableName, tableName, tableName) - _, err = c.file.db.ExecContext(ctx, insertOrReplaceAppliedQuery, destSyncID, appliedSyncID, destSyncID, destSyncID) + _, err = c.file.db.ExecContext(ctx, insertOrReplaceAppliedQuery, baseSyncID, appliedSyncID, baseSyncID, baseSyncID) return err } @@ -94,7 +85,7 @@ func (c *C1FileAttached) getTableColumns(ctx context.Context, tableName string) var cid int var name, dataType string var notNull, pk int - var defaultValue interface{} + var defaultValue any err := rows.Scan(&cid, &name, &dataType, ¬Null, &defaultValue, &pk) if err != nil { @@ -113,30 +104,73 @@ func (c *C1FileAttached) getTableColumns(ctx context.Context, tableName string) return columns, nil } -func (c *C1FileAttached) CompactResourceTypes(ctx context.Context, destSyncID string, baseSyncID string, appliedSyncID string) error { +func (c *C1FileAttached) CompactResourceTypes(ctx context.Context, baseSyncID string, appliedSyncID string) error { if !c.safe { return errors.New("database has been detached") } - return c.CompactTable(ctx, destSyncID, baseSyncID, appliedSyncID, "v1_resource_types") + return c.CompactTable(ctx, baseSyncID, appliedSyncID, "v1_resource_types") } -func (c *C1FileAttached) CompactResources(ctx context.Context, destSyncID string, baseSyncID string, appliedSyncID string) error { +func (c *C1FileAttached) CompactResources(ctx context.Context, baseSyncID string, appliedSyncID string) error 
{ if !c.safe { return errors.New("database has been detached") } - return c.CompactTable(ctx, destSyncID, baseSyncID, appliedSyncID, "v1_resources") + return c.CompactTable(ctx, baseSyncID, appliedSyncID, "v1_resources") } -func (c *C1FileAttached) CompactEntitlements(ctx context.Context, destSyncID string, baseSyncID string, appliedSyncID string) error { +func (c *C1FileAttached) CompactEntitlements(ctx context.Context, baseSyncID string, appliedSyncID string) error { if !c.safe { return errors.New("database has been detached") } - return c.CompactTable(ctx, destSyncID, baseSyncID, appliedSyncID, "v1_entitlements") + return c.CompactTable(ctx, baseSyncID, appliedSyncID, "v1_entitlements") +} + +func (c *C1FileAttached) CompactGrants(ctx context.Context, baseSyncID string, appliedSyncID string) error { + if !c.safe { + return errors.New("database has been detached") + } + return c.CompactTable(ctx, baseSyncID, appliedSyncID, "v1_grants") +} + +func unionSyncTypes(a, b connectorstore.SyncType) connectorstore.SyncType { + switch { + case a == connectorstore.SyncTypeFull || b == connectorstore.SyncTypeFull: + return connectorstore.SyncTypeFull + case a == connectorstore.SyncTypeResourcesOnly || b == connectorstore.SyncTypeResourcesOnly: + return connectorstore.SyncTypeResourcesOnly + default: + return connectorstore.SyncTypePartial + } } -func (c *C1FileAttached) CompactGrants(ctx context.Context, destSyncID string, baseSyncID string, appliedSyncID string) error { +func (c *C1FileAttached) UpdateSync(ctx context.Context, baseSync *reader_v2.SyncRun, appliedSync *reader_v2.SyncRun) error { if !c.safe { return errors.New("database has been detached") } - return c.CompactTable(ctx, destSyncID, baseSyncID, appliedSyncID, "v1_grants") + syncType := unionSyncTypes(connectorstore.SyncType(baseSync.GetSyncType()), connectorstore.SyncType(appliedSync.GetSyncType())) + + latestEndedAt := baseSync.GetEndedAt().AsTime() + if appliedSync.GetEndedAt().AsTime().After(latestEndedAt) { + latestEndedAt = appliedSync.GetEndedAt().AsTime() + } + + baseSyncID := baseSync.GetId() + q := c.file.db.Update(fmt.Sprintf("main.%s", syncRuns.Name())) + q = q.Set(goqu.Record{ + "ended_at": latestEndedAt.Format("2006-01-02 15:04:05.999999999"), + "sync_type": string(syncType), + }) + q = q.Where(goqu.C("sync_id").Eq(baseSyncID)) + + query, args, err := q.ToSQL() + if err != nil { + return fmt.Errorf("failed to build update sync query: %w", err) + } + + _, err = c.file.db.ExecContext(ctx, query, args...) 
+ if err != nil { + return fmt.Errorf("failed to update sync %s to type %s: %w", baseSyncID, syncType, err) + } + + return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/clone_sync.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/clone_sync.go index 0a6050d0..64aad8aa 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/clone_sync.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/clone_sync.go @@ -78,7 +78,7 @@ func (c *C1File) CloneSync(ctx context.Context, outPath string, syncID string) ( if err != nil { return err } - defer out.Close() + defer out.Close(ctx) err = out.init(ctx) if err != nil { @@ -142,7 +142,7 @@ func (c *C1File) CloneSync(ctx context.Context, outPath string, syncID string) ( } outFile.dbUpdated = true outFile.outputFilePath = outPath - err = outFile.Close() + err = outFile.Close(ctx) if err != nil { return err } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/decoder.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/decoder.go index 68480acc..5f3a4cd2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/decoder.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/decoder.go @@ -107,6 +107,7 @@ func WithDecoderMaxDecodedSize(n uint64) DecoderOption { // WithDecoderConcurrency sets the number of created decoders. // Default is 1, which disables async decoding/concurrency. // 0 uses GOMAXPROCS. +// -1 uses GOMAXPROCS or 4, whichever is lower. func WithDecoderConcurrency(n int) DecoderOption { return func(o *decoderOptions) error { o.decoderConcurrency = n diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/dotc1z.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/dotc1z.go index 835a2958..0d64ea4c 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/dotc1z.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/dotc1z.go @@ -50,7 +50,7 @@ func C1ZFileCheckHeader(f io.ReadSeeker) (bool, error) { } func NewExternalC1FileReader(ctx context.Context, tmpDir string, externalResourceC1ZPath string) (connectorstore.Reader, error) { - dbFilePath, err := loadC1z(externalResourceC1ZPath, tmpDir) + dbFilePath, _, err := decompressC1z(externalResourceC1ZPath, tmpDir) if err != nil { return nil, fmt.Errorf("error loading external resource c1z file: %w", err) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/file.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/file.go index b4f9d72a..b7096ce6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/file.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/file.go @@ -15,48 +15,83 @@ import ( "google.golang.org/grpc/status" ) -func loadC1z(filePath string, tmpDir string, opts ...DecoderOption) (string, error) { - var err error - workingDir, err := os.MkdirTemp(tmpDir, "c1z") +// Note(kans): decompressC1z is unfortunately called to load or create a c1z file so the error handling is rough. +// It creates its own temporary directory so that it can also do its own cleanup. +// It returns that directory for verification in tests. 
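+//
+// Minimal usage sketch (paths are illustrative; on error the temporary
+// directory is removed before returning, on success it is left in place for
+// the caller):
+//
+//	dbFilePath, tmpDir, err := decompressC1z("./sync.c1z", os.TempDir())
+//	if err != nil {
+//		return err
+//	}
+//	_ = tmpDir // retained; tests use it to verify cleanup behavior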
+func decompressC1z(c1zPath string, workingDir string, opts ...DecoderOption) (string, string, error) { + tmpDir, err := os.MkdirTemp(workingDir, "c1z") if err != nil { - return "", err + return "", tmpDir, err } - defer func() { - if err != nil { - if removeErr := os.RemoveAll(workingDir); removeErr != nil { - err = errors.Join(err, removeErr) + + var dbFile *os.File + var c1zFile *os.File + var decoder *decoder + cleanupDir := func(e error) error { + if decoder != nil { + err := decoder.Close() + if err != nil { + e = errors.Join(e, err) } } - }() - dbFilePath := filepath.Join(workingDir, "db") - dbFile, err := os.Create(dbFilePath) + if c1zFile != nil { + err := c1zFile.Close() + if err != nil { + e = errors.Join(e, err) + } + } + if dbFile != nil { + err := dbFile.Close() + if err != nil { + e = errors.Join(e, err) + } + } + if e != nil { + err := os.RemoveAll(tmpDir) + if err != nil { + e = errors.Join(e, err) + } + } + return e + } + + dbFilePath := filepath.Join(tmpDir, "db") + dbFile, err = os.Create(dbFilePath) if err != nil { - return "", err + return "", tmpDir, cleanupDir(err) } - defer dbFile.Close() - if stat, err := os.Stat(filePath); err == nil && stat.Size() != 0 { - c1zFile, err := os.Open(filePath) - if err != nil { - return "", err - } - defer c1zFile.Close() + stat, err := os.Stat(c1zPath) + if err != nil || stat.Size() == 0 { + // TODO(kans): it would be nice to know more about the error.... + return dbFilePath, tmpDir, cleanupDir(nil) + } - r, err := NewDecoder(c1zFile, opts...) - if err != nil { - return "", err - } - _, err = io.Copy(dbFile, r) - if err != nil { - return "", err - } - err = r.Close() - if err != nil { - return "", err - } + c1zFile, err = os.Open(c1zPath) + if err != nil { + return "", tmpDir, cleanupDir(err) + } + + decoder, err = NewDecoder(c1zFile, opts...) + if err != nil { + return "", tmpDir, cleanupDir(err) + } + + _, err = io.Copy(dbFile, decoder) + if err != nil { + return "", tmpDir, cleanupDir(err) + } + + // CRITICAL: Sync the database file before returning to ensure all + // decompressed data is on disk. On filesystems with aggressive caching + // (like ZFS with large ARC), SQLite might otherwise open the file and + // see incomplete data still in kernel buffers. + err = dbFile.Sync() + if err != nil { + return "", tmpDir, cleanupDir(err) } - return dbFilePath, nil + return dbFilePath, tmpDir, cleanupDir(nil) } func saveC1z(dbFilePath string, outputFilePath string, encoderConcurrency int) error { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go index 69096da3..c091d747 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go @@ -63,29 +63,6 @@ func (r *grantsTable) Migrations(ctx context.Context, db *goqu.Database) error { return nil } -// DropGrantIndexes drops the indexes on the grants table. -// This should only be called when compacting the grants table. -// These indexes are re-created when we open the database again. 
-func (c *C1File) DropGrantIndexes(ctx context.Context) error { - ctx, span := tracer.Start(ctx, "C1File.DropGrantsIndexes") - defer span.End() - - indexes := []string{ - fmt.Sprintf("idx_grants_resource_type_id_resource_id_v%s", grants.Version()), - fmt.Sprintf("idx_grants_principal_id_v%s", grants.Version()), - fmt.Sprintf("idx_grants_entitlement_id_principal_id_v%s", grants.Version()), - fmt.Sprintf("idx_grants_external_sync_v%s", grants.Version()), - } - - for _, index := range indexes { - _, err := c.db.ExecContext(ctx, fmt.Sprintf("DROP INDEX IF EXISTS %s", index)) - if err != nil { - return err - } - } - return nil -} - func (c *C1File) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) { ctx, span := tracer.Start(ctx, "C1File.ListGrants") defer span.End() diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3/s3.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3/s3.go index 2660b953..dd9fbd22 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3/s3.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3/s3.go @@ -161,6 +161,7 @@ func (s *s3Manager) SaveC1Z(ctx context.Context) error { if err != nil { return err } + defer f.Close() if s.client == nil { return fmt.Errorf("attempting to save to s3 without a valid client") diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go index c8a7b11a..e2b4ee78 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go @@ -97,27 +97,40 @@ type syncRun struct { // getCachedViewSyncRun returns the cached sync run for read operations. // This avoids N+1 queries when paginating through listConnectorObjects. -// The result is computed once and cached for the lifetime of the C1File. +// The cache is invalidated when a sync starts or ends. func (c *C1File) getCachedViewSyncRun(ctx context.Context) (*syncRun, error) { ctx, span := tracer.Start(ctx, "C1File.getCachedViewSyncRun") defer span.End() - c.cachedViewSyncOnce.Do(func() { - // First try to get a finished full sync - c.cachedViewSyncRun, c.cachedViewSyncErr = c.getFinishedSync(ctx, 0, connectorstore.SyncTypeFull) - if c.cachedViewSyncErr != nil { - return - } + c.cachedViewSyncMu.Lock() + defer c.cachedViewSyncMu.Unlock() - // If no finished sync, try to get an unfinished one - if c.cachedViewSyncRun == nil { - c.cachedViewSyncRun, c.cachedViewSyncErr = c.getLatestUnfinishedSync(ctx, connectorstore.SyncTypeAny) - } - }) + if c.cachedViewSyncRun != nil || c.cachedViewSyncErr != nil { + return c.cachedViewSyncRun, c.cachedViewSyncErr + } + + // First try to get a finished full sync + c.cachedViewSyncRun, c.cachedViewSyncErr = c.getFinishedSync(ctx, 0, connectorstore.SyncTypeFull) + if c.cachedViewSyncErr != nil { + return c.cachedViewSyncRun, c.cachedViewSyncErr + } + + // If no finished sync, try to get an unfinished one + if c.cachedViewSyncRun == nil { + c.cachedViewSyncRun, c.cachedViewSyncErr = c.getLatestUnfinishedSync(ctx, connectorstore.SyncTypeAny) + } return c.cachedViewSyncRun, c.cachedViewSyncErr } +// invalidateCachedViewSyncRun clears the cached sync run so it will be recomputed on next access. 
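+// It acquires cachedViewSyncMu itself, so callers (StartNewSync, EndSync) must
+// not already hold that mutex when invoking it.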
+func (c *C1File) invalidateCachedViewSyncRun() { + c.cachedViewSyncMu.Lock() + defer c.cachedViewSyncMu.Unlock() + c.cachedViewSyncRun = nil + c.cachedViewSyncErr = nil +} + func (c *C1File) getLatestUnfinishedSync(ctx context.Context, syncType connectorstore.SyncType) (*syncRun, error) { ctx, span := tracer.Start(ctx, "C1File.getLatestUnfinishedSync") defer span.End() @@ -539,6 +552,7 @@ func (c *C1File) StartNewSync(ctx context.Context, syncType connectorstore.SyncT } c.currentSyncID = syncID + c.invalidateCachedViewSyncRun() return c.currentSyncID, nil } @@ -597,6 +611,7 @@ func (c *C1File) EndSync(ctx context.Context) error { } c.currentSyncID = "" + c.invalidateCachedViewSyncRun() return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go index 5805b9f0..9259241e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go @@ -49,6 +49,12 @@ var ( WithHidden(true), WithDescription("JSON-formatted object of map keys and values like '{ 'key': 'value' }'"), WithPersistent(true), WithExportTarget(ExportTargetNone)) + createAccountResourceTypeField = StringField("create-account-resource-type", + WithHidden(true), + WithDescription("The resource type ID of the account to create"), + WithPersistent(true), + WithExportTarget(ExportTargetNone), + ) deleteResourceField = StringField("delete-resource", WithHidden(true), WithDescription("The id of the resource to delete"), WithPersistent(true), WithExportTarget(ExportTargetNone)) deleteResourceTypeField = StringField("delete-resource-type", WithHidden(true), WithDescription("The type of the resource to delete"), WithPersistent(true), WithExportTarget(ExportTargetNone)) eventFeedField = StringField("event-feed", WithHidden(true), WithDescription("Read feed events to stdout"), WithPersistent(true), WithExportTarget(ExportTargetNone)) @@ -315,6 +321,7 @@ var DefaultFields = []SchemaField{ createAccountEmailField, createAccountLoginField, createAccountProfileField, + createAccountResourceTypeField, deleteResourceField, deleteResourceTypeField, eventFeedField, diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go b/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go index 263f1bad..18bd2d63 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go @@ -34,9 +34,10 @@ type Provisioner struct { revokeGrantID string - createAccountLogin string - createAccountEmail string - createAccountProfile *structpb.Struct + createAccountLogin string + createAccountEmail string + createAccountProfile *structpb.Struct + createAccountResourceType string deleteResourceID string deleteResourceType string @@ -118,7 +119,7 @@ func (p *Provisioner) Close(ctx context.Context) error { var err error if p.store != nil { - storeErr := p.store.Close() + storeErr := p.store.Close(ctx) if storeErr != nil { err = errors.Join(err, storeErr) } @@ -285,6 +286,7 @@ func (p *Provisioner) createAccount(ctx context.Context) error { } _, err = p.connector.CreateAccount(ctx, v2.CreateAccountRequest_builder{ + ResourceTypeId: p.createAccountResourceType, AccountInfo: v2.AccountInfo_builder{ Emails: emails, Login: p.createAccountLogin, @@ -297,7 +299,11 @@ func (p *Provisioner) createAccount(ctx context.Context) error { return err } - l.Debug("account created", 
zap.String("login", p.createAccountLogin), zap.String("email", p.createAccountEmail)) + l.Debug("account created", + zap.String("login", p.createAccountLogin), + zap.String("email", p.createAccountEmail), + zap.String("resource_type", p.createAccountResourceType), + ) return nil } @@ -373,13 +379,14 @@ func NewResourceDeleter(c types.ConnectorClient, dbPath string, resourceId strin } } -func NewCreateAccountManager(c types.ConnectorClient, dbPath string, login string, email string, profile *structpb.Struct) *Provisioner { +func NewCreateAccountManager(c types.ConnectorClient, dbPath string, login string, email string, profile *structpb.Struct, resourceType string) *Provisioner { return &Provisioner{ - dbPath: dbPath, - connector: c, - createAccountLogin: login, - createAccountEmail: email, - createAccountProfile: profile, + dbPath: dbPath, + connector: c, + createAccountLogin: login, + createAccountEmail: email, + createAccountProfile: profile, + createAccountResourceType: resourceType, } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go index 5f7db3e1..ecef98b7 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go @@ -1,3 +1,3 @@ package sdk -const Version = "v0.6.8" +const Version = "v0.6.23" diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go index 110efa35..8e175146 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go @@ -12,6 +12,8 @@ import ( "slices" "strconv" "strings" + native_sync "sync" + "sync/atomic" "time" "github.com/Masterminds/semver/v3" @@ -46,6 +48,39 @@ var tracer = otel.Tracer("baton-sdk/sync") var dontFixCycles, _ = strconv.ParseBool(os.Getenv("BATON_DONT_FIX_CYCLES")) var ErrSyncNotComplete = fmt.Errorf("sync exited without finishing") +var ErrTooManyWarnings = fmt.Errorf("too many warnings, exiting sync") +var ErrNoSyncIDFound = fmt.Errorf("no syncID found after starting or resuming sync") + +// IsSyncPreservable returns true if the error returned by Sync() means that the sync artifact is useful. +// This either means that there was no error, or that the error is recoverable (we can resume the sync and possibly succeed next time). +func IsSyncPreservable(err error) bool { + if err == nil { + return true + } + // ErrSyncNotComplete means we hit the run duration timeout. + // ErrTooManyWarnings means we hit too many warnings. + // Both are recoverable errors. 
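+ // Anything else is inspected as a gRPC status below: codes that usually
+ // indicate upstream or transient failures keep the artifact, everything
+ // else is treated as unrecoverable.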
+ if errors.Is(err, ErrSyncNotComplete) || errors.Is(err, ErrTooManyWarnings) { + return true + } + statusErr, ok := status.FromError(err) + if !ok { + return false + } + switch statusErr.Code() { + case codes.OK, + codes.NotFound, + codes.PermissionDenied, + codes.ResourceExhausted, + codes.FailedPrecondition, + codes.Aborted, + codes.Unavailable, + codes.Unauthenticated: + return true + default: + return false + } +} type Syncer interface { Sync(context.Context) error @@ -220,6 +255,8 @@ type syncer struct { injectSyncIDAnnotation bool setSessionStore sessions.SetSessionStore syncResourceTypes []string + previousSyncMu native_sync.Mutex + previousSyncIDPtr atomic.Pointer[string] } const minCheckpointInterval = 10 * time.Second @@ -245,6 +282,33 @@ func (s *syncer) Checkpoint(ctx context.Context, force bool) error { return nil } +func (s *syncer) getPreviousFullSyncID(ctx context.Context) (string, error) { + if ptr := s.previousSyncIDPtr.Load(); ptr != nil { + return *ptr, nil + } + + s.previousSyncMu.Lock() + defer s.previousSyncMu.Unlock() + + if ptr := s.previousSyncIDPtr.Load(); ptr != nil { + return *ptr, nil + } + + psf, ok := s.store.(latestSyncFetcher) + if !ok { + empty := "" + s.previousSyncIDPtr.Store(&empty) + return "", nil + } + + previousSyncID, err := psf.LatestFinishedSync(ctx, connectorstore.SyncTypeFull) + if err == nil { + s.previousSyncIDPtr.Store(&previousSyncID) + } + + return previousSyncID, err +} + func (s *syncer) handleInitialActionForStep(ctx context.Context, a Action) { if s.transitionHandler != nil { s.transitionHandler(a) @@ -398,7 +462,7 @@ func (s *syncer) Sync(ctx context.Context) error { // Set the syncID on the wrapper after we have it if syncID == "" { - err = errors.New("no syncID found after starting or resuming sync") + err = ErrNoSyncIDFound l.Error("no syncID found after starting or resuming sync", zap.Error(err)) return err } @@ -473,7 +537,7 @@ func (s *syncer) Sync(ctx context.Context) error { if len(warnings) > 10 { completedActionsCount := s.state.GetCompletedActionsCount() if completedActionsCount > 0 && float64(len(warnings))/float64(completedActionsCount) > 0.1 { - return fmt.Errorf("too many warnings, exiting sync. warnings: %v completed actions: %d", warnings, completedActionsCount) + return fmt.Errorf("%w: warnings: %v completed actions: %d", ErrTooManyWarnings, warnings, completedActionsCount) } } select { @@ -843,6 +907,12 @@ func validateSyncResourceTypesFilter(resourceTypesFilter []string, validResource return nil } +func (s *syncer) hasChildResources(resource *v2.Resource) bool { + annos := annotations.Annotations(resource.GetAnnotations()) + + return annos.Contains((*v2.ChildResourceType)(nil)) +} + // getSubResources fetches the sub resource types from a resources' annotations. 
func (s *syncer) getSubResources(ctx context.Context, parent *v2.Resource) error { ctx, span := tracer.Start(ctx, "syncer.getSubResources") @@ -1064,21 +1134,38 @@ func (s *syncer) syncResources(ctx context.Context) error { bulkPutResoruces := []*v2.Resource{} for _, r := range resp.GetList() { + validatedResource := false + // Check if we've already synced this resource, skip it if we have _, err = s.store.GetResource(ctx, reader_v2.ResourcesReaderServiceGetResourceRequest_builder{ ResourceId: v2.ResourceId_builder{ResourceType: r.GetId().GetResourceType(), Resource: r.GetId().GetResource()}.Build(), }.Build()) if err == nil { - continue + err = s.validateResourceTraits(ctx, r) + if err != nil { + return err + } + validatedResource = true + + // We must *ALSO* check if we have any child resources. + if !s.hasChildResources(r) { + // Since we only have the resource type IDs of child resources, + // we can't tell if we already have synced those child resources. + // Those children may also have their own child resources, + // so we are conservative here and just re-sync this resource. + continue + } } - if !errors.Is(err, sql.ErrNoRows) { + if err != nil && !errors.Is(err, sql.ErrNoRows) { return err } - err = s.validateResourceTraits(ctx, r) - if err != nil { - return err + if !validatedResource { + err = s.validateResourceTraits(ctx, r) + if err != nil { + return err + } } // Set the resource creation source @@ -1860,14 +1947,9 @@ func (s *syncer) fetchResourceForPreviousSync(ctx context.Context, resourceID *v l := ctxzap.Extract(ctx) - var previousSyncID string - var err error - - if psf, ok := s.store.(latestSyncFetcher); ok { - previousSyncID, err = psf.LatestFinishedSync(ctx, connectorstore.SyncTypeFull) - if err != nil { - return "", nil, err - } + previousSyncID, err := s.getPreviousFullSyncID(ctx) + if err != nil { + return "", nil, err } if previousSyncID == "" { @@ -1932,6 +2014,7 @@ func (s *syncer) fetchEtaggedGrantsForResource( var ret []*v2.Grant // No previous etag, so an etag match is not possible + // TODO(kans): do the request again to get the grants, but this time don't use the etag match! 
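+ // For now an ETag match without a previous sync generation is a hard error:
+ // there is no earlier grant set to copy from, so the match cannot be honored.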
if prevEtag == nil { return nil, false, errors.New("connector returned an etag match but there is no previous sync generation to use") } @@ -2886,14 +2969,14 @@ func (s *syncer) Close(ctx context.Context) error { var err error if s.store != nil { - err = s.store.Close() + err = s.store.Close(ctx) if err != nil { return fmt.Errorf("error closing store: %w", err) } } if s.externalResourceReader != nil { - err = s.externalResourceReader.Close() + err = s.externalResourceReader.Close(ctx) if err != nil { return fmt.Errorf("error closing external resource reader: %w", err) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go index cb675996..00291d53 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" "github.com/conductorone/baton-sdk/pkg/connectorstore" "github.com/conductorone/baton-sdk/pkg/dotc1z" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" @@ -13,49 +14,40 @@ import ( type Compactor struct { base *dotc1z.C1File applied *dotc1z.C1File - dest *dotc1z.C1File } -func NewAttachedCompactor(base *dotc1z.C1File, applied *dotc1z.C1File, dest *dotc1z.C1File) *Compactor { +func NewAttachedCompactor(base *dotc1z.C1File, applied *dotc1z.C1File) *Compactor { return &Compactor{ base: base, applied: applied, - dest: dest, } } -func (c *Compactor) CompactWithSyncID(ctx context.Context, destSyncID string) error { - baseSyncID, err := c.base.LatestFinishedSyncID(ctx, connectorstore.SyncTypeAny) +func (c *Compactor) Compact(ctx context.Context) error { + baseSync, err := c.base.GetLatestFinishedSync(ctx, reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest_builder{ + SyncType: string(connectorstore.SyncTypeAny), + }.Build()) if err != nil { - return fmt.Errorf("failed to get base sync ID: %w", err) + return fmt.Errorf("failed to get base sync: %w", err) } - if baseSyncID == "" { + if baseSync == nil || baseSync.GetSync() == nil { return fmt.Errorf("no finished sync found in base") } - appliedSyncID, err := c.applied.LatestFinishedSyncID(ctx, connectorstore.SyncTypeAny) + appliedSync, err := c.applied.GetLatestFinishedSync(ctx, reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest_builder{ + SyncType: string(connectorstore.SyncTypeAny), + }.Build()) if err != nil { - return fmt.Errorf("failed to get applied sync ID: %w", err) + return fmt.Errorf("failed to get applied sync: %w", err) } - if appliedSyncID == "" { + if appliedSync == nil || appliedSync.GetSync() == nil { return fmt.Errorf("no finished sync found in applied") } - // Attach both the base and applied databases to the destination - base, err := c.dest.AttachFile(c.base, "base") - if err != nil { - return fmt.Errorf("failed to attach databases to destination: %w", err) - } l := ctxzap.Extract(ctx) - defer func() { - _, err := base.DetachFile("base") - if err != nil { - l.Error("failed to detach file", zap.Error(err)) - } - }() // Attach both the base and applied databases to the destination - attached, err := c.dest.AttachFile(c.applied, "attached") + attached, err := c.base.AttachFile(c.applied, "attached") if err != nil { return fmt.Errorf("failed to attach databases to destination: %w", err) } @@ -66,40 +58,36 @@ func (c *Compactor) CompactWithSyncID(ctx context.Context, 
destSyncID string) er } }() - // Drop grants indexes to improve performance. - err = c.dest.DropGrantIndexes(ctx) - if err != nil { - return fmt.Errorf("failed to drop grants indexes: %w", err) - } - - if err := c.processRecords(ctx, attached, destSyncID, baseSyncID, appliedSyncID); err != nil { + if err := c.processRecords(ctx, attached, baseSync.GetSync(), appliedSync.GetSync()); err != nil { return fmt.Errorf("failed to process records: %w", err) } - // Re-create the destination database to re-create the grant indexes. - err = c.dest.InitTables(ctx) - if err != nil { - return fmt.Errorf("failed to re-create destination database: %w", err) - } - return nil } -func (c *Compactor) processRecords(ctx context.Context, attached *dotc1z.C1FileAttached, destSyncID string, baseSyncID string, appliedSyncID string) error { +func (c *Compactor) processRecords(ctx context.Context, attached *dotc1z.C1FileAttached, baseSync *reader_v2.SyncRun, appliedSync *reader_v2.SyncRun) error { + baseSyncID := baseSync.GetId() + appliedSyncID := appliedSync.GetId() + + // Update the base sync type to the union of the base and applied sync types. + if err := attached.UpdateSync(ctx, baseSync, appliedSync); err != nil { + return fmt.Errorf("failed to update sync %s: %w", baseSyncID, err) + } + // Compact all tables: copy base records and merge newer applied records using raw SQL - if err := attached.CompactResourceTypes(ctx, destSyncID, baseSyncID, appliedSyncID); err != nil { + if err := attached.CompactResourceTypes(ctx, baseSyncID, appliedSyncID); err != nil { return fmt.Errorf("failed to compact resource types: %w", err) } - if err := attached.CompactResources(ctx, destSyncID, baseSyncID, appliedSyncID); err != nil { + if err := attached.CompactResources(ctx, baseSyncID, appliedSyncID); err != nil { return fmt.Errorf("failed to compact resources: %w", err) } - if err := attached.CompactEntitlements(ctx, destSyncID, baseSyncID, appliedSyncID); err != nil { + if err := attached.CompactEntitlements(ctx, baseSyncID, appliedSyncID); err != nil { return fmt.Errorf("failed to compact entitlements: %w", err) } - if err := attached.CompactGrants(ctx, destSyncID, baseSyncID, appliedSyncID); err != nil { + if err := attached.CompactGrants(ctx, baseSyncID, appliedSyncID); err != nil { return fmt.Errorf("failed to compact grants: %w", err) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go index dc8a070d..2dafb58d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go @@ -16,7 +16,6 @@ import ( "github.com/conductorone/baton-sdk/pkg/sdk" "github.com/conductorone/baton-sdk/pkg/sync" "github.com/conductorone/baton-sdk/pkg/synccompactor/attached" - "github.com/conductorone/baton-sdk/pkg/synccompactor/naive" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.opentelemetry.io/otel" "go.uber.org/zap" @@ -27,13 +26,13 @@ var tracer = otel.Tracer("baton-sdk/pkg.synccompactor") type CompactorType string const ( - CompactorTypeNaive CompactorType = "naive" CompactorTypeAttached CompactorType = "attached" ) type Compactor struct { compactorType CompactorType entries []*CompactableSync + compactedC1z *dotc1z.C1File tmpDir string destDir string @@ -136,65 +135,95 @@ func (c *Compactor) Compact(ctx context.Context) (*CompactableSync, error) { default: } - // Base sync is c.entries[0], so compact all 
incrementals first, then apply that onto the base. - applied := c.entries[len(c.entries)-1] - for i := len(c.entries) - 2; i >= 0; i-- { - applied, err = c.doOneCompaction(ctx, c.entries[i], applied) - if err != nil { - return nil, err - } + opts := []dotc1z.C1ZOption{ + dotc1z.WithTmpDir(c.tmpDir), + // Performance improvements: + // NOTE: We do not close this c1z after compaction, so syncer will have these pragmas when expanding grants. + // We should re-evaluate these pragmas when partial syncs sync grants. + // Disable journaling. + dotc1z.WithPragma("journal_mode", "OFF"), + // Disable synchronous writes + dotc1z.WithPragma("synchronous", "OFF"), + // Use exclusive locking. + dotc1z.WithPragma("main.locking_mode", "EXCLUSIVE"), + // Use parallel decoding. + dotc1z.WithDecoderOptions(dotc1z.WithDecoderConcurrency(-1)), + // Use parallel encoding. + dotc1z.WithEncoderConcurrency(0), } - // Grant expansion doesn't use the connector interface at all, so giving syncer an empty connector is safe... for now. - // If that ever changes, we should implement a file connector that is a wrapper around the reader. - emptyConnector, err := sdk.NewEmptyConnector() + fileName := fmt.Sprintf("compacted-%s.c1z", c.entries[0].SyncID) + destFilePath := path.Join(c.tmpDir, fileName) + + c.compactedC1z, err = dotc1z.NewC1ZFile(ctx, destFilePath, opts...) if err != nil { - l.Error("error creating empty connector", zap.Error(err)) + l.Error("doOneCompaction failed: could not create c1z file", zap.Error(err)) return nil, err } - - // Use syncer to expand grants. - // TODO: Handle external resources. - syncOpts := []sync.SyncOpt{ - sync.WithC1ZPath(applied.FilePath), - sync.WithTmpDir(c.tmpDir), - sync.WithSyncID(applied.SyncID), - sync.WithOnlyExpandGrants(), + defer func() { + if c.compactedC1z == nil { + return + } + err := c.compactedC1z.Close(ctx) + if err != nil { + l.Error("error closing compacted c1z", zap.Error(err)) + } + }() + // Start new sync of type partial. If we compact syncs of other types, this sync type will be updated by attached.UpdateSync which is called by doOneCompaction(). + newSyncId, err := c.compactedC1z.StartNewSync(ctx, connectorstore.SyncTypePartial, "") + if err != nil { + return nil, fmt.Errorf("failed to start new sync: %w", err) } + err = c.compactedC1z.EndSync(ctx) + if err != nil { + return nil, fmt.Errorf("failed to end sync: %w", err) + } + l.Debug("new empty partial sync created", zap.String("sync_id", newSyncId)) - compactionDuration := time.Since(compactionStart) - runDuration := c.runDuration - compactionDuration - l.Debug("finished compaction", zap.Duration("compaction_duration", compactionDuration)) - - switch { - case c.runDuration > 0 && runDuration < 0: - return nil, fmt.Errorf("unable to finish compaction sync in run duration (%s). compactions took %s", c.runDuration, compactionDuration) - case runDuration > 0: - syncOpts = append(syncOpts, sync.WithRunDuration(runDuration)) + // Base sync is c.entries[0], so compact in reverse order. That way we compact the biggest sync last. 
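+ // Roughly: for entries [base, inc1, inc2] this applies inc2, then inc1, then
+ // base, so each older sync only contributes rows that a newer sync has not
+ // already written into the compacted database.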
+ for i := len(c.entries) - 1; i >= 0; i-- { + err = c.doOneCompaction(ctx, c.entries[i]) + if err != nil { + return nil, fmt.Errorf("failed to compact sync %s: %w", c.entries[i].SyncID, err) + } } - syncer, err := sync.NewSyncer( - ctx, - emptyConnector, - syncOpts..., - ) + resp, err := c.compactedC1z.GetSync(ctx, reader_v2.SyncsReaderServiceGetSyncRequest_builder{ + SyncId: newSyncId, + }.Build()) if err != nil { - l.Error("error creating syncer", zap.Error(err)) - return nil, err + return nil, fmt.Errorf("failed to get sync: %w", err) + } + newSync := resp.GetSync() + if newSync == nil { + return nil, fmt.Errorf("no sync found") } - if err := syncer.Sync(ctx); err != nil { - l.Error("error syncing with grant expansion", zap.Error(err)) - return nil, err + if newSync.GetId() != newSyncId { + return nil, fmt.Errorf("new sync id does not match expected id: %s != %s", newSync.GetId(), newSyncId) } - if err := syncer.Close(ctx); err != nil { - l.Error("error closing syncer", zap.Error(err)) - return nil, err + + if newSync.GetSyncType() == string(connectorstore.SyncTypePartial) { + err = c.compactedC1z.Cleanup(ctx) + if err != nil { + return nil, fmt.Errorf("failed to cleanup compacted c1z: %w", err) + } + // Close compactedC1z so that the c1z file is written to disk before cpFile() is called. + err = c.compactedC1z.Close(ctx) + if err != nil { + return nil, fmt.Errorf("failed to close compacted c1z: %w", err) + } + c.compactedC1z = nil + } else { + err = c.expandGrants(ctx, newSyncId, compactionStart) + if err != nil { + return nil, fmt.Errorf("failed to expand grants: %w", err) + } } // Move last compacted file to the destination dir - finalPath := path.Join(c.destDir, fmt.Sprintf("compacted-%s.c1z", applied.SyncID)) - if err := cpFile(applied.FilePath, finalPath); err != nil { + finalPath := path.Join(c.destDir, fmt.Sprintf("compacted-%s.c1z", newSyncId)) + if err := cpFile(ctx, destFilePath, finalPath); err != nil { return nil, err } @@ -205,10 +234,18 @@ func (c *Compactor) Compact(ctx context.Context) (*CompactableSync, error) { } finalPath = abs } - return &CompactableSync{FilePath: finalPath, SyncID: applied.SyncID}, nil + return &CompactableSync{FilePath: finalPath, SyncID: newSyncId}, nil } -func cpFile(sourcePath string, destPath string) error { +func cpFile(ctx context.Context, sourcePath string, destPath string) error { + err := os.Rename(sourcePath, destPath) + if err == nil { + return nil + } + + l := ctxzap.Extract(ctx) + l.Warn("compactor: failed to rename final compacted file, falling back to copy", zap.Error(err), zap.String("source_path", sourcePath), zap.String("dest_path", destPath)) + source, err := os.Open(sourcePath) if err != nil { return fmt.Errorf("failed to open source file: %w", err) @@ -229,136 +266,94 @@ func cpFile(sourcePath string, destPath string) error { return nil } -func (c *Compactor) getLatestObjects(ctx context.Context, info *CompactableSync) (*reader_v2.SyncRun, *dotc1z.C1File, func(), error) { - cleanup := func() {} +func (c *Compactor) doOneCompaction(ctx context.Context, cs *CompactableSync) error { + ctx, span := tracer.Start(ctx, "Compactor.doOneCompaction") + defer span.End() + l := ctxzap.Extract(ctx) + l.Info( + "running compaction", + zap.String("apply_file", cs.FilePath), + zap.String("apply_sync", cs.SyncID), + zap.String("tmp_dir", c.tmpDir), + ) - baseFile, err := dotc1z.NewC1ZFile( + applyFile, err := dotc1z.NewC1ZFile( ctx, - info.FilePath, + cs.FilePath, dotc1z.WithTmpDir(c.tmpDir), - 
dotc1z.WithDecoderOptions(dotc1z.WithDecoderConcurrency(0)), + dotc1z.WithDecoderOptions(dotc1z.WithDecoderConcurrency(-1)), dotc1z.WithReadOnly(true), // We're only reading, so it's safe to use these pragmas. - dotc1z.WithPragma("journal_mode", "OFF"), dotc1z.WithPragma("synchronous", "OFF"), + dotc1z.WithPragma("journal_mode", "OFF"), + dotc1z.WithPragma("locking_mode", "EXCLUSIVE"), ) if err != nil { - return nil, nil, cleanup, err - } - - cleanup = func() { - _ = baseFile.Close() + return err } + defer func() { + err := applyFile.Close(ctx) + if err != nil { + l.Error("error closing apply file", zap.Error(err), zap.String("apply_file", cs.FilePath)) + } + }() - latestAppliedSync, err := baseFile.GetSync(ctx, reader_v2.SyncsReaderServiceGetSyncRequest_builder{ - SyncId: info.SyncID, - Annotations: nil, - }.Build()) - if err != nil { - return nil, nil, cleanup, err + runner := attached.NewAttachedCompactor(c.compactedC1z, applyFile) + if err := runner.Compact(ctx); err != nil { + l.Error("error running compaction", zap.Error(err), zap.String("apply_file", cs.FilePath)) + return err } - return latestAppliedSync.GetSync(), baseFile, cleanup, nil -} - -func unionSyncTypes(a, b connectorstore.SyncType) connectorstore.SyncType { - switch { - case a == connectorstore.SyncTypeFull || b == connectorstore.SyncTypeFull: - return connectorstore.SyncTypeFull - case a == connectorstore.SyncTypeResourcesOnly || b == connectorstore.SyncTypeResourcesOnly: - return connectorstore.SyncTypeResourcesOnly - default: - return connectorstore.SyncTypePartial - } + return nil } -func (c *Compactor) doOneCompaction(ctx context.Context, base *CompactableSync, applied *CompactableSync) (*CompactableSync, error) { - ctx, span := tracer.Start(ctx, "Compactor.doOneCompaction") - defer span.End() +func (c *Compactor) expandGrants(ctx context.Context, newSyncId string, compactionStart time.Time) error { l := ctxzap.Extract(ctx) - l.Info( - "running compaction", - zap.String("base_file", base.FilePath), - zap.String("base_sync", base.SyncID), - zap.String("applied_file", applied.FilePath), - zap.String("applied_sync", applied.SyncID), - zap.String("tmp_dir", c.tmpDir), - ) - opts := []dotc1z.C1ZOption{ - dotc1z.WithTmpDir(c.tmpDir), - // Performance improvements: - // Disable journaling. - dotc1z.WithPragma("journal_mode", "OFF"), - // Disable synchronous writes - dotc1z.WithPragma("synchronous", "OFF"), - // Use exclusive locking. - dotc1z.WithPragma("main.locking_mode", "EXCLUSIVE"), - // Use memory for temporary storage. - dotc1z.WithPragma("temp_store", "MEMORY"), - // We close this c1z after compaction, so syncer won't have these pragmas when expanding grants. - // Use parallel decoding. - dotc1z.WithDecoderOptions(dotc1z.WithDecoderConcurrency(0)), - // Use parallel encoding. - dotc1z.WithEncoderConcurrency(0), - } - - fileName := fmt.Sprintf("compacted-%s-%s.c1z", base.SyncID, applied.SyncID) - newFile, err := dotc1z.NewC1ZFile(ctx, path.Join(c.tmpDir, fileName), opts...) - if err != nil { - l.Error("doOneCompaction failed: could not create c1z file", zap.Error(err)) - return nil, err - } - defer func() { _ = newFile.Close() }() - - baseSync, baseFile, cleanupBase, err := c.getLatestObjects(ctx, base) - defer cleanupBase() + // Grant expansion doesn't use the connector interface at all, so giving syncer an empty connector is safe... for now. + // If that ever changes, we should implement a file connector that is a wrapper around the reader. 
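+ // The syncer below runs with WithOnlyExpandGrants, so it only reads grants
+ // already present in the compacted store and writes the expanded grants back;
+ // it never issues requests against a real connector.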
+ emptyConnector, err := sdk.NewEmptyConnector() if err != nil { - return nil, err + l.Error("error creating empty connector", zap.Error(err)) + return err } - appliedSync, appliedFile, cleanupApplied, err := c.getLatestObjects(ctx, applied) - defer cleanupApplied() - if err != nil { - return nil, err + // Use syncer to expand grants. + // TODO: Handle external resources. + syncOpts := []sync.SyncOpt{ + sync.WithConnectorStore(c.compactedC1z), // Use the existing C1File so we're not wasting time compressing & decompressing it. + sync.WithTmpDir(c.tmpDir), + sync.WithSyncID(newSyncId), + sync.WithOnlyExpandGrants(), } - syncType := unionSyncTypes(connectorstore.SyncType(baseSync.GetSyncType()), connectorstore.SyncType(appliedSync.GetSyncType())) + compactionDuration := time.Since(compactionStart) + runDuration := c.runDuration - compactionDuration + l.Debug("finished compaction", zap.Duration("compaction_duration", compactionDuration)) - newSyncId, err := newFile.StartNewSync(ctx, syncType, "") - if err != nil { - return nil, err + switch { + case c.runDuration > 0 && runDuration <= 0: + return fmt.Errorf("unable to finish compaction sync in run duration (%s). compactions took %s", c.runDuration, compactionDuration) + case runDuration > 0: + syncOpts = append(syncOpts, sync.WithRunDuration(runDuration)) } - switch c.compactorType { - case CompactorTypeNaive: - // TODO: Add support for syncID or remove naive compactor. - runner := naive.NewNaiveCompactor(baseFile, appliedFile, newFile) - if err := runner.Compact(ctx); err != nil { - l.Error("error running compaction", zap.Error(err)) - return nil, err - } - case CompactorTypeAttached: - runner := attached.NewAttachedCompactor(baseFile, appliedFile, newFile) - if err := runner.CompactWithSyncID(ctx, newSyncId); err != nil { - l.Error("error running compaction", zap.Error(err)) - return nil, err - } - default: - // c.compactorType defaults to attached, so this should never happen. 
- return nil, fmt.Errorf("invalid compactor type: %s", c.compactorType) + syncer, err := sync.NewSyncer( + ctx, + emptyConnector, + syncOpts..., + ) + if err != nil { + l.Error("error creating syncer", zap.Error(err)) + return err } - if err := newFile.EndSync(ctx); err != nil { - return nil, err + if err := syncer.Sync(ctx); err != nil { + l.Error("error syncing with grant expansion", zap.Error(err)) + return err } - - outputFilepath, err := newFile.OutputFilepath() - if err != nil { - return nil, err + if err := syncer.Close(ctx); err != nil { + l.Error("error closing syncer", zap.Error(err)) + return err } - - return &CompactableSync{ - FilePath: outputFilepath, - SyncID: newSyncId, - }, nil + return nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive.go deleted file mode 100644 index 7e4ae6e9..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive.go +++ /dev/null @@ -1,88 +0,0 @@ -package naive - -import ( - "context" - - "github.com/conductorone/baton-sdk/pkg/dotc1z" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "google.golang.org/protobuf/proto" -) - -func NewNaiveCompactor(base *dotc1z.C1File, applied *dotc1z.C1File, dest *dotc1z.C1File) *Compactor { - return &Compactor{ - base: base, - applied: applied, - dest: dest, - } -} - -type Compactor struct { - base *dotc1z.C1File - applied *dotc1z.C1File - dest *dotc1z.C1File -} - -func (n *Compactor) Compact(ctx context.Context) error { - if err := n.processResourceTypes(ctx); err != nil { - return err - } - if err := n.processResources(ctx); err != nil { - return err - } - if err := n.processEntitlements(ctx); err != nil { - return err - } - if err := n.processGrants(ctx); err != nil { - return err - } - return nil -} - -func naiveCompact[T proto.Message, REQ listRequest, RESP listResponse[T]]( - ctx context.Context, - base listFunc[T, REQ, RESP], - applied listFunc[T, REQ, RESP], - save func(context.Context, ...T) error, -) error { - var t T - l := ctxzap.Extract(ctx) - l.Info("naive compaction: compacting objects", zap.String("object_type", string(t.ProtoReflect().Descriptor().FullName()))) - // List all objects from the base file and save them in the destination file - if err := listAllObjects(ctx, base, func(items []T) (bool, error) { - if err := save(ctx, items...); err != nil { - return false, err - } - return true, nil - }); err != nil { - return err - } - - // Then list all objects from the applied file and save them in the destination file, overwriting ones with the same external_id - if err := listAllObjects(ctx, applied, func(items []T) (bool, error) { - if err := save(ctx, items...); err != nil { - return false, err - } - return true, nil - }); err != nil { - return err - } - - return nil -} - -func (n *Compactor) processResourceTypes(ctx context.Context) error { - return naiveCompact(ctx, n.base.ListResourceTypes, n.applied.ListResourceTypes, n.dest.PutResourceTypesIfNewer) -} - -func (n *Compactor) processResources(ctx context.Context) error { - return naiveCompact(ctx, n.base.ListResources, n.applied.ListResources, n.dest.PutResourcesIfNewer) -} - -func (n *Compactor) processGrants(ctx context.Context) error { - return naiveCompact(ctx, n.base.ListGrants, n.applied.ListGrants, n.dest.PutGrantsIfNewer) -} - -func (n *Compactor) processEntitlements(ctx context.Context) error { - return naiveCompact(ctx, n.base.ListEntitlements, 
n.applied.ListEntitlements, n.dest.PutEntitlementsIfNewer) -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive_unroll.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive_unroll.go deleted file mode 100644 index cc4f8064..00000000 --- a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive_unroll.go +++ /dev/null @@ -1,98 +0,0 @@ -package naive - -import ( - "context" - "reflect" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" -) - -type listRequest interface { - proto.Message - GetPageSize() uint32 - GetPageToken() string - GetAnnotations() []*anypb.Any -} - -type listResponse[T proto.Message] interface { - GetNextPageToken() string - GetAnnotations() []*anypb.Any - GetList() []T -} - -// createRequest creates a new request object of type REQ using reflection. -func createRequest[REQ listRequest]() REQ { - var r REQ - baseType := reflect.TypeOf(r).Elem() - pointerToInitializedVal := reflect.New(baseType) - return pointerToInitializedVal.Interface().(REQ) -} - -// setFieldIfValid sets a field in a struct if it exists and can be set. -func setFieldIfValid(obj interface{}, fieldName string, setValue func(reflect.Value)) { - val := reflect.ValueOf(obj) - if val.Kind() != reflect.Ptr || val.IsNil() { - return - } - - field := val.Elem().FieldByName(fieldName) - if field.IsValid() && field.CanSet() { - setValue(field) - } -} - -// setPageSize sets the PageSize field in a request to the specified value. -func setPageSize(req listRequest, size uint64) { - setFieldIfValid(req, "PageSize", func(field reflect.Value) { - field.SetUint(size) - }) -} - -// setPageToken sets the PageToken field in a request to the specified token. -func setPageToken(req listRequest, token string) { - setFieldIfValid(req, "PageToken", func(field reflect.Value) { - field.SetString(token) - }) -} - -type listFunc[T proto.Message, REQ listRequest, RESP listResponse[T]] func(context.Context, REQ) (RESP, error) - -func listAllObjects[T proto.Message, REQ listRequest, RESP listResponse[T]](ctx context.Context, list listFunc[T, REQ, RESP], cb func(items []T) (bool, error)) error { - // Create a new request using reflection - req := createRequest[REQ]() - - // Set initial page size - setPageSize(req, 100) // Set a reasonable default page size - - var nextPageToken string - for { - // Set the page token for the current request if needed - if nextPageToken != "" { - setPageToken(req, nextPageToken) - } - - // Call the list function with the current request - resp, err := list(ctx, req) - if err != nil { - return err - } - - // Collect the results - shouldContinue, err := cb(resp.GetList()) - if err != nil { - return err - } - if !shouldContinue { - return nil - } - - // Check if there are more pages - nextPageToken = resp.GetNextPageToken() - if nextPageToken == "" || len(resp.GetList()) == 0 { - break // No more pages - } - } - - return nil -} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_account.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_account.go index eb45b3c4..f9e5255f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_account.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/create_account.go @@ -45,6 +45,7 @@ func (g *createAccountTaskHandler) HandleTask(ctx context.Context) error { AccountInfo: t.GetAccountInfo(), CredentialOptions: t.GetCredentialOptions(), EncryptionConfigs: t.GetEncryptionConfigs(), + 
ResourceTypeId: t.GetResourceTypeId(), }.Build()) if err != nil { l.Error("failed creating account", zap.Error(err)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/accounter.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/accounter.go index 7d507117..bb581b9b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/accounter.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/accounter.go @@ -18,9 +18,10 @@ type localAccountManager struct { dbPath string o sync.Once - login string - email string - profile *structpb.Struct + login string + email string + profile *structpb.Struct + resourceTypeId string } func (m *localAccountManager) GetTempDir() string { @@ -35,7 +36,9 @@ func (m *localAccountManager) Next(ctx context.Context) (*v1.Task, time.Duration var task *v1.Task m.o.Do(func() { task = v1.Task_builder{ - CreateAccount: &v1.Task_CreateAccountTask{}, + CreateAccount: &v1.Task_CreateAccountTask{ + ResourceTypeId: m.resourceTypeId, + }, }.Build() }) return task, 0, nil @@ -45,7 +48,7 @@ func (m *localAccountManager) Process(ctx context.Context, task *v1.Task, cc typ ctx, span := tracer.Start(ctx, "localAccountManager.Process", trace.WithNewRoot()) defer span.End() - accountManager := provisioner.NewCreateAccountManager(cc, m.dbPath, m.login, m.email, m.profile) + accountManager := provisioner.NewCreateAccountManager(cc, m.dbPath, m.login, m.email, m.profile, m.resourceTypeId) err := accountManager.Run(ctx) if err != nil { @@ -60,12 +63,13 @@ func (m *localAccountManager) Process(ctx context.Context, task *v1.Task, cc typ return nil } -// NewGranter returns a task manager that queues a sync task. -func NewCreateAccountManager(ctx context.Context, dbPath string, login string, email string, profile *structpb.Struct) tasks.Manager { +// NewCreateAccountManager returns a task manager that queues a create account task. +func NewCreateAccountManager(ctx context.Context, dbPath string, login string, email string, profile *structpb.Struct, resourceTypeId string) tasks.Manager { return &localAccountManager{ - dbPath: dbPath, - login: login, - email: email, - profile: profile, + dbPath: dbPath, + login: login, + email: email, + profile: profile, + resourceTypeId: resourceTypeId, } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/deleter.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/deleter.go index 400c88b2..c94c0c27 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/deleter.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/deleter.go @@ -58,7 +58,7 @@ func (m *localResourceDeleter) Process(ctx context.Context, task *v1.Task, cc ty return nil } -// NewGranter returns a task manager that queues a sync task. +// NewResourceDeleter returns a task manager that queues a delete resource task. 
func NewResourceDeleter(ctx context.Context, dbPath string, resourceId string, resourceType string) tasks.Manager { return &localResourceDeleter{ dbPath: dbPath, diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go index 856f2690..94c39b1e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go @@ -64,7 +64,7 @@ func (m *localDiffer) Process(ctx context.Context, task *v1.Task, cc types.Conne return err } - if err := file.Close(); err != nil { + if err := file.Close(ctx); err != nil { return err } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go index 8715c825..7a441a12 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go @@ -32,6 +32,12 @@ func WithDisplayName(displayName string) EntitlementOption { } } +func WithSlug(slug string) EntitlementOption { + return func(g *v2.Entitlement) { + g.SetSlug(slug) + } +} + func WithDescription(description string) EntitlementOption { return func(g *v2.Entitlement) { g.SetDescription(description) @@ -72,6 +78,21 @@ func NewAssignmentEntitlement(resource *v2.Resource, name string, entitlementOpt return entitlement } +func NewOwnershipEntitlement(resource *v2.Resource, name string, entitlementOptions ...EntitlementOption) *v2.Entitlement { + entitlement := v2.Entitlement_builder{ + Id: NewEntitlementID(resource, name), + DisplayName: name, + Slug: name, + Purpose: v2.Entitlement_PURPOSE_VALUE_OWNERSHIP, + Resource: resource, + }.Build() + + for _, entitlementOption := range entitlementOptions { + entitlementOption(entitlement) + } + return entitlement +} + func NewEntitlement(resource *v2.Resource, name, purposeStr string, entitlementOptions ...EntitlementOption) *v2.Entitlement { var purpose v2.Entitlement_PurposeValue switch purposeStr { @@ -79,6 +100,8 @@ func NewEntitlement(resource *v2.Resource, name, purposeStr string, entitlementO purpose = v2.Entitlement_PURPOSE_VALUE_PERMISSION case "assignment": purpose = v2.Entitlement_PURPOSE_VALUE_ASSIGNMENT + case "ownership": + purpose = v2.Entitlement_PURPOSE_VALUE_OWNERSHIP default: purpose = v2.Entitlement_PURPOSE_VALUE_UNSPECIFIED } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go index 9b8218d3..939bb8f2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go @@ -13,22 +13,42 @@ import ( // SecurityInsightTraitOption is a functional option for configuring a SecurityInsightTrait. type SecurityInsightTraitOption func(*v2.SecurityInsightTrait) error -// WithInsightType sets the insight type. This is typically set via NewSecurityInsightTrait, -// but can be used to override or update the type on an existing trait. -func WithInsightType(insightType string) SecurityInsightTraitOption { +// WithRiskScore sets the insight type to risk score with the given value. 
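
The new ownership helpers in pkg/types/entitlement can be exercised roughly as follows. This is a sketch: the resource, entitlement name, and description are illustrative, and it assumes the usual v2 and entitlement imports from the baton-sdk.

    // Sketch: building an ownership entitlement for a resource.
    func ownershipEntitlement(resource *v2.Resource) *v2.Entitlement {
        return entitlement.NewOwnershipEntitlement(
            resource,
            "owner",
            entitlement.WithDescription("Owner of this resource"),
            // The slug defaults to the entitlement name; WithSlug overrides it.
            entitlement.WithSlug("owner"),
        )
    }

The string-purpose constructor now recognizes "ownership" as well, so entitlement.NewEntitlement(resource, "owner", "ownership") produces the same purpose value.
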
+func WithRiskScore(value string) SecurityInsightTraitOption { return func(t *v2.SecurityInsightTrait) error { - if insightType == "" { - return fmt.Errorf("insight type cannot be empty") + if value == "" { + return fmt.Errorf("risk score value cannot be empty") } - t.SetInsightType(insightType) + t.SetRiskScore(&v2.RiskScore{ + Value: value, + }) return nil } } -// WithInsightValue sets the value of the security insight. -func WithInsightValue(value string) SecurityInsightTraitOption { +// WithIssue sets the insight type to issue with the given value. +func WithIssue(value string) SecurityInsightTraitOption { return func(t *v2.SecurityInsightTrait) error { - t.SetValue(value) + if value == "" { + return fmt.Errorf("issue value cannot be empty") + } + issue := &v2.Issue{ + Value: value, + } + t.SetIssue(issue) + return nil + } +} + +// WithIssueSeverity sets or updates the severity on an issue insight. +// This should be used after WithIssue or on an existing issue insight. +func WithIssueSeverity(severity string) SecurityInsightTraitOption { + return func(t *v2.SecurityInsightTrait) error { + issue := t.GetIssue() + if issue == nil { + return fmt.Errorf("cannot set severity: insight is not an issue type (use WithIssue first)") + } + issue.SetSeverity(severity) return nil } } @@ -73,23 +93,38 @@ func WithInsightExternalResourceTarget(externalId string, appHint string) Securi } } -// NewSecurityInsightTrait creates a new SecurityInsightTrait with the given insight type and options. -func NewSecurityInsightTrait(insightType string, opts ...SecurityInsightTraitOption) (*v2.SecurityInsightTrait, error) { - if insightType == "" { - return nil, fmt.Errorf("insight type cannot be empty") +// NewSecurityInsightTrait creates a new SecurityInsightTrait with the given options. +// You must provide either WithRiskScore or WithIssue to set the insight type. +// +// Example usage: +// +// trait, err := NewSecurityInsightTrait( +// WithIssue("CVE-2024-1234", "Critical"), +// WithInsightUserTarget("user@example.com")) +// +// trait, err := NewSecurityInsightTrait( +// WithRiskScore("85"), +// WithInsightResourceTarget(resourceId)) +func NewSecurityInsightTrait(opts ...SecurityInsightTraitOption) (*v2.SecurityInsightTrait, error) { + trait := &v2.SecurityInsightTrait{ + ObservedAt: timestamppb.Now(), } - trait := v2.SecurityInsightTrait_builder{ - InsightType: insightType, - ObservedAt: timestamppb.Now(), - }.Build() - for _, opt := range opts { if err := opt(trait); err != nil { return nil, err } } + // Validate that an insight type was set + if trait.GetRiskScore() == nil && trait.GetIssue() == nil { + return nil, fmt.Errorf("insight type must be set (use WithRiskScore or WithIssue)") + } + + if trait.GetTarget() == nil { + return nil, fmt.Errorf("target must be set (use WithInsightUserTarget, WithInsightResourceTarget, WithInsightExternalResourceTarget, or WithInsightAppUserTarget)") + } + return trait, nil } @@ -109,10 +144,20 @@ func GetSecurityInsightTrait(resource *v2.Resource) (*v2.SecurityInsightTrait, e } // WithSecurityInsightTrait adds or updates a SecurityInsightTrait annotation on a resource. -// The insightType parameter is required to ensure the trait is always valid. +// The insight type (risk score or issue) must be set via the provided options. // If the resource already has a SecurityInsightTrait, it will be updated with the provided options. -// If not, a new trait will be created with the given insightType. 
-func WithSecurityInsightTrait(insightType string, opts ...SecurityInsightTraitOption) ResourceOption { +// If not, a new trait will be created. +// +// Example usage: +// +// resource, err := NewResource( +// "Security Finding", +// resourceType, +// objectID, +// WithSecurityInsightTrait( +// WithIssue("CVE-2024-1234", "Critical"), +// WithInsightUserTarget("user@example.com"))) +func WithSecurityInsightTrait(opts ...SecurityInsightTraitOption) ResourceOption { return func(r *v2.Resource) error { t := &v2.SecurityInsightTrait{} annos := annotations.Annotations(r.GetAnnotations()) @@ -122,16 +167,9 @@ func WithSecurityInsightTrait(insightType string, opts ...SecurityInsightTraitOp } if !existing { - // Creating a new trait - insightType is required - if insightType == "" { - return fmt.Errorf("insight type is required when creating a new security insight trait") - } - t.SetInsightType(insightType) - } else if insightType != "" { - // Updating existing trait with a new type - t.SetInsightType(insightType) + // Creating a new trait - set default observation time + t.SetObservedAt(timestamppb.Now()) } - // If existing and insightType is empty, keep the existing type for _, o := range opts { if err := o(t); err != nil { @@ -139,6 +177,11 @@ func WithSecurityInsightTrait(insightType string, opts ...SecurityInsightTraitOp } } + // Validate that an insight type was set + if t.GetRiskScore() == nil && t.GetIssue() == nil { + return fmt.Errorf("insight type must be set (use WithRiskScore or WithIssue)") + } + annos.Update(t) r.SetAnnotations(annos) @@ -146,96 +189,89 @@ func WithSecurityInsightTrait(insightType string, opts ...SecurityInsightTraitOp } } -// NewUserSecurityInsightResource creates a security insight resource targeting a user by email. -// Use this when the insight should be resolved to a C1 User by Uplift. -func NewUserSecurityInsightResource( +// NewSecurityInsightResource creates a security insight resource with the given trait options. +// This is a flexible constructor that uses the options pattern to configure all aspects of the insight. +// +// Example usage: +// +// // Risk score for a user +// resource, err := NewSecurityInsightResource( +// "User Risk Score", +// securityInsightResourceType, +// "user-123", +// WithRiskScore("85"), +// WithInsightUserTarget("user@example.com")) +// +// // Issue with severity for a resource +// resource, err := NewSecurityInsightResource( +// "Critical Vulnerability", +// securityInsightResourceType, +// "vuln-456", +// WithIssue("CVE-2024-1234", "Critical"), +// WithInsightResourceTarget(resourceId)) +// +// // Issue for external resource with custom observation time +// resource, err := NewSecurityInsightResource( +// "AWS Security Finding", +// securityInsightResourceType, +// "finding-789", +// WithIssue("S3 bucket publicly accessible"), +// WithIssueSeverity("High"), +// WithInsightExternalResourceTarget("arn:aws:s3:::my-bucket", "aws"), +// WithInsightObservedAt(time.Now())) +func NewSecurityInsightResource( name string, resourceType *v2.ResourceType, objectID interface{}, - insightType string, - value string, - userEmail string, - traitOpts []SecurityInsightTraitOption, - opts ...ResourceOption, + traitOpts ...SecurityInsightTraitOption, ) (*v2.Resource, error) { - allTraitOpts := append([]SecurityInsightTraitOption{ - WithInsightValue(value), - WithInsightUserTarget(userEmail), - }, traitOpts...) - - trait, err := NewSecurityInsightTrait(insightType, allTraitOpts...) + trait, err := NewSecurityInsightTrait(traitOpts...) 
if err != nil { return nil, err } - opts = append(opts, WithAnnotation(trait)) - - return NewResource(name, resourceType, objectID, opts...) + return NewResource(name, resourceType, objectID, WithAnnotation(trait)) } -// NewResourceSecurityInsightResource creates a security insight resource with a direct resource reference. -// Use this when the connector knows the actual resource (synced by this connector). -func NewResourceSecurityInsightResource( - name string, - resourceType *v2.ResourceType, - objectID interface{}, - insightType string, - value string, - targetResourceId *v2.ResourceId, - traitOpts []SecurityInsightTraitOption, - opts ...ResourceOption, -) (*v2.Resource, error) { - allTraitOpts := append([]SecurityInsightTraitOption{ - WithInsightValue(value), - WithInsightResourceTarget(targetResourceId), - }, traitOpts...) - - trait, err := NewSecurityInsightTrait(insightType, allTraitOpts...) - if err != nil { - return nil, err +// IsSecurityInsightResource checks if a resource type has the TRAIT_SECURITY_INSIGHT trait. +func IsSecurityInsightResource(resourceType *v2.ResourceType) bool { + for _, trait := range resourceType.GetTraits() { + if trait == v2.ResourceType_TRAIT_SECURITY_INSIGHT { + return true + } } + return false +} - opts = append(opts, WithAnnotation(trait)) +// --- Insight type checkers --- - return NewResource(name, resourceType, objectID, opts...) +// IsRiskScore returns true if the insight is a risk score. +func IsRiskScore(trait *v2.SecurityInsightTrait) bool { + return trait.GetRiskScore() != nil } -// NewExternalResourceSecurityInsightResource creates a security insight resource targeting an external resource. -// Use this when the connector only has an external ID (e.g., ARN) and needs Uplift to resolve it. -func NewExternalResourceSecurityInsightResource( - name string, - resourceType *v2.ResourceType, - objectID interface{}, - insightType string, - value string, - targetExternalId string, - targetAppHint string, - traitOpts []SecurityInsightTraitOption, - opts ...ResourceOption, -) (*v2.Resource, error) { - allTraitOpts := append([]SecurityInsightTraitOption{ - WithInsightValue(value), - WithInsightExternalResourceTarget(targetExternalId, targetAppHint), - }, traitOpts...) +// IsIssue returns true if the insight is an issue. +func IsIssue(trait *v2.SecurityInsightTrait) bool { + return trait.GetIssue() != nil +} - trait, err := NewSecurityInsightTrait(insightType, allTraitOpts...) - if err != nil { - return nil, err +// GetInsightValue returns the value of the insight (either risk score or issue). +func GetInsightValue(trait *v2.SecurityInsightTrait) string { + if rs := trait.GetRiskScore(); rs != nil { + return rs.GetValue() } - - opts = append(opts, WithAnnotation(trait)) - - return NewResource(name, resourceType, objectID, opts...) + if issue := trait.GetIssue(); issue != nil { + return issue.GetValue() + } + return "" } -// IsSecurityInsightResource checks if a resource type has the TRAIT_SECURITY_INSIGHT trait. -func IsSecurityInsightResource(resourceType *v2.ResourceType) bool { - for _, trait := range resourceType.GetTraits() { - if trait == v2.ResourceType_TRAIT_SECURITY_INSIGHT { - return true - } +// GetIssueSeverity returns the severity of an issue insight, or empty string if not set or not an issue. 
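
A sketch of the reworked options pattern: per the signatures above, the issue value goes through WithIssue, the severity through the separate WithIssueSeverity option, and exactly one target option is required. The resource type, object IDs, email, and score below are placeholders, and the snippet assumes the package is imported as resourceSdk ("github.com/conductorone/baton-sdk/pkg/types/resource").

    // Sketch: one issue insight and one risk-score insight.
    func buildInsights(insightType *v2.ResourceType, targetID *v2.ResourceId) ([]*v2.Resource, error) {
        finding, err := resourceSdk.NewSecurityInsightResource(
            "Critical Vulnerability",
            insightType,
            "vuln-456",
            resourceSdk.WithIssue("CVE-2024-1234"),
            resourceSdk.WithIssueSeverity("Critical"),
            resourceSdk.WithInsightUserTarget("user@example.com"),
        )
        if err != nil {
            return nil, err
        }
        score, err := resourceSdk.NewSecurityInsightResource(
            "User Risk Score",
            insightType,
            "user-123",
            resourceSdk.WithRiskScore("85"),
            resourceSdk.WithInsightResourceTarget(targetID),
        )
        if err != nil {
            return nil, err
        }
        return []*v2.Resource{finding, score}, nil
    }

GetInsightValue and GetIssueSeverity can then read the value and severity back regardless of which insight type was set.
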
+func GetIssueSeverity(trait *v2.SecurityInsightTrait) string { + if issue := trait.GetIssue(); issue != nil { + return issue.GetSeverity() } - return false + return "" } // --- Target type checkers --- diff --git a/vendor/modules.txt b/vendor/modules.txt index b9cff5c8..04dbd5ed 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -159,7 +159,7 @@ github.com/benbjohnson/clock # github.com/cenkalti/backoff/v4 v4.3.0 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 -# github.com/conductorone/baton-sdk v0.6.9 +# github.com/conductorone/baton-sdk v0.6.24 ## explicit; go 1.25.2 github.com/conductorone/baton-sdk/internal/connector github.com/conductorone/baton-sdk/pb/c1/c1z/v1 @@ -204,7 +204,6 @@ github.com/conductorone/baton-sdk/pkg/sync/expand github.com/conductorone/baton-sdk/pkg/sync/expand/scc github.com/conductorone/baton-sdk/pkg/synccompactor github.com/conductorone/baton-sdk/pkg/synccompactor/attached -github.com/conductorone/baton-sdk/pkg/synccompactor/naive github.com/conductorone/baton-sdk/pkg/tasks github.com/conductorone/baton-sdk/pkg/tasks/c1api github.com/conductorone/baton-sdk/pkg/tasks/local From 0e08af233ea452ab02c181db1c08bf37fe2b7cb1 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Wed, 28 Jan 2026 16:38:50 +0530 Subject: [PATCH 08/19] more updates --- go.mod | 2 +- go.sum | 4 +- pkg/connector/team.go | 150 +++++- .../baton-sdk/pb/c1/config/v1/rules.pb.go | 125 ++++- .../pb/c1/config/v1/rules.pb.validate.go | 14 + .../pb/c1/config/v1/rules_protoopaque.pb.go | 126 ++++- .../c1/connector/v2/annotation_resource.pb.go | 110 +++++ .../v2/annotation_resource.pb.validate.go | 138 ++++++ .../v2/annotation_resource_protoopaque.pb.go | 110 +++++ .../pb/c1/connector/v2/annotation_trait.pb.go | 396 ++++++++++++--- .../v2/annotation_trait.pb.validate.go | 453 ++++++++++++++++++ .../v2/annotation_trait_protoopaque.pb.go | 397 ++++++++++++--- .../pb/c1/connector/v2/resource.pb.go | 65 ++- .../connector/v2/resource_protoopaque.pb.go | 37 +- .../baton-sdk/pkg/actions/actions.go | 2 +- .../baton-sdk/pkg/cli/commands.go | 82 +++- .../baton-sdk/pkg/config/config.go | 25 +- .../pkg/connectorbuilder/accounts.go | 28 +- .../baton-sdk/pkg/connectorbuilder/actions.go | 8 +- .../pkg/connectorbuilder/connectorbuilder.go | 10 +- .../pkg/connectorbuilder/credentials.go | 13 +- .../baton-sdk/pkg/connectorbuilder/events.go | 2 +- .../pkg/connectorbuilder/resource_manager.go | 21 +- .../connectorbuilder/resource_provisioner.go | 16 +- .../pkg/connectorbuilder/resource_syncer.go | 76 +-- .../baton-sdk/pkg/connectorbuilder/tickets.go | 64 ++- .../baton-sdk/pkg/connectorrunner/runner.go | 108 +++-- .../pkg/connectorstore/connectorstore.go | 12 +- .../baton-sdk/pkg/dotc1z/c1file.go | 24 + .../baton-sdk/pkg/dotc1z/c1file_attached.go | 227 ++++++++- .../conductorone/baton-sdk/pkg/dotc1z/diff.go | 2 +- .../baton-sdk/pkg/dotc1z/session_store.go | 6 +- .../baton-sdk/pkg/dotc1z/sql_helpers.go | 7 +- .../baton-sdk/pkg/dotc1z/sync_runs.go | 134 ++++-- .../baton-sdk/pkg/field/rule_builders.go | 33 ++ .../baton-sdk/pkg/field/validation.go | 52 ++ .../baton-sdk/pkg/metrics/instrumentor.go | 58 ++- .../baton-sdk/pkg/provisioner/provisioner.go | 4 +- .../conductorone/baton-sdk/pkg/sdk/version.go | 2 +- .../conductorone/baton-sdk/pkg/sync/syncer.go | 3 - .../pkg/synccompactor/attached/attached.go | 56 ++- .../baton-sdk/pkg/synccompactor/compactor.go | 11 + .../baton-sdk/pkg/tasks/c1api/actions.go | 18 + .../baton-sdk/pkg/types/grant/grant.go | 3 +- .../baton-sdk/pkg/types/resource/resource.go | 56 ++- 
.../pkg/types/resource/role_scope_trait.go | 41 ++ .../pkg/types/resource/role_trait.go | 22 +- vendor/modules.txt | 2 +- 48 files changed, 2939 insertions(+), 416 deletions(-) create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource.pb.validate.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_protoopaque.pb.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_scope_trait.go diff --git a/go.mod b/go.mod index ee1d2d76..e8784351 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/conductorone/baton-github go 1.25.2 require ( - github.com/conductorone/baton-sdk v0.6.24 + github.com/conductorone/baton-sdk v0.7.4 github.com/deckarep/golang-set/v2 v2.8.0 github.com/ennyjfrick/ruleguard-logfatal v0.0.2 github.com/golang-jwt/jwt/v5 v5.2.2 diff --git a/go.sum b/go.sum index 6aa1b6e0..45ac2db2 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/conductorone/baton-sdk v0.6.24 h1:0Uc0+EyJZx36a6XEoLurqsW2z/2yJVtMYxvMOn1CEf4= -github.com/conductorone/baton-sdk v0.6.24/go.mod h1:9S5feBOuIJxlNdGmkv3ObkCNHbVyOHr6foNrIrk+d4Y= +github.com/conductorone/baton-sdk v0.7.4 h1:JD79NYgIficX00ucugU/5//r2rpGPNqAsHlZsgE0GCM= +github.com/conductorone/baton-sdk v0.7.4/go.mod h1:9S5feBOuIJxlNdGmkv3ObkCNHbVyOHr6foNrIrk+d4Y= github.com/conductorone/dpop v0.2.3 h1:s91U3845GHQ6P6FWrdNr2SEOy1ES/jcFs1JtKSl2S+o= github.com/conductorone/dpop v0.2.3/go.mod h1:gyo8TtzB9SCFCsjsICH4IaLZ7y64CcrDXMOPBwfq/3s= github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 h1:kLMCNIh0Mo2vbvvkCmJ3ixsPbXEJ6HPcW53Ku9yje3s= diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 92512e76..f220f884 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -390,34 +390,46 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr { Name: "name", DisplayName: "Team Name", - Description: "The name of the team to create", + Description: "The name of the team.", Field: &config.Field_StringField{}, IsRequired: true, }, { - Name: "parent", - DisplayName: "Parent Organization", - Description: "The organization to create the team in", - Field: &config.Field_ResourceIdField{ + Name: "description", + DisplayName: "Description", + Description: "The description of the team.", + Field: &config.Field_StringField{}, + }, + { + Name: "org", + DisplayName: "Organization", + Description: "The organization name. 
The name is not case sensitive.", + Field: &config.Field_ResourceIdField{ ResourceIdField: &config.ResourceIdField{ Rules: &config.ResourceIDRules{ AllowedResourceTypeIds: []string{resourceTypeOrg.Id}, }, }, }, - IsRequired: true, + IsRequired: true, }, { - Name: "description", - DisplayName: "Description", - Description: "A description of the team", - Field: &config.Field_StringField{}, + Name: "parent", + DisplayName: "Parent Team ID", + Description: "The name of a team to set as the parent team.", + Field: &config.Field_ResourceIdField{ + ResourceIdField: &config.ResourceIdField{ + Rules: &config.ResourceIDRules{ + AllowedResourceTypeIds: []string{resourceTypeTeam.Id}, + }, + }, + }, }, { Name: "privacy", DisplayName: "Privacy", - Description: "The privacy level: 'secret' or 'closed'", - Field: &config.Field_StringField{ + Description: "The privacy level of the team", + Field: &config.Field_StringField{ StringField: &config.StringField{ Options: []*config.StringFieldOption{ {Value: "secret", DisplayName: "Secret (only visible to org owners and team members)"}, @@ -427,14 +439,35 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr }, }, { - Name: "notification_setting", - DisplayName: "Notification Setting", - Description: "The notification setting for the team", - Field: &config.Field_StringField{ - StringField: &config.StringField{ - Options: []*config.StringFieldOption{ - {Value: "notifications_enabled", DisplayName: "Enabled"}, - {Value: "notifications_disabled", DisplayName: "Disabled"}, + Name: "notifications_enabled", + DisplayName: "Team Notifications", + Description: "Enable team notifications. When enabled, team members receive notifications when the team is @mentioned. Default: enabled", + Field: &config.Field_BoolField{ + BoolField: &config.BoolField{ + DefaultValue: true, + }, + }, + }, + { + Name: "maintainers", + DisplayName: "Maintainers", + Description: "List GitHub usernames for organization members who will become team maintainers.", + Field: &config.Field_ResourceIdSliceField{ + ResourceIdSliceField: &config.ResourceIdSliceField{ + Rules: &config.RepeatedResourceIdRules{ + AllowedResourceTypeIds: []string{resourceTypeUser.Id}, + }, + }, + }, + }, + { + Name: "repo_names", + DisplayName: "Repository Names", + Description: "The full name (e.g., organization-name/repository-name) of repositories to add the team to.", + Field: &config.Field_ResourceIdSliceField{ + ResourceIdSliceField: &config.ResourceIdSliceField{ + Rules: &config.RepeatedResourceIdRules{ + AllowedResourceTypeIds: []string{resourceTypeRepository.Id}, }, }, }, @@ -551,7 +584,7 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str return nil, nil, err } - parentResourceID, err := actions.RequireResourceIDArg(args, "parent") + parentResourceID, err := actions.RequireResourceIDArg(args, "org") if err != nil { return nil, nil, err } @@ -577,14 +610,79 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str newTeam.Description = github.Ptr(description) } + // Check if this is a nested team (has parent) + isNestedTeam := false + if parentTeamResourceID, ok := actions.GetResourceIDArg(args, "parent"); ok { + parentTeamID, err := strconv.ParseInt(parentTeamResourceID.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid parent team ID: %w", err) + } + newTeam.ParentTeamID = github.Ptr(parentTeamID) + isNestedTeam = true + } + + // Handle privacy with constraints based on team type: + // - For non-nested 
teams: "secret" (default) or "closed" + // - For nested/child teams: only "closed" is allowed (default: closed) if privacy, ok := actions.GetStringArg(args, "privacy"); ok && privacy != "" { - if privacy == "secret" || privacy == "closed" { - newTeam.Privacy = github.Ptr(privacy) + if isNestedTeam { + // Nested teams can only be "closed" + if privacy == "secret" { + l.Warn("github-connector: secret privacy not allowed for nested teams, using closed", + zap.String("requested_privacy", privacy), + ) + } + newTeam.Privacy = github.Ptr("closed") } else { - l.Warn("github-connector: invalid privacy value, using default", - zap.String("provided_privacy", privacy), - ) + // Non-nested teams can be "secret" or "closed" + if privacy == "secret" || privacy == "closed" { + newTeam.Privacy = github.Ptr(privacy) + } + } + } else if isNestedTeam { + // Default for nested teams is "closed" + newTeam.Privacy = github.Ptr("closed") + } + // Note: Default for non-nested teams is "secret" (handled by GitHub API) + + if notificationsEnabled, ok := actions.GetBoolArg(args, "notifications_enabled"); ok { + if notificationsEnabled { + newTeam.NotificationSetting = github.Ptr("notifications_enabled") + } else { + newTeam.NotificationSetting = github.Ptr("notifications_disabled") + } + } + + if maintainerIDs, ok := actions.GetResourceIdListArg(args, "maintainers"); ok && len(maintainerIDs) > 0 { + var maintainerLogins []string + for _, rid := range maintainerIDs { + userID, err := strconv.ParseInt(rid.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid maintainer user ID %s: %w", rid.Resource, err) + } + user, resp, err := o.client.Users.GetByID(ctx, userID) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get user %d", userID)) + } + maintainerLogins = append(maintainerLogins, user.GetLogin()) + } + newTeam.Maintainers = maintainerLogins + } + + if repoIDs, ok := actions.GetResourceIdListArg(args, "repo_names"); ok && len(repoIDs) > 0 { + var repoFullNames []string + for _, rid := range repoIDs { + repoID, err := strconv.ParseInt(rid.Resource, 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("invalid repository ID %s: %w", rid.Resource, err) + } + repo, resp, err := o.client.Repositories.GetByID(ctx, repoID) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get repository %d", repoID)) + } + repoFullNames = append(repoFullNames, repo.GetFullName()) } + newTeam.RepoNames = repoFullNames } // Create the team via GitHub API diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.go index 11073384..afe1a1ff 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.go @@ -1487,8 +1487,20 @@ func (b0 ResourceIDRules_builder) Build() *ResourceIDRules { type RepeatedResourceIdRules struct { state protoimpl.MessageState `protogen:"hybrid.v1"` AllowedResourceTypeIds []string `protobuf:"bytes,1,rep,name=allowed_resource_type_ids,json=allowedResourceTypeIds,proto3" json:"allowed_resource_type_ids,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3,oneof" json:"min_items,omitempty"` + // MaxItems specifies that this field must have 
the specified number of + // items at a maximum + MaxItems *uint64 `protobuf:"varint,3,opt,name=max_items,json=maxItems,proto3,oneof" json:"max_items,omitempty"` + // Unique specifies that all elements in this field must be unique. + Unique bool `protobuf:"varint,4,opt,name=unique,proto3" json:"unique,omitempty"` + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool `protobuf:"varint,5,opt,name=validate_empty,json=validateEmpty,proto3" json:"validate_empty,omitempty"` + IsRequired bool `protobuf:"varint,6,opt,name=is_required,json=isRequired,proto3" json:"is_required,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RepeatedResourceIdRules) Reset() { @@ -1523,14 +1535,103 @@ func (x *RepeatedResourceIdRules) GetAllowedResourceTypeIds() []string { return nil } +func (x *RepeatedResourceIdRules) GetMinItems() uint64 { + if x != nil && x.MinItems != nil { + return *x.MinItems + } + return 0 +} + +func (x *RepeatedResourceIdRules) GetMaxItems() uint64 { + if x != nil && x.MaxItems != nil { + return *x.MaxItems + } + return 0 +} + +func (x *RepeatedResourceIdRules) GetUnique() bool { + if x != nil { + return x.Unique + } + return false +} + +func (x *RepeatedResourceIdRules) GetValidateEmpty() bool { + if x != nil { + return x.ValidateEmpty + } + return false +} + +func (x *RepeatedResourceIdRules) GetIsRequired() bool { + if x != nil { + return x.IsRequired + } + return false +} + func (x *RepeatedResourceIdRules) SetAllowedResourceTypeIds(v []string) { x.AllowedResourceTypeIds = v } +func (x *RepeatedResourceIdRules) SetMinItems(v uint64) { + x.MinItems = &v +} + +func (x *RepeatedResourceIdRules) SetMaxItems(v uint64) { + x.MaxItems = &v +} + +func (x *RepeatedResourceIdRules) SetUnique(v bool) { + x.Unique = v +} + +func (x *RepeatedResourceIdRules) SetValidateEmpty(v bool) { + x.ValidateEmpty = v +} + +func (x *RepeatedResourceIdRules) SetIsRequired(v bool) { + x.IsRequired = v +} + +func (x *RepeatedResourceIdRules) HasMinItems() bool { + if x == nil { + return false + } + return x.MinItems != nil +} + +func (x *RepeatedResourceIdRules) HasMaxItems() bool { + if x == nil { + return false + } + return x.MaxItems != nil +} + +func (x *RepeatedResourceIdRules) ClearMinItems() { + x.MinItems = nil +} + +func (x *RepeatedResourceIdRules) ClearMaxItems() { + x.MaxItems = nil +} + type RepeatedResourceIdRules_builder struct { _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. AllowedResourceTypeIds []string + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 + // MaxItems specifies that this field must have the specified number of + // items at a maximum + MaxItems *uint64 + // Unique specifies that all elements in this field must be unique. 
+ Unique bool + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool } func (b0 RepeatedResourceIdRules_builder) Build() *RepeatedResourceIdRules { @@ -1538,6 +1639,11 @@ func (b0 RepeatedResourceIdRules_builder) Build() *RepeatedResourceIdRules { b, x := &b0, m0 _, _ = b, x x.AllowedResourceTypeIds = b.AllowedResourceTypeIds + x.MinItems = b.MinItems + x.MaxItems = b.MaxItems + x.Unique = b.Unique + x.ValidateEmpty = b.ValidateEmpty + x.IsRequired = b.IsRequired return m0 } @@ -1630,9 +1736,19 @@ const file_c1_config_v1_rules_proto_rawDesc = "" + "\vis_required\x18\x02 \x01(\bR\n" + "isRequired\"L\n" + "\x0fResourceIDRules\x129\n" + - "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds\"T\n" + + "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds\"\x94\x02\n" + "\x17RepeatedResourceIdRules\x129\n" + - "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds*\x99\x02\n" + + "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds\x12 \n" + + "\tmin_items\x18\x02 \x01(\x04H\x00R\bminItems\x88\x01\x01\x12 \n" + + "\tmax_items\x18\x03 \x01(\x04H\x01R\bmaxItems\x88\x01\x01\x12\x16\n" + + "\x06unique\x18\x04 \x01(\bR\x06unique\x12%\n" + + "\x0evalidate_empty\x18\x05 \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x06 \x01(\bR\n" + + "isRequiredB\f\n" + + "\n" + + "_min_itemsB\f\n" + + "\n" + + "_max_items*\x99\x02\n" + "\x0fWellKnownString\x12!\n" + "\x1dWELL_KNOWN_STRING_UNSPECIFIED\x10\x00\x12\x1b\n" + "\x17WELL_KNOWN_STRING_EMAIL\x10\x01\x12\x1e\n" + @@ -1684,6 +1800,7 @@ func file_c1_config_v1_rules_proto_init() { } file_c1_config_v1_rules_proto_msgTypes[3].OneofWrappers = []any{} file_c1_config_v1_rules_proto_msgTypes[4].OneofWrappers = []any{} + file_c1_config_v1_rules_proto_msgTypes[7].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.validate.go index f5f49e9e..ef7e9c54 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules.pb.validate.go @@ -1015,6 +1015,20 @@ func (m *RepeatedResourceIdRules) validate(all bool) error { var errors []error + // no validation rules for Unique + + // no validation rules for ValidateEmpty + + // no validation rules for IsRequired + + if m.MinItems != nil { + // no validation rules for MinItems + } + + if m.MaxItems != nil { + // no validation rules for MaxItems + } + if len(errors) > 0 { return RepeatedResourceIdRulesMultiError(errors) } diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules_protoopaque.pb.go index 87964c85..a4f425ba 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/rules_protoopaque.pb.go @@ -1524,6 +1524,13 @@ func (b0 ResourceIDRules_builder) Build() *ResourceIDRules { type RepeatedResourceIdRules struct { state protoimpl.MessageState `protogen:"opaque.v1"` xxx_hidden_AllowedResourceTypeIds []string `protobuf:"bytes,1,rep,name=allowed_resource_type_ids,json=allowedResourceTypeIds,proto3"` + xxx_hidden_MinItems uint64 
`protobuf:"varint,2,opt,name=min_items,json=minItems,proto3,oneof"` + xxx_hidden_MaxItems uint64 `protobuf:"varint,3,opt,name=max_items,json=maxItems,proto3,oneof"` + xxx_hidden_Unique bool `protobuf:"varint,4,opt,name=unique,proto3"` + xxx_hidden_ValidateEmpty bool `protobuf:"varint,5,opt,name=validate_empty,json=validateEmpty,proto3"` + xxx_hidden_IsRequired bool `protobuf:"varint,6,opt,name=is_required,json=isRequired,proto3"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1560,14 +1567,107 @@ func (x *RepeatedResourceIdRules) GetAllowedResourceTypeIds() []string { return nil } +func (x *RepeatedResourceIdRules) GetMinItems() uint64 { + if x != nil { + return x.xxx_hidden_MinItems + } + return 0 +} + +func (x *RepeatedResourceIdRules) GetMaxItems() uint64 { + if x != nil { + return x.xxx_hidden_MaxItems + } + return 0 +} + +func (x *RepeatedResourceIdRules) GetUnique() bool { + if x != nil { + return x.xxx_hidden_Unique + } + return false +} + +func (x *RepeatedResourceIdRules) GetValidateEmpty() bool { + if x != nil { + return x.xxx_hidden_ValidateEmpty + } + return false +} + +func (x *RepeatedResourceIdRules) GetIsRequired() bool { + if x != nil { + return x.xxx_hidden_IsRequired + } + return false +} + func (x *RepeatedResourceIdRules) SetAllowedResourceTypeIds(v []string) { x.xxx_hidden_AllowedResourceTypeIds = v } +func (x *RepeatedResourceIdRules) SetMinItems(v uint64) { + x.xxx_hidden_MinItems = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 1, 6) +} + +func (x *RepeatedResourceIdRules) SetMaxItems(v uint64) { + x.xxx_hidden_MaxItems = v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 2, 6) +} + +func (x *RepeatedResourceIdRules) SetUnique(v bool) { + x.xxx_hidden_Unique = v +} + +func (x *RepeatedResourceIdRules) SetValidateEmpty(v bool) { + x.xxx_hidden_ValidateEmpty = v +} + +func (x *RepeatedResourceIdRules) SetIsRequired(v bool) { + x.xxx_hidden_IsRequired = v +} + +func (x *RepeatedResourceIdRules) HasMinItems() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 1) +} + +func (x *RepeatedResourceIdRules) HasMaxItems() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 2) +} + +func (x *RepeatedResourceIdRules) ClearMinItems() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 1) + x.xxx_hidden_MinItems = 0 +} + +func (x *RepeatedResourceIdRules) ClearMaxItems() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 2) + x.xxx_hidden_MaxItems = 0 +} + type RepeatedResourceIdRules_builder struct { _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. AllowedResourceTypeIds []string + // MinItems specifies that this field must have the specified number of + // items at a minimum + MinItems *uint64 + // MaxItems specifies that this field must have the specified number of + // items at a maximum + MaxItems *uint64 + // Unique specifies that all elements in this field must be unique. 
+ Unique bool + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + ValidateEmpty bool + IsRequired bool } func (b0 RepeatedResourceIdRules_builder) Build() *RepeatedResourceIdRules { @@ -1575,6 +1675,17 @@ func (b0 RepeatedResourceIdRules_builder) Build() *RepeatedResourceIdRules { b, x := &b0, m0 _, _ = b, x x.xxx_hidden_AllowedResourceTypeIds = b.AllowedResourceTypeIds + if b.MinItems != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 1, 6) + x.xxx_hidden_MinItems = *b.MinItems + } + if b.MaxItems != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 2, 6) + x.xxx_hidden_MaxItems = *b.MaxItems + } + x.xxx_hidden_Unique = b.Unique + x.xxx_hidden_ValidateEmpty = b.ValidateEmpty + x.xxx_hidden_IsRequired = b.IsRequired return m0 } @@ -1667,9 +1778,19 @@ const file_c1_config_v1_rules_proto_rawDesc = "" + "\vis_required\x18\x02 \x01(\bR\n" + "isRequired\"L\n" + "\x0fResourceIDRules\x129\n" + - "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds\"T\n" + + "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds\"\x94\x02\n" + "\x17RepeatedResourceIdRules\x129\n" + - "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds*\x99\x02\n" + + "\x19allowed_resource_type_ids\x18\x01 \x03(\tR\x16allowedResourceTypeIds\x12 \n" + + "\tmin_items\x18\x02 \x01(\x04H\x00R\bminItems\x88\x01\x01\x12 \n" + + "\tmax_items\x18\x03 \x01(\x04H\x01R\bmaxItems\x88\x01\x01\x12\x16\n" + + "\x06unique\x18\x04 \x01(\bR\x06unique\x12%\n" + + "\x0evalidate_empty\x18\x05 \x01(\bR\rvalidateEmpty\x12\x1f\n" + + "\vis_required\x18\x06 \x01(\bR\n" + + "isRequiredB\f\n" + + "\n" + + "_min_itemsB\f\n" + + "\n" + + "_max_items*\x99\x02\n" + "\x0fWellKnownString\x12!\n" + "\x1dWELL_KNOWN_STRING_UNSPECIFIED\x10\x00\x12\x1b\n" + "\x17WELL_KNOWN_STRING_EMAIL\x10\x01\x12\x1e\n" + @@ -1721,6 +1842,7 @@ func file_c1_config_v1_rules_proto_init() { } file_c1_config_v1_rules_proto_msgTypes[3].OneofWrappers = []any{} file_c1_config_v1_rules_proto_msgTypes[4].OneofWrappers = []any{} + file_c1_config_v1_rules_proto_msgTypes[7].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource.pb.go new file mode 100644 index 00000000..39fa94b8 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource.pb.go @@ -0,0 +1,110 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_resource.proto + +//go:build !protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Resource was not deleted because the resource does not exist. 
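
A sketch of how an action argument schema might use the new RepeatedResourceIdRules constraints, in the same non-opaque struct-literal style the connector code above uses. The resource type ID and bounds are illustrative, and proto.Uint64 (google.golang.org/protobuf/proto) supplies the optional min/max fields.

    // Sketch: a required slice of 1-10 unique team resource IDs.
    func teamsArgField() *config.Field_ResourceIdSliceField {
        return &config.Field_ResourceIdSliceField{
            ResourceIdSliceField: &config.ResourceIdSliceField{
                Rules: &config.RepeatedResourceIdRules{
                    AllowedResourceTypeIds: []string{"team"},
                    MinItems:               proto.Uint64(1),
                    MaxItems:               proto.Uint64(10),
                    Unique:                 true,
                    IsRequired:             true,
                },
            },
        }
    }
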
+type ResourceDoesNotExist struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceDoesNotExist) Reset() { + *x = ResourceDoesNotExist{} + mi := &file_c1_connector_v2_annotation_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceDoesNotExist) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDoesNotExist) ProtoMessage() {} + +func (x *ResourceDoesNotExist) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_resource_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type ResourceDoesNotExist_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 ResourceDoesNotExist_builder) Build() *ResourceDoesNotExist { + m0 := &ResourceDoesNotExist{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connector_v2_annotation_resource_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_resource_proto_rawDesc = "" + + "\n" + + ")c1/connector/v2/annotation_resource.proto\x12\x0fc1.connector.v2\"\x16\n" + + "\x14ResourceDoesNotExistB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_resource_proto_goTypes = []any{ + (*ResourceDoesNotExist)(nil), // 0: c1.connector.v2.ResourceDoesNotExist +} +var file_c1_connector_v2_annotation_resource_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_resource_proto_init() } +func file_c1_connector_v2_annotation_resource_proto_init() { + if File_c1_connector_v2_annotation_resource_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_resource_proto_rawDesc), len(file_c1_connector_v2_annotation_resource_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_resource_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_resource_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_resource_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_resource_proto = out.File + file_c1_connector_v2_annotation_resource_proto_goTypes = nil + file_c1_connector_v2_annotation_resource_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource.pb.validate.go new file mode 100644 index 00000000..06cd9eab --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource.pb.validate.go @@ -0,0 +1,138 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: c1/connector/v2/annotation_resource.proto + +package v2 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ResourceDoesNotExist with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ResourceDoesNotExist) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourceDoesNotExist with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourceDoesNotExistMultiError, or nil if none found. +func (m *ResourceDoesNotExist) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourceDoesNotExist) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ResourceDoesNotExistMultiError(errors) + } + + return nil +} + +// ResourceDoesNotExistMultiError is an error wrapping multiple validation +// errors returned by ResourceDoesNotExist.ValidateAll() if the designated +// constraints aren't met. +type ResourceDoesNotExistMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ResourceDoesNotExistMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourceDoesNotExistMultiError) AllErrors() []error { return m } + +// ResourceDoesNotExistValidationError is the validation error returned by +// ResourceDoesNotExist.Validate if the designated constraints aren't met. +type ResourceDoesNotExistValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourceDoesNotExistValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourceDoesNotExistValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourceDoesNotExistValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourceDoesNotExistValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ResourceDoesNotExistValidationError) ErrorName() string { + return "ResourceDoesNotExistValidationError" +} + +// Error satisfies the builtin error interface +func (e ResourceDoesNotExistValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourceDoesNotExist.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourceDoesNotExistValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourceDoesNotExistValidationError{} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_protoopaque.pb.go new file mode 100644 index 00000000..3c9ff590 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_resource_protoopaque.pb.go @@ -0,0 +1,110 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc (unknown) +// source: c1/connector/v2/annotation_resource.proto + +//go:build protoopaque + +package v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Resource was not deleted because the resource does not exist. +type ResourceDoesNotExist struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceDoesNotExist) Reset() { + *x = ResourceDoesNotExist{} + mi := &file_c1_connector_v2_annotation_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceDoesNotExist) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDoesNotExist) ProtoMessage() {} + +func (x *ResourceDoesNotExist) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_resource_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type ResourceDoesNotExist_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
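
A sketch of how a connector delete path might use the new ResourceDoesNotExist annotation to report an idempotent no-op. The alreadyGone flag stands in for whatever not-found handling the connector performs, and the snippet assumes the usual annotations and v2 imports.

    // Sketch: annotate a delete that targeted an already-missing resource.
    func deleteResult(alreadyGone bool) (annotations.Annotations, error) {
        var annos annotations.Annotations
        if alreadyGone {
            // Signals that the delete is a no-op because the resource no longer exists.
            annos.Update(v2.ResourceDoesNotExist_builder{}.Build())
        }
        return annos, nil
    }
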
+ +} + +func (b0 ResourceDoesNotExist_builder) Build() *ResourceDoesNotExist { + m0 := &ResourceDoesNotExist{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +var File_c1_connector_v2_annotation_resource_proto protoreflect.FileDescriptor + +const file_c1_connector_v2_annotation_resource_proto_rawDesc = "" + + "\n" + + ")c1/connector/v2/annotation_resource.proto\x12\x0fc1.connector.v2\"\x16\n" + + "\x14ResourceDoesNotExistB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" + +var file_c1_connector_v2_annotation_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_c1_connector_v2_annotation_resource_proto_goTypes = []any{ + (*ResourceDoesNotExist)(nil), // 0: c1.connector.v2.ResourceDoesNotExist +} +var file_c1_connector_v2_annotation_resource_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_c1_connector_v2_annotation_resource_proto_init() } +func file_c1_connector_v2_annotation_resource_proto_init() { + if File_c1_connector_v2_annotation_resource_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_resource_proto_rawDesc), len(file_c1_connector_v2_annotation_resource_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_c1_connector_v2_annotation_resource_proto_goTypes, + DependencyIndexes: file_c1_connector_v2_annotation_resource_proto_depIdxs, + MessageInfos: file_c1_connector_v2_annotation_resource_proto_msgTypes, + }.Build() + File_c1_connector_v2_annotation_resource_proto = out.File + file_c1_connector_v2_annotation_resource_proto_goTypes = nil + file_c1_connector_v2_annotation_resource_proto_depIdxs = nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.go index 7b79389a..8a50448f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.go @@ -583,10 +583,11 @@ func (b0 GroupTrait_builder) Build() *GroupTrait { } type RoleTrait struct { - state protoimpl.MessageState `protogen:"hybrid.v1"` - Profile *structpb.Struct `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Profile *structpb.Struct `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + RoleScopeConditions *RoleScopeConditions `protobuf:"bytes,2,opt,name=role_scope_conditions,json=roleScopeConditions,proto3" json:"role_scope_conditions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RoleTrait) Reset() { @@ -621,10 +622,21 @@ func (x *RoleTrait) GetProfile() *structpb.Struct { return nil } +func (x *RoleTrait) GetRoleScopeConditions() *RoleScopeConditions { + if x != nil { + return x.RoleScopeConditions + } + return nil +} + func (x *RoleTrait) SetProfile(v *structpb.Struct) { x.Profile = v } +func (x *RoleTrait) SetRoleScopeConditions(v 
*RoleScopeConditions) { + x.RoleScopeConditions = v +} + func (x *RoleTrait) HasProfile() bool { if x == nil { return false @@ -632,14 +644,26 @@ func (x *RoleTrait) HasProfile() bool { return x.Profile != nil } +func (x *RoleTrait) HasRoleScopeConditions() bool { + if x == nil { + return false + } + return x.RoleScopeConditions != nil +} + func (x *RoleTrait) ClearProfile() { x.Profile = nil } +func (x *RoleTrait) ClearRoleScopeConditions() { + x.RoleScopeConditions = nil +} + type RoleTrait_builder struct { _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. - Profile *structpb.Struct + Profile *structpb.Struct + RoleScopeConditions *RoleScopeConditions } func (b0 RoleTrait_builder) Build() *RoleTrait { @@ -647,6 +671,234 @@ func (b0 RoleTrait_builder) Build() *RoleTrait { b, x := &b0, m0 _, _ = b, x x.Profile = b.Profile + x.RoleScopeConditions = b.RoleScopeConditions + return m0 +} + +type RoleScopeConditions struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Conditions []*RoleScopeCondition `protobuf:"bytes,3,rep,name=conditions,proto3" json:"conditions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RoleScopeConditions) Reset() { + *x = RoleScopeConditions{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RoleScopeConditions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoleScopeConditions) ProtoMessage() {} + +func (x *RoleScopeConditions) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RoleScopeConditions) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *RoleScopeConditions) GetConditions() []*RoleScopeCondition { + if x != nil { + return x.Conditions + } + return nil +} + +func (x *RoleScopeConditions) SetType(v string) { + x.Type = v +} + +func (x *RoleScopeConditions) SetConditions(v []*RoleScopeCondition) { + x.Conditions = v +} + +type RoleScopeConditions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Type string + Conditions []*RoleScopeCondition +} + +func (b0 RoleScopeConditions_builder) Build() *RoleScopeConditions { + m0 := &RoleScopeConditions{} + b, x := &b0, m0 + _, _ = b, x + x.Type = b.Type + x.Conditions = b.Conditions + return m0 +} + +type RoleScopeCondition struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Expression string `protobuf:"bytes,1,opt,name=expression,proto3" json:"expression,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RoleScopeCondition) Reset() { + *x = RoleScopeCondition{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RoleScopeCondition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoleScopeCondition) ProtoMessage() {} + +func (x *RoleScopeCondition) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RoleScopeCondition) GetExpression() string { + if x != nil { + return x.Expression + } + return "" +} + +func (x *RoleScopeCondition) SetExpression(v string) { + x.Expression = v +} + +type RoleScopeCondition_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Expression string +} + +func (b0 RoleScopeCondition_builder) Build() *RoleScopeCondition { + m0 := &RoleScopeCondition{} + b, x := &b0, m0 + _, _ = b, x + x.Expression = b.Expression + return m0 +} + +// ScopeBindingTrait is used to scope a role to a resource or set of resources. +// The scope may be static (determined at crawl time) or dynamic (determined based on conditions). +// For example, in Azure a role definition can be scoped to a subscription, management group, or resource group. +// In that case, the role ID would be the resource ID of the role definition, and the scope resource ID would be the resource ID of the subscription, management group, or resource group. +type ScopeBindingTrait struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + RoleId *ResourceId `protobuf:"bytes,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"` // The role that is scoped. Must be a resource with the role trait. + // Remove required if we add more ways to scope roles. (eg: Expressions.) + ScopeResourceId *ResourceId `protobuf:"bytes,2,opt,name=scope_resource_id,json=scopeResourceId,proto3" json:"scope_resource_id,omitempty"` // The resource that the role is scoped to. 
+ unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScopeBindingTrait) Reset() { + *x = ScopeBindingTrait{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScopeBindingTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopeBindingTrait) ProtoMessage() {} + +func (x *ScopeBindingTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ScopeBindingTrait) GetRoleId() *ResourceId { + if x != nil { + return x.RoleId + } + return nil +} + +func (x *ScopeBindingTrait) GetScopeResourceId() *ResourceId { + if x != nil { + return x.ScopeResourceId + } + return nil +} + +func (x *ScopeBindingTrait) SetRoleId(v *ResourceId) { + x.RoleId = v +} + +func (x *ScopeBindingTrait) SetScopeResourceId(v *ResourceId) { + x.ScopeResourceId = v +} + +func (x *ScopeBindingTrait) HasRoleId() bool { + if x == nil { + return false + } + return x.RoleId != nil +} + +func (x *ScopeBindingTrait) HasScopeResourceId() bool { + if x == nil { + return false + } + return x.ScopeResourceId != nil +} + +func (x *ScopeBindingTrait) ClearRoleId() { + x.RoleId = nil +} + +func (x *ScopeBindingTrait) ClearScopeResourceId() { + x.ScopeResourceId = nil +} + +type ScopeBindingTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + RoleId *ResourceId + // Remove required if we add more ways to scope roles. (eg: Expressions.) 
+ ScopeResourceId *ResourceId +} + +func (b0 ScopeBindingTrait_builder) Build() *ScopeBindingTrait { + m0 := &ScopeBindingTrait{} + b, x := &b0, m0 + _, _ = b, x + x.RoleId = b.RoleId + x.ScopeResourceId = b.ScopeResourceId return m0 } @@ -663,7 +915,7 @@ type AppTrait struct { func (x *AppTrait) Reset() { *x = AppTrait{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -675,7 +927,7 @@ func (x *AppTrait) String() string { func (*AppTrait) ProtoMessage() {} func (x *AppTrait) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -810,7 +1062,7 @@ type SecretTrait struct { func (x *SecretTrait) Reset() { *x = SecretTrait{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -822,7 +1074,7 @@ func (x *SecretTrait) String() string { func (*SecretTrait) ProtoMessage() {} func (x *SecretTrait) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1000,7 +1252,7 @@ type UserTrait_Email struct { func (x *UserTrait_Email) Reset() { *x = UserTrait_Email{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1012,7 +1264,7 @@ func (x *UserTrait_Email) String() string { func (*UserTrait_Email) ProtoMessage() {} func (x *UserTrait_Email) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1072,7 +1324,7 @@ type UserTrait_Status struct { func (x *UserTrait_Status) Reset() { *x = UserTrait_Status{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1084,7 +1336,7 @@ func (x *UserTrait_Status) String() string { func (*UserTrait_Status) ProtoMessage() {} func (x *UserTrait_Status) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1142,7 +1394,7 @@ type UserTrait_MFAStatus struct { func (x *UserTrait_MFAStatus) Reset() { *x = UserTrait_MFAStatus{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1154,7 +1406,7 @@ func (x *UserTrait_MFAStatus) String() string { func (*UserTrait_MFAStatus) ProtoMessage() {} func (x *UserTrait_MFAStatus) 
ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1199,7 +1451,7 @@ type UserTrait_SSOStatus struct { func (x *UserTrait_SSOStatus) Reset() { *x = UserTrait_SSOStatus{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1211,7 +1463,7 @@ func (x *UserTrait_SSOStatus) String() string { func (*UserTrait_SSOStatus) ProtoMessage() {} func (x *UserTrait_SSOStatus) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1260,7 +1512,7 @@ type UserTrait_StructuredName struct { func (x *UserTrait_StructuredName) Reset() { *x = UserTrait_StructuredName{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1272,7 +1524,7 @@ func (x *UserTrait_StructuredName) String() string { func (*UserTrait_StructuredName) ProtoMessage() {} func (x *UserTrait_StructuredName) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1419,9 +1671,22 @@ const file_c1_connector_v2_annotation_trait_proto_rawDesc = "" + "\n" + "GroupTrait\x12-\n" + "\x04icon\x18\x01 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x121\n" + - "\aprofile\x18\x02 \x01(\v2\x17.google.protobuf.StructR\aprofile\">\n" + + "\aprofile\x18\x02 \x01(\v2\x17.google.protobuf.StructR\aprofile\"\x98\x01\n" + "\tRoleTrait\x121\n" + - "\aprofile\x18\x01 \x01(\v2\x17.google.protobuf.StructR\aprofile\"\x9a\x03\n" + + "\aprofile\x18\x01 \x01(\v2\x17.google.protobuf.StructR\aprofile\x12X\n" + + "\x15role_scope_conditions\x18\x02 \x01(\v2$.c1.connector.v2.RoleScopeConditionsR\x13roleScopeConditions\"n\n" + + "\x13RoleScopeConditions\x12\x12\n" + + "\x04type\x18\x01 \x01(\tR\x04type\x12C\n" + + "\n" + + "conditions\x18\x03 \x03(\v2#.c1.connector.v2.RoleScopeConditionR\n" + + "conditions\"4\n" + + "\x12RoleScopeCondition\x12\x1e\n" + + "\n" + + "expression\x18\x01 \x01(\tR\n" + + "expression\"\xa6\x01\n" + + "\x11ScopeBindingTrait\x12>\n" + + "\arole_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x06roleId\x12Q\n" + + "\x11scope_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x0fscopeResourceId\"\x9a\x03\n" + "\bAppTrait\x125\n" + "\bhelp_url\x18\x01 \x01(\tB\x1a\xfaB\x17r\x15 \x01(\x80\b:\bhttps://\xd0\x01\x01\x88\x01\x01R\ahelpUrl\x12-\n" + "\x04icon\x18\x02 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12-\n" + @@ -1448,7 +1713,7 @@ const file_c1_connector_v2_annotation_trait_proto_rawDesc = "" + "identityIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" var file_c1_connector_v2_annotation_trait_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var 
file_c1_connector_v2_annotation_trait_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_c1_connector_v2_annotation_trait_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_c1_connector_v2_annotation_trait_proto_goTypes = []any{ (UserTrait_AccountType)(0), // 0: c1.connector.v2.UserTrait.AccountType (UserTrait_Status_Status)(0), // 1: c1.connector.v2.UserTrait.Status.Status @@ -1456,48 +1721,55 @@ var file_c1_connector_v2_annotation_trait_proto_goTypes = []any{ (*UserTrait)(nil), // 3: c1.connector.v2.UserTrait (*GroupTrait)(nil), // 4: c1.connector.v2.GroupTrait (*RoleTrait)(nil), // 5: c1.connector.v2.RoleTrait - (*AppTrait)(nil), // 6: c1.connector.v2.AppTrait - (*SecretTrait)(nil), // 7: c1.connector.v2.SecretTrait - (*UserTrait_Email)(nil), // 8: c1.connector.v2.UserTrait.Email - (*UserTrait_Status)(nil), // 9: c1.connector.v2.UserTrait.Status - (*UserTrait_MFAStatus)(nil), // 10: c1.connector.v2.UserTrait.MFAStatus - (*UserTrait_SSOStatus)(nil), // 11: c1.connector.v2.UserTrait.SSOStatus - (*UserTrait_StructuredName)(nil), // 12: c1.connector.v2.UserTrait.StructuredName - (*structpb.Struct)(nil), // 13: google.protobuf.Struct - (*AssetRef)(nil), // 14: c1.connector.v2.AssetRef - (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp - (*ResourceId)(nil), // 16: c1.connector.v2.ResourceId + (*RoleScopeConditions)(nil), // 6: c1.connector.v2.RoleScopeConditions + (*RoleScopeCondition)(nil), // 7: c1.connector.v2.RoleScopeCondition + (*ScopeBindingTrait)(nil), // 8: c1.connector.v2.ScopeBindingTrait + (*AppTrait)(nil), // 9: c1.connector.v2.AppTrait + (*SecretTrait)(nil), // 10: c1.connector.v2.SecretTrait + (*UserTrait_Email)(nil), // 11: c1.connector.v2.UserTrait.Email + (*UserTrait_Status)(nil), // 12: c1.connector.v2.UserTrait.Status + (*UserTrait_MFAStatus)(nil), // 13: c1.connector.v2.UserTrait.MFAStatus + (*UserTrait_SSOStatus)(nil), // 14: c1.connector.v2.UserTrait.SSOStatus + (*UserTrait_StructuredName)(nil), // 15: c1.connector.v2.UserTrait.StructuredName + (*structpb.Struct)(nil), // 16: google.protobuf.Struct + (*AssetRef)(nil), // 17: c1.connector.v2.AssetRef + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp + (*ResourceId)(nil), // 19: c1.connector.v2.ResourceId } var file_c1_connector_v2_annotation_trait_proto_depIdxs = []int32{ - 8, // 0: c1.connector.v2.UserTrait.emails:type_name -> c1.connector.v2.UserTrait.Email - 9, // 1: c1.connector.v2.UserTrait.status:type_name -> c1.connector.v2.UserTrait.Status - 13, // 2: c1.connector.v2.UserTrait.profile:type_name -> google.protobuf.Struct - 14, // 3: c1.connector.v2.UserTrait.icon:type_name -> c1.connector.v2.AssetRef + 11, // 0: c1.connector.v2.UserTrait.emails:type_name -> c1.connector.v2.UserTrait.Email + 12, // 1: c1.connector.v2.UserTrait.status:type_name -> c1.connector.v2.UserTrait.Status + 16, // 2: c1.connector.v2.UserTrait.profile:type_name -> google.protobuf.Struct + 17, // 3: c1.connector.v2.UserTrait.icon:type_name -> c1.connector.v2.AssetRef 0, // 4: c1.connector.v2.UserTrait.account_type:type_name -> c1.connector.v2.UserTrait.AccountType - 15, // 5: c1.connector.v2.UserTrait.created_at:type_name -> google.protobuf.Timestamp - 15, // 6: c1.connector.v2.UserTrait.last_login:type_name -> google.protobuf.Timestamp - 10, // 7: c1.connector.v2.UserTrait.mfa_status:type_name -> c1.connector.v2.UserTrait.MFAStatus - 11, // 8: c1.connector.v2.UserTrait.sso_status:type_name -> c1.connector.v2.UserTrait.SSOStatus - 12, // 9: c1.connector.v2.UserTrait.structured_name:type_name -> 
c1.connector.v2.UserTrait.StructuredName - 14, // 10: c1.connector.v2.GroupTrait.icon:type_name -> c1.connector.v2.AssetRef - 13, // 11: c1.connector.v2.GroupTrait.profile:type_name -> google.protobuf.Struct - 13, // 12: c1.connector.v2.RoleTrait.profile:type_name -> google.protobuf.Struct - 14, // 13: c1.connector.v2.AppTrait.icon:type_name -> c1.connector.v2.AssetRef - 14, // 14: c1.connector.v2.AppTrait.logo:type_name -> c1.connector.v2.AssetRef - 13, // 15: c1.connector.v2.AppTrait.profile:type_name -> google.protobuf.Struct - 2, // 16: c1.connector.v2.AppTrait.flags:type_name -> c1.connector.v2.AppTrait.AppFlag - 13, // 17: c1.connector.v2.SecretTrait.profile:type_name -> google.protobuf.Struct - 15, // 18: c1.connector.v2.SecretTrait.created_at:type_name -> google.protobuf.Timestamp - 15, // 19: c1.connector.v2.SecretTrait.expires_at:type_name -> google.protobuf.Timestamp - 15, // 20: c1.connector.v2.SecretTrait.last_used_at:type_name -> google.protobuf.Timestamp - 16, // 21: c1.connector.v2.SecretTrait.created_by_id:type_name -> c1.connector.v2.ResourceId - 16, // 22: c1.connector.v2.SecretTrait.identity_id:type_name -> c1.connector.v2.ResourceId - 1, // 23: c1.connector.v2.UserTrait.Status.status:type_name -> c1.connector.v2.UserTrait.Status.Status - 24, // [24:24] is the sub-list for method output_type - 24, // [24:24] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 18, // 5: c1.connector.v2.UserTrait.created_at:type_name -> google.protobuf.Timestamp + 18, // 6: c1.connector.v2.UserTrait.last_login:type_name -> google.protobuf.Timestamp + 13, // 7: c1.connector.v2.UserTrait.mfa_status:type_name -> c1.connector.v2.UserTrait.MFAStatus + 14, // 8: c1.connector.v2.UserTrait.sso_status:type_name -> c1.connector.v2.UserTrait.SSOStatus + 15, // 9: c1.connector.v2.UserTrait.structured_name:type_name -> c1.connector.v2.UserTrait.StructuredName + 17, // 10: c1.connector.v2.GroupTrait.icon:type_name -> c1.connector.v2.AssetRef + 16, // 11: c1.connector.v2.GroupTrait.profile:type_name -> google.protobuf.Struct + 16, // 12: c1.connector.v2.RoleTrait.profile:type_name -> google.protobuf.Struct + 6, // 13: c1.connector.v2.RoleTrait.role_scope_conditions:type_name -> c1.connector.v2.RoleScopeConditions + 7, // 14: c1.connector.v2.RoleScopeConditions.conditions:type_name -> c1.connector.v2.RoleScopeCondition + 19, // 15: c1.connector.v2.ScopeBindingTrait.role_id:type_name -> c1.connector.v2.ResourceId + 19, // 16: c1.connector.v2.ScopeBindingTrait.scope_resource_id:type_name -> c1.connector.v2.ResourceId + 17, // 17: c1.connector.v2.AppTrait.icon:type_name -> c1.connector.v2.AssetRef + 17, // 18: c1.connector.v2.AppTrait.logo:type_name -> c1.connector.v2.AssetRef + 16, // 19: c1.connector.v2.AppTrait.profile:type_name -> google.protobuf.Struct + 2, // 20: c1.connector.v2.AppTrait.flags:type_name -> c1.connector.v2.AppTrait.AppFlag + 16, // 21: c1.connector.v2.SecretTrait.profile:type_name -> google.protobuf.Struct + 18, // 22: c1.connector.v2.SecretTrait.created_at:type_name -> google.protobuf.Timestamp + 18, // 23: c1.connector.v2.SecretTrait.expires_at:type_name -> google.protobuf.Timestamp + 18, // 24: c1.connector.v2.SecretTrait.last_used_at:type_name -> google.protobuf.Timestamp + 19, // 25: c1.connector.v2.SecretTrait.created_by_id:type_name -> c1.connector.v2.ResourceId + 19, // 26: c1.connector.v2.SecretTrait.identity_id:type_name -> 
c1.connector.v2.ResourceId + 1, // 27: c1.connector.v2.UserTrait.Status.status:type_name -> c1.connector.v2.UserTrait.Status.Status + 28, // [28:28] is the sub-list for method output_type + 28, // [28:28] is the sub-list for method input_type + 28, // [28:28] is the sub-list for extension type_name + 28, // [28:28] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name } func init() { file_c1_connector_v2_annotation_trait_proto_init() } @@ -1513,7 +1785,7 @@ func file_c1_connector_v2_annotation_trait_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_trait_proto_rawDesc), len(file_c1_connector_v2_annotation_trait_proto_rawDesc)), NumEnums: 3, - NumMessages: 10, + NumMessages: 13, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.validate.go index a0288dc3..c79b43ca 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait.pb.validate.go @@ -632,6 +632,35 @@ func (m *RoleTrait) validate(all bool) error { } } + if all { + switch v := interface{}(m.GetRoleScopeConditions()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoleTraitValidationError{ + field: "RoleScopeConditions", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoleTraitValidationError{ + field: "RoleScopeConditions", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoleScopeConditions()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoleTraitValidationError{ + field: "RoleScopeConditions", + reason: "embedded message failed validation", + cause: err, + } + } + } + if len(errors) > 0 { return RoleTraitMultiError(errors) } @@ -709,6 +738,430 @@ var _ interface { ErrorName() string } = RoleTraitValidationError{} +// Validate checks the field values on RoleScopeConditions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RoleScopeConditions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoleScopeConditions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RoleScopeConditionsMultiError, or nil if none found. 
+func (m *RoleScopeConditions) ValidateAll() error { + return m.validate(true) +} + +func (m *RoleScopeConditions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Type + + for idx, item := range m.GetConditions() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, RoleScopeConditionsValidationError{ + field: fmt.Sprintf("Conditions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, RoleScopeConditionsValidationError{ + field: fmt.Sprintf("Conditions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return RoleScopeConditionsValidationError{ + field: fmt.Sprintf("Conditions[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return RoleScopeConditionsMultiError(errors) + } + + return nil +} + +// RoleScopeConditionsMultiError is an error wrapping multiple validation +// errors returned by RoleScopeConditions.ValidateAll() if the designated +// constraints aren't met. +type RoleScopeConditionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoleScopeConditionsMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoleScopeConditionsMultiError) AllErrors() []error { return m } + +// RoleScopeConditionsValidationError is the validation error returned by +// RoleScopeConditions.Validate if the designated constraints aren't met. +type RoleScopeConditionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoleScopeConditionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoleScopeConditionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoleScopeConditionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoleScopeConditionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RoleScopeConditionsValidationError) ErrorName() string { + return "RoleScopeConditionsValidationError" +} + +// Error satisfies the builtin error interface +func (e RoleScopeConditionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoleScopeConditions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoleScopeConditionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoleScopeConditionsValidationError{} + +// Validate checks the field values on RoleScopeCondition with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *RoleScopeCondition) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RoleScopeCondition with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// RoleScopeConditionMultiError, or nil if none found. +func (m *RoleScopeCondition) ValidateAll() error { + return m.validate(true) +} + +func (m *RoleScopeCondition) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Expression + + if len(errors) > 0 { + return RoleScopeConditionMultiError(errors) + } + + return nil +} + +// RoleScopeConditionMultiError is an error wrapping multiple validation errors +// returned by RoleScopeCondition.ValidateAll() if the designated constraints +// aren't met. +type RoleScopeConditionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m RoleScopeConditionMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RoleScopeConditionMultiError) AllErrors() []error { return m } + +// RoleScopeConditionValidationError is the validation error returned by +// RoleScopeCondition.Validate if the designated constraints aren't met. +type RoleScopeConditionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RoleScopeConditionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RoleScopeConditionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RoleScopeConditionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RoleScopeConditionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RoleScopeConditionValidationError) ErrorName() string { + return "RoleScopeConditionValidationError" +} + +// Error satisfies the builtin error interface +func (e RoleScopeConditionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRoleScopeCondition.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RoleScopeConditionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RoleScopeConditionValidationError{} + +// Validate checks the field values on ScopeBindingTrait with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ScopeBindingTrait) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ScopeBindingTrait with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ScopeBindingTraitMultiError, or nil if none found. 
+func (m *ScopeBindingTrait) ValidateAll() error { + return m.validate(true) +} + +func (m *ScopeBindingTrait) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if m.GetRoleId() == nil { + err := ScopeBindingTraitValidationError{ + field: "RoleId", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetRoleId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopeBindingTraitValidationError{ + field: "RoleId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopeBindingTraitValidationError{ + field: "RoleId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRoleId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopeBindingTraitValidationError{ + field: "RoleId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if m.GetScopeResourceId() == nil { + err := ScopeBindingTraitValidationError{ + field: "ScopeResourceId", + reason: "value is required", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetScopeResourceId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ScopeBindingTraitValidationError{ + field: "ScopeResourceId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ScopeBindingTraitValidationError{ + field: "ScopeResourceId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetScopeResourceId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ScopeBindingTraitValidationError{ + field: "ScopeResourceId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ScopeBindingTraitMultiError(errors) + } + + return nil +} + +// ScopeBindingTraitMultiError is an error wrapping multiple validation errors +// returned by ScopeBindingTrait.ValidateAll() if the designated constraints +// aren't met. +type ScopeBindingTraitMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ScopeBindingTraitMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ScopeBindingTraitMultiError) AllErrors() []error { return m } + +// ScopeBindingTraitValidationError is the validation error returned by +// ScopeBindingTrait.Validate if the designated constraints aren't met. +type ScopeBindingTraitValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ScopeBindingTraitValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ScopeBindingTraitValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ScopeBindingTraitValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ScopeBindingTraitValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ScopeBindingTraitValidationError) ErrorName() string { + return "ScopeBindingTraitValidationError" +} + +// Error satisfies the builtin error interface +func (e ScopeBindingTraitValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sScopeBindingTrait.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ScopeBindingTraitValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ScopeBindingTraitValidationError{} + // Validate checks the field values on AppTrait with the rules defined in the // proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait_protoopaque.pb.go index c7a4531b..db2e7db8 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_trait_protoopaque.pb.go @@ -583,10 +583,11 @@ func (b0 GroupTrait_builder) Build() *GroupTrait { } type RoleTrait struct { - state protoimpl.MessageState `protogen:"opaque.v1"` - xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,1,opt,name=profile,proto3"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Profile *structpb.Struct `protobuf:"bytes,1,opt,name=profile,proto3"` + xxx_hidden_RoleScopeConditions *RoleScopeConditions `protobuf:"bytes,2,opt,name=role_scope_conditions,json=roleScopeConditions,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RoleTrait) Reset() { @@ -621,10 +622,21 @@ func (x *RoleTrait) GetProfile() *structpb.Struct { return nil } +func (x *RoleTrait) GetRoleScopeConditions() *RoleScopeConditions { + if x != nil { + return x.xxx_hidden_RoleScopeConditions + } + return nil +} + func (x *RoleTrait) SetProfile(v *structpb.Struct) { x.xxx_hidden_Profile = v } +func (x *RoleTrait) SetRoleScopeConditions(v *RoleScopeConditions) { + x.xxx_hidden_RoleScopeConditions = v +} + func (x *RoleTrait) HasProfile() bool { if x == nil { return false @@ -632,14 +644,26 @@ func (x *RoleTrait) HasProfile() bool { return x.xxx_hidden_Profile != nil } +func (x *RoleTrait) HasRoleScopeConditions() bool { + if x == nil { + return false + } + return x.xxx_hidden_RoleScopeConditions != nil +} + func (x *RoleTrait) ClearProfile() { x.xxx_hidden_Profile = nil } +func (x *RoleTrait) ClearRoleScopeConditions() { + x.xxx_hidden_RoleScopeConditions = nil +} + type RoleTrait_builder struct { _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
- Profile *structpb.Struct + Profile *structpb.Struct + RoleScopeConditions *RoleScopeConditions } func (b0 RoleTrait_builder) Build() *RoleTrait { @@ -647,6 +671,235 @@ func (b0 RoleTrait_builder) Build() *RoleTrait { b, x := &b0, m0 _, _ = b, x x.xxx_hidden_Profile = b.Profile + x.xxx_hidden_RoleScopeConditions = b.RoleScopeConditions + return m0 +} + +type RoleScopeConditions struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Type string `protobuf:"bytes,1,opt,name=type,proto3"` + xxx_hidden_Conditions *[]*RoleScopeCondition `protobuf:"bytes,3,rep,name=conditions,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RoleScopeConditions) Reset() { + *x = RoleScopeConditions{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RoleScopeConditions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoleScopeConditions) ProtoMessage() {} + +func (x *RoleScopeConditions) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RoleScopeConditions) GetType() string { + if x != nil { + return x.xxx_hidden_Type + } + return "" +} + +func (x *RoleScopeConditions) GetConditions() []*RoleScopeCondition { + if x != nil { + if x.xxx_hidden_Conditions != nil { + return *x.xxx_hidden_Conditions + } + } + return nil +} + +func (x *RoleScopeConditions) SetType(v string) { + x.xxx_hidden_Type = v +} + +func (x *RoleScopeConditions) SetConditions(v []*RoleScopeCondition) { + x.xxx_hidden_Conditions = &v +} + +type RoleScopeConditions_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Type string + Conditions []*RoleScopeCondition +} + +func (b0 RoleScopeConditions_builder) Build() *RoleScopeConditions { + m0 := &RoleScopeConditions{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Type = b.Type + x.xxx_hidden_Conditions = &b.Conditions + return m0 +} + +type RoleScopeCondition struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Expression string `protobuf:"bytes,1,opt,name=expression,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RoleScopeCondition) Reset() { + *x = RoleScopeCondition{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RoleScopeCondition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoleScopeCondition) ProtoMessage() {} + +func (x *RoleScopeCondition) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *RoleScopeCondition) GetExpression() string { + if x != nil { + return x.xxx_hidden_Expression + } + return "" +} + +func (x *RoleScopeCondition) SetExpression(v string) { + x.xxx_hidden_Expression = v +} + +type RoleScopeCondition_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Expression string +} + +func (b0 RoleScopeCondition_builder) Build() *RoleScopeCondition { + m0 := &RoleScopeCondition{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Expression = b.Expression + return m0 +} + +// ScopeBindingTrait is used to scope a role to a resource or set of resources. +// The scope may be static (determined at crawl time) or dynamic (determined based on conditions). +// For example, in Azure a role definition can be scoped to a subscription, management group, or resource group. +// In that case, the role ID would be the resource ID of the role definition, and the scope resource ID would be the resource ID of the subscription, management group, or resource group. +type ScopeBindingTrait struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_RoleId *ResourceId `protobuf:"bytes,1,opt,name=role_id,json=roleId,proto3"` + xxx_hidden_ScopeResourceId *ResourceId `protobuf:"bytes,2,opt,name=scope_resource_id,json=scopeResourceId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScopeBindingTrait) Reset() { + *x = ScopeBindingTrait{} + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScopeBindingTrait) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopeBindingTrait) ProtoMessage() {} + +func (x *ScopeBindingTrait) ProtoReflect() protoreflect.Message { + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *ScopeBindingTrait) GetRoleId() *ResourceId { + if x != nil { + return x.xxx_hidden_RoleId + } + return nil +} + +func (x *ScopeBindingTrait) GetScopeResourceId() *ResourceId { + if x != nil { + return x.xxx_hidden_ScopeResourceId + } + return nil +} + +func (x *ScopeBindingTrait) SetRoleId(v *ResourceId) { + x.xxx_hidden_RoleId = v +} + +func (x *ScopeBindingTrait) SetScopeResourceId(v *ResourceId) { + x.xxx_hidden_ScopeResourceId = v +} + +func (x *ScopeBindingTrait) HasRoleId() bool { + if x == nil { + return false + } + return x.xxx_hidden_RoleId != nil +} + +func (x *ScopeBindingTrait) HasScopeResourceId() bool { + if x == nil { + return false + } + return x.xxx_hidden_ScopeResourceId != nil +} + +func (x *ScopeBindingTrait) ClearRoleId() { + x.xxx_hidden_RoleId = nil +} + +func (x *ScopeBindingTrait) ClearScopeResourceId() { + x.xxx_hidden_ScopeResourceId = nil +} + +type ScopeBindingTrait_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + RoleId *ResourceId + // Remove required if we add more ways to scope roles. (eg: Expressions.) 
+ ScopeResourceId *ResourceId +} + +func (b0 ScopeBindingTrait_builder) Build() *ScopeBindingTrait { + m0 := &ScopeBindingTrait{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_RoleId = b.RoleId + x.xxx_hidden_ScopeResourceId = b.ScopeResourceId return m0 } @@ -663,7 +916,7 @@ type AppTrait struct { func (x *AppTrait) Reset() { *x = AppTrait{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -675,7 +928,7 @@ func (x *AppTrait) String() string { func (*AppTrait) ProtoMessage() {} func (x *AppTrait) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[3] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -810,7 +1063,7 @@ type SecretTrait struct { func (x *SecretTrait) Reset() { *x = SecretTrait{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -822,7 +1075,7 @@ func (x *SecretTrait) String() string { func (*SecretTrait) ProtoMessage() {} func (x *SecretTrait) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[4] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -999,7 +1252,7 @@ type UserTrait_Email struct { func (x *UserTrait_Email) Reset() { *x = UserTrait_Email{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1011,7 +1264,7 @@ func (x *UserTrait_Email) String() string { func (*UserTrait_Email) ProtoMessage() {} func (x *UserTrait_Email) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[5] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1071,7 +1324,7 @@ type UserTrait_Status struct { func (x *UserTrait_Status) Reset() { *x = UserTrait_Status{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1083,7 +1336,7 @@ func (x *UserTrait_Status) String() string { func (*UserTrait_Status) ProtoMessage() {} func (x *UserTrait_Status) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[6] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1141,7 +1394,7 @@ type UserTrait_MFAStatus struct { func (x *UserTrait_MFAStatus) Reset() { *x = UserTrait_MFAStatus{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1153,7 +1406,7 @@ func (x *UserTrait_MFAStatus) String() string { func (*UserTrait_MFAStatus) ProtoMessage() {} func (x 
*UserTrait_MFAStatus) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[7] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1198,7 +1451,7 @@ type UserTrait_SSOStatus struct { func (x *UserTrait_SSOStatus) Reset() { *x = UserTrait_SSOStatus{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1210,7 +1463,7 @@ func (x *UserTrait_SSOStatus) String() string { func (*UserTrait_SSOStatus) ProtoMessage() {} func (x *UserTrait_SSOStatus) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[8] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1259,7 +1512,7 @@ type UserTrait_StructuredName struct { func (x *UserTrait_StructuredName) Reset() { *x = UserTrait_StructuredName{} - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1271,7 +1524,7 @@ func (x *UserTrait_StructuredName) String() string { func (*UserTrait_StructuredName) ProtoMessage() {} func (x *UserTrait_StructuredName) ProtoReflect() protoreflect.Message { - mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[9] + mi := &file_c1_connector_v2_annotation_trait_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1418,9 +1671,22 @@ const file_c1_connector_v2_annotation_trait_proto_rawDesc = "" + "\n" + "GroupTrait\x12-\n" + "\x04icon\x18\x01 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x121\n" + - "\aprofile\x18\x02 \x01(\v2\x17.google.protobuf.StructR\aprofile\">\n" + + "\aprofile\x18\x02 \x01(\v2\x17.google.protobuf.StructR\aprofile\"\x98\x01\n" + "\tRoleTrait\x121\n" + - "\aprofile\x18\x01 \x01(\v2\x17.google.protobuf.StructR\aprofile\"\x9a\x03\n" + + "\aprofile\x18\x01 \x01(\v2\x17.google.protobuf.StructR\aprofile\x12X\n" + + "\x15role_scope_conditions\x18\x02 \x01(\v2$.c1.connector.v2.RoleScopeConditionsR\x13roleScopeConditions\"n\n" + + "\x13RoleScopeConditions\x12\x12\n" + + "\x04type\x18\x01 \x01(\tR\x04type\x12C\n" + + "\n" + + "conditions\x18\x03 \x03(\v2#.c1.connector.v2.RoleScopeConditionR\n" + + "conditions\"4\n" + + "\x12RoleScopeCondition\x12\x1e\n" + + "\n" + + "expression\x18\x01 \x01(\tR\n" + + "expression\"\xa6\x01\n" + + "\x11ScopeBindingTrait\x12>\n" + + "\arole_id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x06roleId\x12Q\n" + + "\x11scope_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdB\b\xfaB\x05\x8a\x01\x02\x10\x01R\x0fscopeResourceId\"\x9a\x03\n" + "\bAppTrait\x125\n" + "\bhelp_url\x18\x01 \x01(\tB\x1a\xfaB\x17r\x15 \x01(\x80\b:\bhttps://\xd0\x01\x01\x88\x01\x01R\ahelpUrl\x12-\n" + "\x04icon\x18\x02 \x01(\v2\x19.c1.connector.v2.AssetRefR\x04icon\x12-\n" + @@ -1447,7 +1713,7 @@ const file_c1_connector_v2_annotation_trait_proto_rawDesc = "" + "identityIdB6Z4github.com/conductorone/baton-sdk/pb/c1/connector/v2b\x06proto3" var file_c1_connector_v2_annotation_trait_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var 
file_c1_connector_v2_annotation_trait_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_c1_connector_v2_annotation_trait_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_c1_connector_v2_annotation_trait_proto_goTypes = []any{ (UserTrait_AccountType)(0), // 0: c1.connector.v2.UserTrait.AccountType (UserTrait_Status_Status)(0), // 1: c1.connector.v2.UserTrait.Status.Status @@ -1455,48 +1721,55 @@ var file_c1_connector_v2_annotation_trait_proto_goTypes = []any{ (*UserTrait)(nil), // 3: c1.connector.v2.UserTrait (*GroupTrait)(nil), // 4: c1.connector.v2.GroupTrait (*RoleTrait)(nil), // 5: c1.connector.v2.RoleTrait - (*AppTrait)(nil), // 6: c1.connector.v2.AppTrait - (*SecretTrait)(nil), // 7: c1.connector.v2.SecretTrait - (*UserTrait_Email)(nil), // 8: c1.connector.v2.UserTrait.Email - (*UserTrait_Status)(nil), // 9: c1.connector.v2.UserTrait.Status - (*UserTrait_MFAStatus)(nil), // 10: c1.connector.v2.UserTrait.MFAStatus - (*UserTrait_SSOStatus)(nil), // 11: c1.connector.v2.UserTrait.SSOStatus - (*UserTrait_StructuredName)(nil), // 12: c1.connector.v2.UserTrait.StructuredName - (*structpb.Struct)(nil), // 13: google.protobuf.Struct - (*AssetRef)(nil), // 14: c1.connector.v2.AssetRef - (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp - (*ResourceId)(nil), // 16: c1.connector.v2.ResourceId + (*RoleScopeConditions)(nil), // 6: c1.connector.v2.RoleScopeConditions + (*RoleScopeCondition)(nil), // 7: c1.connector.v2.RoleScopeCondition + (*ScopeBindingTrait)(nil), // 8: c1.connector.v2.ScopeBindingTrait + (*AppTrait)(nil), // 9: c1.connector.v2.AppTrait + (*SecretTrait)(nil), // 10: c1.connector.v2.SecretTrait + (*UserTrait_Email)(nil), // 11: c1.connector.v2.UserTrait.Email + (*UserTrait_Status)(nil), // 12: c1.connector.v2.UserTrait.Status + (*UserTrait_MFAStatus)(nil), // 13: c1.connector.v2.UserTrait.MFAStatus + (*UserTrait_SSOStatus)(nil), // 14: c1.connector.v2.UserTrait.SSOStatus + (*UserTrait_StructuredName)(nil), // 15: c1.connector.v2.UserTrait.StructuredName + (*structpb.Struct)(nil), // 16: google.protobuf.Struct + (*AssetRef)(nil), // 17: c1.connector.v2.AssetRef + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp + (*ResourceId)(nil), // 19: c1.connector.v2.ResourceId } var file_c1_connector_v2_annotation_trait_proto_depIdxs = []int32{ - 8, // 0: c1.connector.v2.UserTrait.emails:type_name -> c1.connector.v2.UserTrait.Email - 9, // 1: c1.connector.v2.UserTrait.status:type_name -> c1.connector.v2.UserTrait.Status - 13, // 2: c1.connector.v2.UserTrait.profile:type_name -> google.protobuf.Struct - 14, // 3: c1.connector.v2.UserTrait.icon:type_name -> c1.connector.v2.AssetRef + 11, // 0: c1.connector.v2.UserTrait.emails:type_name -> c1.connector.v2.UserTrait.Email + 12, // 1: c1.connector.v2.UserTrait.status:type_name -> c1.connector.v2.UserTrait.Status + 16, // 2: c1.connector.v2.UserTrait.profile:type_name -> google.protobuf.Struct + 17, // 3: c1.connector.v2.UserTrait.icon:type_name -> c1.connector.v2.AssetRef 0, // 4: c1.connector.v2.UserTrait.account_type:type_name -> c1.connector.v2.UserTrait.AccountType - 15, // 5: c1.connector.v2.UserTrait.created_at:type_name -> google.protobuf.Timestamp - 15, // 6: c1.connector.v2.UserTrait.last_login:type_name -> google.protobuf.Timestamp - 10, // 7: c1.connector.v2.UserTrait.mfa_status:type_name -> c1.connector.v2.UserTrait.MFAStatus - 11, // 8: c1.connector.v2.UserTrait.sso_status:type_name -> c1.connector.v2.UserTrait.SSOStatus - 12, // 9: c1.connector.v2.UserTrait.structured_name:type_name -> 
c1.connector.v2.UserTrait.StructuredName - 14, // 10: c1.connector.v2.GroupTrait.icon:type_name -> c1.connector.v2.AssetRef - 13, // 11: c1.connector.v2.GroupTrait.profile:type_name -> google.protobuf.Struct - 13, // 12: c1.connector.v2.RoleTrait.profile:type_name -> google.protobuf.Struct - 14, // 13: c1.connector.v2.AppTrait.icon:type_name -> c1.connector.v2.AssetRef - 14, // 14: c1.connector.v2.AppTrait.logo:type_name -> c1.connector.v2.AssetRef - 13, // 15: c1.connector.v2.AppTrait.profile:type_name -> google.protobuf.Struct - 2, // 16: c1.connector.v2.AppTrait.flags:type_name -> c1.connector.v2.AppTrait.AppFlag - 13, // 17: c1.connector.v2.SecretTrait.profile:type_name -> google.protobuf.Struct - 15, // 18: c1.connector.v2.SecretTrait.created_at:type_name -> google.protobuf.Timestamp - 15, // 19: c1.connector.v2.SecretTrait.expires_at:type_name -> google.protobuf.Timestamp - 15, // 20: c1.connector.v2.SecretTrait.last_used_at:type_name -> google.protobuf.Timestamp - 16, // 21: c1.connector.v2.SecretTrait.created_by_id:type_name -> c1.connector.v2.ResourceId - 16, // 22: c1.connector.v2.SecretTrait.identity_id:type_name -> c1.connector.v2.ResourceId - 1, // 23: c1.connector.v2.UserTrait.Status.status:type_name -> c1.connector.v2.UserTrait.Status.Status - 24, // [24:24] is the sub-list for method output_type - 24, // [24:24] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 18, // 5: c1.connector.v2.UserTrait.created_at:type_name -> google.protobuf.Timestamp + 18, // 6: c1.connector.v2.UserTrait.last_login:type_name -> google.protobuf.Timestamp + 13, // 7: c1.connector.v2.UserTrait.mfa_status:type_name -> c1.connector.v2.UserTrait.MFAStatus + 14, // 8: c1.connector.v2.UserTrait.sso_status:type_name -> c1.connector.v2.UserTrait.SSOStatus + 15, // 9: c1.connector.v2.UserTrait.structured_name:type_name -> c1.connector.v2.UserTrait.StructuredName + 17, // 10: c1.connector.v2.GroupTrait.icon:type_name -> c1.connector.v2.AssetRef + 16, // 11: c1.connector.v2.GroupTrait.profile:type_name -> google.protobuf.Struct + 16, // 12: c1.connector.v2.RoleTrait.profile:type_name -> google.protobuf.Struct + 6, // 13: c1.connector.v2.RoleTrait.role_scope_conditions:type_name -> c1.connector.v2.RoleScopeConditions + 7, // 14: c1.connector.v2.RoleScopeConditions.conditions:type_name -> c1.connector.v2.RoleScopeCondition + 19, // 15: c1.connector.v2.ScopeBindingTrait.role_id:type_name -> c1.connector.v2.ResourceId + 19, // 16: c1.connector.v2.ScopeBindingTrait.scope_resource_id:type_name -> c1.connector.v2.ResourceId + 17, // 17: c1.connector.v2.AppTrait.icon:type_name -> c1.connector.v2.AssetRef + 17, // 18: c1.connector.v2.AppTrait.logo:type_name -> c1.connector.v2.AssetRef + 16, // 19: c1.connector.v2.AppTrait.profile:type_name -> google.protobuf.Struct + 2, // 20: c1.connector.v2.AppTrait.flags:type_name -> c1.connector.v2.AppTrait.AppFlag + 16, // 21: c1.connector.v2.SecretTrait.profile:type_name -> google.protobuf.Struct + 18, // 22: c1.connector.v2.SecretTrait.created_at:type_name -> google.protobuf.Timestamp + 18, // 23: c1.connector.v2.SecretTrait.expires_at:type_name -> google.protobuf.Timestamp + 18, // 24: c1.connector.v2.SecretTrait.last_used_at:type_name -> google.protobuf.Timestamp + 19, // 25: c1.connector.v2.SecretTrait.created_by_id:type_name -> c1.connector.v2.ResourceId + 19, // 26: c1.connector.v2.SecretTrait.identity_id:type_name -> 
c1.connector.v2.ResourceId + 1, // 27: c1.connector.v2.UserTrait.Status.status:type_name -> c1.connector.v2.UserTrait.Status.Status + 28, // [28:28] is the sub-list for method output_type + 28, // [28:28] is the sub-list for method input_type + 28, // [28:28] is the sub-list for extension type_name + 28, // [28:28] is the sub-list for extension extendee + 0, // [0:28] is the sub-list for field type_name } func init() { file_c1_connector_v2_annotation_trait_proto_init() } @@ -1512,7 +1785,7 @@ func file_c1_connector_v2_annotation_trait_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_trait_proto_rawDesc), len(file_c1_connector_v2_annotation_trait_proto_rawDesc)), NumEnums: 3, - NumMessages: 10, + NumMessages: 13, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go index e86d89c8..34601877 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go @@ -35,6 +35,7 @@ const ( ResourceType_TRAIT_APP ResourceType_Trait = 4 ResourceType_TRAIT_SECRET ResourceType_Trait = 5 ResourceType_TRAIT_SECURITY_INSIGHT ResourceType_Trait = 6 + ResourceType_TRAIT_SCOPE_BINDING ResourceType_Trait = 7 ) // Enum value maps for ResourceType_Trait. @@ -47,6 +48,7 @@ var ( 4: "TRAIT_APP", 5: "TRAIT_SECRET", 6: "TRAIT_SECURITY_INSIGHT", + 7: "TRAIT_SCOPE_BINDING", } ResourceType_Trait_value = map[string]int32{ "TRAIT_UNSPECIFIED": 0, @@ -56,6 +58,7 @@ var ( "TRAIT_APP": 4, "TRAIT_SECRET": 5, "TRAIT_SECURITY_INSIGHT": 6, + "TRAIT_SCOPE_BINDING": 7, } ) @@ -81,7 +84,6 @@ func (x ResourceType_Trait) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// FIXME(mstanbCO): call this something else? Should it just be a bool? Possibly just use an annotation? 
type Resource_CreationSource int32 const ( @@ -2820,17 +2822,23 @@ func (b0 ResourceId_builder) Build() *ResourceId { } type Resource struct { - state protoimpl.MessageState `protogen:"hybrid.v1"` - Id *ResourceId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` - DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` - Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - BatonResource bool `protobuf:"varint,6,opt,name=baton_resource,json=batonResource,proto3" json:"baton_resource,omitempty"` - ExternalId *ExternalId `protobuf:"bytes,7,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"` - CreationSource Resource_CreationSource `protobuf:"varint,8,opt,name=creation_source,json=creationSource,proto3,enum=c1.connector.v2.Resource_CreationSource" json:"creation_source,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Id *ResourceId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"` + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + BatonResource bool `protobuf:"varint,6,opt,name=baton_resource,json=batonResource,proto3" json:"baton_resource,omitempty"` + // Deprecated. This is no longer used. + // + // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. + ExternalId *ExternalId `protobuf:"bytes,7,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"` + // Deprecated. This is no longer used. + // + // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. + CreationSource Resource_CreationSource `protobuf:"varint,8,opt,name=creation_source,json=creationSource,proto3,enum=c1.connector.v2.Resource_CreationSource" json:"creation_source,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Resource) Reset() { @@ -2900,6 +2908,7 @@ func (x *Resource) GetBatonResource() bool { return false } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) GetExternalId() *ExternalId { if x != nil { return x.ExternalId @@ -2907,6 +2916,7 @@ func (x *Resource) GetExternalId() *ExternalId { return nil } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) GetCreationSource() Resource_CreationSource { if x != nil { return x.CreationSource @@ -2938,10 +2948,12 @@ func (x *Resource) SetBatonResource(v bool) { x.BatonResource = v } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) SetExternalId(v *ExternalId) { x.ExternalId = v } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. 
func (x *Resource) SetCreationSource(v Resource_CreationSource) { x.CreationSource = v } @@ -2960,6 +2972,7 @@ func (x *Resource) HasParentResourceId() bool { return x.ParentResourceId != nil } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) HasExternalId() bool { if x == nil { return false @@ -2975,6 +2988,7 @@ func (x *Resource) ClearParentResourceId() { x.ParentResourceId = nil } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) ClearExternalId() { x.ExternalId = nil } @@ -2988,8 +3002,14 @@ type Resource_builder struct { Annotations []*anypb.Any Description string BatonResource bool - ExternalId *ExternalId - CreationSource Resource_CreationSource + // Deprecated. This is no longer used. + // + // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. + ExternalId *ExternalId + // Deprecated. This is no longer used. + // + // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. + CreationSource Resource_CreationSource } func (b0 Resource_builder) Build() *Resource { @@ -4453,7 +4473,7 @@ var File_c1_connector_v2_resource_proto protoreflect.FileDescriptor const file_c1_connector_v2_resource_proto_rawDesc = "" + "\n" + - "\x1ec1/connector/v2/resource.proto\x12\x0fc1.connector.v2\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17validate/validate.proto\"\xd1\x03\n" + + "\x1ec1/connector/v2/resource.proto\x12\x0fc1.connector.v2\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17validate/validate.proto\"\xea\x03\n" + "\fResourceType\x12\x1a\n" + "\x02id\x18\x01 \x01(\tB\n" + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x120\n" + @@ -4463,7 +4483,7 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + "\vdescription\x18\x05 \x01(\tB\r\xfaB\n" + "r\b \x01(\x80 \xd0\x01\x01R\vdescription\x12-\n" + - "\x12sourced_externally\x18\x06 \x01(\bR\x11sourcedExternally\"\x8c\x01\n" + + "\x12sourced_externally\x18\x06 \x01(\bR\x11sourcedExternally\"\xa5\x01\n" + "\x05Trait\x12\x15\n" + "\x11TRAIT_UNSPECIFIED\x10\x00\x12\x0e\n" + "\n" + @@ -4473,7 +4493,8 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "TRAIT_ROLE\x10\x03\x12\r\n" + "\tTRAIT_APP\x10\x04\x12\x10\n" + "\fTRAIT_SECRET\x10\x05\x12\x1a\n" + - "\x16TRAIT_SECURITY_INSIGHT\x10\x06\"\xa6\x02\n" + + "\x16TRAIT_SECURITY_INSIGHT\x10\x06\x12\x17\n" + + "\x13TRAIT_SCOPE_BINDING\x10\a\"\xa6\x02\n" + ",ResourceTypesServiceListResourceTypesRequest\x121\n" + "\x06parent\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\x06parent\x12'\n" + "\tpage_size\x18\x02 \x01(\rB\n" + @@ -4615,7 +4636,7 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "\xfaB\ar\x05 \x01(\x80\bR\fresourceType\x12&\n" + "\bresource\x18\x02 \x01(\tB\n" + "\xfaB\ar\x05 \x01(\x80\bR\bresource\x12%\n" + - "\x0ebaton_resource\x18\x03 \x01(\bR\rbatonResource\"\xf0\x04\n" + + "\x0ebaton_resource\x18\x03 \x01(\bR\rbatonResource\"\xf8\x04\n" + "\bResource\x12+\n" + "\x02id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x02id\x12I\n" + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\x120\n" + @@ -4624,10 +4645,10 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + "\vdescription\x18\x05 \x01(\tB\r\xfaB\n" + "r\b \x01(\x80\x10\xd0\x01\x01R\vdescription\x12%\n" + - "\x0ebaton_resource\x18\x06 
\x01(\bR\rbatonResource\x12<\n" + - "\vexternal_id\x18\a \x01(\v2\x1b.c1.connector.v2.ExternalIdR\n" + - "externalId\x12Q\n" + - "\x0fcreation_source\x18\b \x01(\x0e2(.c1.connector.v2.Resource.CreationSourceR\x0ecreationSource\"\x98\x01\n" + + "\x0ebaton_resource\x18\x06 \x01(\bR\rbatonResource\x12@\n" + + "\vexternal_id\x18\a \x01(\v2\x1b.c1.connector.v2.ExternalIdB\x02\x18\x01R\n" + + "externalId\x12U\n" + + "\x0fcreation_source\x18\b \x01(\x0e2(.c1.connector.v2.Resource.CreationSourceB\x02\x18\x01R\x0ecreationSource\"\x98\x01\n" + "\x0eCreationSource\x12\x1f\n" + "\x1bCREATION_SOURCE_UNSPECIFIED\x10\x00\x12,\n" + "(CREATION_SOURCE_CONNECTOR_LIST_RESOURCES\x10\x01\x127\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go index c2093c67..12dcb8a5 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_protoopaque.pb.go @@ -35,6 +35,7 @@ const ( ResourceType_TRAIT_APP ResourceType_Trait = 4 ResourceType_TRAIT_SECRET ResourceType_Trait = 5 ResourceType_TRAIT_SECURITY_INSIGHT ResourceType_Trait = 6 + ResourceType_TRAIT_SCOPE_BINDING ResourceType_Trait = 7 ) // Enum value maps for ResourceType_Trait. @@ -47,6 +48,7 @@ var ( 4: "TRAIT_APP", 5: "TRAIT_SECRET", 6: "TRAIT_SECURITY_INSIGHT", + 7: "TRAIT_SCOPE_BINDING", } ResourceType_Trait_value = map[string]int32{ "TRAIT_UNSPECIFIED": 0, @@ -56,6 +58,7 @@ var ( "TRAIT_APP": 4, "TRAIT_SECRET": 5, "TRAIT_SECURITY_INSIGHT": 6, + "TRAIT_SCOPE_BINDING": 7, } ) @@ -81,7 +84,6 @@ func (x ResourceType_Trait) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// FIXME(mstanbCO): call this something else? Should it just be a bool? Possibly just use an annotation? type Resource_CreationSource int32 const ( @@ -2878,6 +2880,7 @@ func (x *Resource) GetBatonResource() bool { return false } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) GetExternalId() *ExternalId { if x != nil { return x.xxx_hidden_ExternalId @@ -2885,6 +2888,7 @@ func (x *Resource) GetExternalId() *ExternalId { return nil } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) GetCreationSource() Resource_CreationSource { if x != nil { return x.xxx_hidden_CreationSource @@ -2916,10 +2920,12 @@ func (x *Resource) SetBatonResource(v bool) { x.xxx_hidden_BatonResource = v } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) SetExternalId(v *ExternalId) { x.xxx_hidden_ExternalId = v } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) SetCreationSource(v Resource_CreationSource) { x.xxx_hidden_CreationSource = v } @@ -2938,6 +2944,7 @@ func (x *Resource) HasParentResourceId() bool { return x.xxx_hidden_ParentResourceId != nil } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. func (x *Resource) HasExternalId() bool { if x == nil { return false @@ -2953,6 +2960,7 @@ func (x *Resource) ClearParentResourceId() { x.xxx_hidden_ParentResourceId = nil } +// Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. 
func (x *Resource) ClearExternalId() { x.xxx_hidden_ExternalId = nil } @@ -2966,8 +2974,14 @@ type Resource_builder struct { Annotations []*anypb.Any Description string BatonResource bool - ExternalId *ExternalId - CreationSource Resource_CreationSource + // Deprecated. This is no longer used. + // + // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. + ExternalId *ExternalId + // Deprecated. This is no longer used. + // + // Deprecated: Marked as deprecated in c1/connector/v2/resource.proto. + CreationSource Resource_CreationSource } func (b0 Resource_builder) Build() *Resource { @@ -4446,7 +4460,7 @@ var File_c1_connector_v2_resource_proto protoreflect.FileDescriptor const file_c1_connector_v2_resource_proto_rawDesc = "" + "\n" + - "\x1ec1/connector/v2/resource.proto\x12\x0fc1.connector.v2\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17validate/validate.proto\"\xd1\x03\n" + + "\x1ec1/connector/v2/resource.proto\x12\x0fc1.connector.v2\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17validate/validate.proto\"\xea\x03\n" + "\fResourceType\x12\x1a\n" + "\x02id\x18\x01 \x01(\tB\n" + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x120\n" + @@ -4456,7 +4470,7 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + "\vdescription\x18\x05 \x01(\tB\r\xfaB\n" + "r\b \x01(\x80 \xd0\x01\x01R\vdescription\x12-\n" + - "\x12sourced_externally\x18\x06 \x01(\bR\x11sourcedExternally\"\x8c\x01\n" + + "\x12sourced_externally\x18\x06 \x01(\bR\x11sourcedExternally\"\xa5\x01\n" + "\x05Trait\x12\x15\n" + "\x11TRAIT_UNSPECIFIED\x10\x00\x12\x0e\n" + "\n" + @@ -4466,7 +4480,8 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "TRAIT_ROLE\x10\x03\x12\r\n" + "\tTRAIT_APP\x10\x04\x12\x10\n" + "\fTRAIT_SECRET\x10\x05\x12\x1a\n" + - "\x16TRAIT_SECURITY_INSIGHT\x10\x06\"\xa6\x02\n" + + "\x16TRAIT_SECURITY_INSIGHT\x10\x06\x12\x17\n" + + "\x13TRAIT_SCOPE_BINDING\x10\a\"\xa6\x02\n" + ",ResourceTypesServiceListResourceTypesRequest\x121\n" + "\x06parent\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\x06parent\x12'\n" + "\tpage_size\x18\x02 \x01(\rB\n" + @@ -4608,7 +4623,7 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "\xfaB\ar\x05 \x01(\x80\bR\fresourceType\x12&\n" + "\bresource\x18\x02 \x01(\tB\n" + "\xfaB\ar\x05 \x01(\x80\bR\bresource\x12%\n" + - "\x0ebaton_resource\x18\x03 \x01(\bR\rbatonResource\"\xf0\x04\n" + + "\x0ebaton_resource\x18\x03 \x01(\bR\rbatonResource\"\xf8\x04\n" + "\bResource\x12+\n" + "\x02id\x18\x01 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x02id\x12I\n" + "\x12parent_resource_id\x18\x02 \x01(\v2\x1b.c1.connector.v2.ResourceIdR\x10parentResourceId\x120\n" + @@ -4617,10 +4632,10 @@ const file_c1_connector_v2_resource_proto_rawDesc = "" + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12/\n" + "\vdescription\x18\x05 \x01(\tB\r\xfaB\n" + "r\b \x01(\x80\x10\xd0\x01\x01R\vdescription\x12%\n" + - "\x0ebaton_resource\x18\x06 \x01(\bR\rbatonResource\x12<\n" + - "\vexternal_id\x18\a \x01(\v2\x1b.c1.connector.v2.ExternalIdR\n" + - "externalId\x12Q\n" + - "\x0fcreation_source\x18\b \x01(\x0e2(.c1.connector.v2.Resource.CreationSourceR\x0ecreationSource\"\x98\x01\n" + + "\x0ebaton_resource\x18\x06 \x01(\bR\rbatonResource\x12@\n" + + "\vexternal_id\x18\a \x01(\v2\x1b.c1.connector.v2.ExternalIdB\x02\x18\x01R\n" + + "externalId\x12U\n" + + "\x0fcreation_source\x18\b 
\x01(\x0e2(.c1.connector.v2.Resource.CreationSourceB\x02\x18\x01R\x0ecreationSource\"\x98\x01\n" + "\x0eCreationSource\x12\x1f\n" + "\x1bCREATION_SOURCE_UNSPECIFIED\x10\x00\x12,\n" + "(CREATION_SOURCE_CONNECTOR_LIST_RESOURCES\x10\x01\x127\n" + diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go b/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go index f63d01ba..5d997718 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/actions/actions.go @@ -522,7 +522,7 @@ func (a *ActionManager) invokeResourceAction( go func() { defer close(done) oa.SetStatus(ctx, v2.BatonActionStatus_BATON_ACTION_STATUS_RUNNING) - handlerCtx, cancel := context.WithTimeoutCause(context.Background(), 1*time.Hour, errors.New("action handler timed out")) + handlerCtx, cancel := context.WithTimeoutCause(ctxzap.ToContext(context.Background(), ctxzap.Extract(ctx)), 1*time.Hour, errors.New("action handler timed out")) defer cancel() var oaErr error oa.Rv, oa.Annos, oaErr = handler(handlerCtx, args) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go index ae3f324e..efbaff6b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go @@ -9,6 +9,8 @@ import ( "os" "time" + "github.com/conductorone/baton-sdk/pkg/connectorbuilder" + "github.com/conductorone/baton-sdk/pkg/types" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "github.com/maypok86/otter/v2" "github.com/spf13/cobra" @@ -605,6 +607,7 @@ func MakeCapabilitiesCommand[T field.Configurable]( v *viper.Viper, confschema field.Configuration, getconnector GetConnectorFunc2[T], + opts ...connectorrunner.Option, ) func(*cobra.Command, []string) error { return func(cmd *cobra.Command, args []string) error { // NOTE(shackra): bind all the flags (persistent and @@ -626,29 +629,59 @@ func MakeCapabilitiesCommand[T field.Configurable]( return err } - readFromPath := true - decodeOpts := field.WithAdditionalDecodeHooks(field.FileUploadDecodeHook(readFromPath)) - t, err := MakeGenericConfiguration[T](v, decodeOpts) + var c types.ConnectorServer + + c, err = defaultConnectorBuilder(ctx, opts...) 
if err != nil { - return fmt.Errorf("failed to make configuration: %w", err) + return fmt.Errorf("failed to build default connector: %w", err) } - // validate required fields and relationship constraints - if err := field.Validate(confschema, t, field.WithAuthMethod(v.GetString("auth-method"))); err != nil { - return err + + if c == nil { + readFromPath := true + decodeOpts := field.WithAdditionalDecodeHooks(field.FileUploadDecodeHook(readFromPath)) + t, err := MakeGenericConfiguration[T](v, decodeOpts) + if err != nil { + return fmt.Errorf("failed to make configuration: %w", err) + } + // validate required fields and relationship constraints + if err := field.Validate(confschema, t, field.WithAuthMethod(v.GetString("auth-method"))); err != nil { + return err + } + + c, err = getconnector(runCtx, t, RunTimeOpts{}) + if err != nil { + return err + } } - c, err := getconnector(runCtx, t, RunTimeOpts{}) - if err != nil { - return err + if c == nil { + return fmt.Errorf("could not create connector %w", err) } - md, err := c.GetMetadata(runCtx, &v2.ConnectorServiceGetMetadataRequest{}) - if err != nil { - return err + type getter interface { + GetCapabilities(ctx context.Context) (*v2.ConnectorCapabilities, error) } - if !md.GetMetadata().HasCapabilities() { - return fmt.Errorf("connector does not support capabilities") + var capabilities *v2.ConnectorCapabilities + + if getCap, ok := c.(getter); ok { + capabilities, err = getCap.GetCapabilities(runCtx) + if err != nil { + return err + } + } + + if capabilities == nil { + md, err := c.GetMetadata(runCtx, &v2.ConnectorServiceGetMetadataRequest{}) + if err != nil { + return err + } + + if !md.GetMetadata().HasCapabilities() { + return fmt.Errorf("connector does not support capabilities") + } + + capabilities = md.GetMetadata().GetCapabilities() } protoMarshaller := protojson.MarshalOptions{ @@ -657,7 +690,7 @@ func MakeCapabilitiesCommand[T field.Configurable]( } a := &anypb.Any{} - err = anypb.MarshalFrom(a, md.GetMetadata().GetCapabilities(), proto.MarshalOptions{Deterministic: true}) + err = anypb.MarshalFrom(a, capabilities, proto.MarshalOptions{Deterministic: true}) if err != nil { return err } @@ -696,3 +729,20 @@ func MakeConfigSchemaCommand[T field.Configurable]( return nil } } + +func defaultConnectorBuilder(ctx context.Context, opts ...connectorrunner.Option) (types.ConnectorServer, error) { + defaultConnector, err := connectorrunner.ExtractDefaultConnector(ctx, opts...) + if err != nil { + return nil, err + } + + if defaultConnector == nil { + return nil, nil + } + + c, err := connectorbuilder.NewConnector(ctx, defaultConnector) + if err != nil { + return nil, err + } + return c, nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go index d9304524..02cc5d18 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go @@ -207,15 +207,28 @@ func DefineConfigurationV2[T field.Configurable]( return nil, nil, err } - _, err = cli.AddCommand(mainCMD, v, &schema, &cobra.Command{ - Use: "capabilities", - Short: "Get connector capabilities", - RunE: cli.MakeCapabilitiesCommand(ctx, connectorName, v, confschema, connector), - }) - + defaultConnector, err := connectorrunner.ExtractDefaultConnector(ctx, options...) 
if err != nil { return nil, nil, err } + if defaultConnector == nil { + _, err = cli.AddCommand(mainCMD, v, &schema, &cobra.Command{ + Use: "capabilities", + Short: "Get connector capabilities", + RunE: cli.MakeCapabilitiesCommand(ctx, connectorName, v, confschema, connector), + }) + if err != nil { + return nil, nil, err + } + } else { + // We don't want to use cli.AddCommand here because we don't want to validate config flags + // So we can call capabilities even with incomplete config + mainCMD.AddCommand(&cobra.Command{ + Use: "capabilities", + Short: "Get connector capabilities", + RunE: cli.MakeCapabilitiesCommand(ctx, connectorName, v, confschema, connector, options...), + }) + } _, err = cli.AddCommand(mainCMD, v, nil, &cobra.Command{ Use: "config", diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go index ed7f1403..adccd2ef 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/accounts.go @@ -62,8 +62,9 @@ func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRe if len(b.accountManagers) == 0 { l.Error("error: connector does not have account manager configured") - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, "connector does not have account manager configured") + err := status.Error(codes.Unimplemented, "connector does not have account manager configured") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } var accountManager AccountManagerLimited @@ -79,8 +80,9 @@ func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRe var ok bool accountManager, ok = b.accountManagers["user"] if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, "connector has multiple account managers configured, but no resource type specified, and no default account manager configured") + err := status.Error(codes.Unimplemented, "connector has multiple account managers configured, but no resource type specified, and no default account manager configured") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } } } @@ -91,29 +93,30 @@ func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRe accountManager, ok = b.accountManagers[request.GetResourceTypeId()] if !ok { l.Error("error: connector does not have account manager configured") - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Errorf(codes.Unimplemented, "connector does not have account manager configured for resource type: %s", request.GetResourceTypeId()) + err := status.Errorf(codes.Unimplemented, "connector does not have account manager configured for resource type: %s", request.GetResourceTypeId()) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } } opts, err := crypto.ConvertCredentialOptions(ctx, b.clientSecret, request.GetCredentialOptions(), request.GetEncryptionConfigs()) if err != nil { l.Error("error: converting credential options failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: converting credential options failed: %w", err) } result, plaintexts, annos, err := accountManager.CreateAccount(ctx, 
request.GetAccountInfo(), opts) if err != nil { l.Error("error: create account failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: create account failed: %w", err) } pkem, err := crypto.NewEncryptionManager(request.GetCredentialOptions(), request.GetEncryptionConfigs()) if err != nil { l.Error("error: creating encryption manager failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: creating encryption manager failed: %w", err) } @@ -121,7 +124,7 @@ func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRe for _, plaintextCredential := range plaintexts { encryptedData, err := pkem.Encrypt(ctx, plaintextCredential) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, err } encryptedDatas = append(encryptedDatas, encryptedData...) @@ -142,8 +145,9 @@ func (b *builder) CreateAccount(ctx context.Context, request *v2.CreateAccountRe case *v2.CreateAccountResponse_InProgressResult: rv.SetInProgress(proto.ValueOrDefault(r)) default: - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("unknown result type: %T", result)) + err := status.Error(codes.Unimplemented, fmt.Sprintf("unknown result type: %T", result)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/actions.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/actions.go index 0edab281..b24adff3 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/actions.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/actions.go @@ -108,7 +108,7 @@ func (b *builder) ListActionSchemas(ctx context.Context, request *v2.ListActionS actionSchemas, _, err := b.actionManager.ListActionSchemas(ctx, resourceTypeID) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: listing action schemas failed: %w", err) } @@ -129,7 +129,7 @@ func (b *builder) GetActionSchema(ctx context.Context, request *v2.GetActionSche actionSchema, annos, err := b.actionManager.GetActionSchema(ctx, request.GetName()) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: action schema %s not found: %w", request.GetName(), err) } @@ -152,7 +152,7 @@ func (b *builder) InvokeAction(ctx context.Context, request *v2.InvokeActionRequ id, actionStatus, resp, annos, err := b.actionManager.InvokeAction(ctx, request.GetName(), resourceTypeID, request.GetArgs()) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: invoking action failed: %w", err) } @@ -177,7 +177,7 @@ func (b *builder) GetActionStatus(ctx context.Context, request *v2.GetActionStat actionStatus, name, rv, annos, err := b.actionManager.GetActionStatus(ctx, request.GetId()) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + 
b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: action status for id %s not found: %w", request.GetId(), err) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go index ab092893..e5fc4f9b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go @@ -263,13 +263,13 @@ func (b *builder) GetMetadata(ctx context.Context, request *v2.ConnectorServiceG tt := tasks.GetMetadataType md, err := b.metadataProvider.Metadata(ctx) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, err } - md.Capabilities, err = b.getCapabilities(ctx) + md.Capabilities, err = b.GetCapabilities(ctx) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, err } @@ -332,8 +332,8 @@ func (b *builder) Cleanup(ctx context.Context, request *v2.ConnectorServiceClean return resp, err } -// getCapabilities gets all capabilities for a connector. -func (b *builder) getCapabilities(ctx context.Context) (*v2.ConnectorCapabilities, error) { +// GetCapabilities gets all capabilities for a connector. +func (b *builder) GetCapabilities(ctx context.Context) (*v2.ConnectorCapabilities, error) { connectorCaps := make(map[v2.Capability]struct{}) resourceTypeCapabilities := []*v2.ResourceTypeCapability{} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/credentials.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/credentials.go index 4f303e94..ad8ea9af 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/credentials.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/credentials.go @@ -47,28 +47,29 @@ func (b *builder) RotateCredential(ctx context.Context, request *v2.RotateCreden manager, ok := b.credentialManagers[rt] if !ok { l.Error("error: resource type does not have credential manager configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, "resource type does not have credential manager configured") + err := status.Error(codes.Unimplemented, "resource type does not have credential manager configured") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } opts, err := crypto.ConvertCredentialOptions(ctx, b.clientSecret, request.GetCredentialOptions(), request.GetEncryptionConfigs()) if err != nil { l.Error("error: converting credential options failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: converting credential options failed: %w", err) } plaintexts, annos, err := manager.Rotate(ctx, request.GetResourceId(), opts) if err != nil { l.Error("error: rotate credentials on resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: rotate credentials on resource failed: %w", err) } pkem, err := crypto.NewEncryptionManager(request.GetCredentialOptions(), request.GetEncryptionConfigs()) if err != nil { l.Error("error: 
creating encryption manager failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: creating encryption manager failed: %w", err) } @@ -76,7 +77,7 @@ func (b *builder) RotateCredential(ctx context.Context, request *v2.RotateCreden for _, plaintextCredential := range plaintexts { encryptedData, err := pkem.Encrypt(ctx, plaintextCredential) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, err } encryptedDatas = append(encryptedDatas, encryptedData...) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/events.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/events.go index c6bf352f..7b5bc745 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/events.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/events.go @@ -122,7 +122,7 @@ func (b *builder) ListEvents(ctx context.Context, request *v2.ListEventsRequest) Cursor: request.GetCursor(), }) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: listing events failed: %w", err) } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go index 4c47e653..e9ebbb7f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go @@ -82,13 +82,14 @@ func (b *builder) CreateResource(ctx context.Context, request *v2.CreateResource manager, ok := b.resourceManagers[rt] if !ok { l.Error("error: resource type does not have resource Create() configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Create() configured", rt)) + err := status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Create() configured", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } resource, annos, err := manager.Create(ctx, request.GetResource()) if err != nil { l.Error("error: create resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: create resource failed: %w", err) } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) @@ -114,14 +115,15 @@ func (b *builder) DeleteResource(ctx context.Context, request *v2.DeleteResource if !ok { l.Error("error: resource type does not have resource Delete() configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Delete() configured", rt)) + err := status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Delete() configured", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } annos, err := rsDeleter.Delete(ctx, request.GetResourceId(), request.GetParentResourceId()) if err != nil { l.Error("error: 
deleteV2 resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: delete resource failed: %w", err) } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) @@ -147,14 +149,15 @@ func (b *builder) DeleteResourceV2(ctx context.Context, request *v2.DeleteResour if !ok { l.Error("error: resource type does not have resource Delete() configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Delete() configured", rt)) + err := status.Error(codes.Unimplemented, fmt.Sprintf("resource type %s does not have resource Delete() configured", rt)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } annos, err := rsDeleter.Delete(ctx, request.GetResourceId(), request.GetParentResourceId()) if err != nil { l.Error("error: deleteV2 resource failed", zap.Error(err)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: delete resource failed: %w", err) } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go index d0f8cf2e..01c7f238 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go @@ -11,6 +11,8 @@ import ( "github.com/conductorone/baton-sdk/pkg/types/tasks" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // ResourceProvisioner extends ResourceSyncer to add capabilities for granting and revoking access. 
@@ -71,8 +73,9 @@ func (b *builder) Grant(ctx context.Context, request *v2.GrantManagerServiceGran if !ok { l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: resource type does not have provisioner configured") + err := status.Errorf(codes.Unimplemented, "resource type %s does not have provisioner configured", rt) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } retryer := retry.NewRetryer(ctx, retry.RetryConfig{ @@ -90,7 +93,7 @@ func (b *builder) Grant(ctx context.Context, request *v2.GrantManagerServiceGran if retryer.ShouldWaitAndRetry(ctx, err) { continue } - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("grant failed: %w", err) } } @@ -114,8 +117,9 @@ func (b *builder) Revoke(ctx context.Context, request *v2.GrantManagerServiceRev if revokeProvisioner == nil { l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt)) - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: resource type does not have provisioner configured") + err := status.Errorf(codes.Unimplemented, "resource type %s does not have provisioner configured", rt) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } retryer := retry.NewRetryer(ctx, retry.RetryConfig{ @@ -133,7 +137,7 @@ func (b *builder) Revoke(ctx context.Context, request *v2.GrantManagerServiceRev if retryer.ShouldWaitAndRetry(ctx, err) { continue } - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("revoke failed: %w", err) } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_syncer.go index 4d52b830..4378730f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_syncer.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_syncer.go @@ -89,8 +89,9 @@ func (b *builder) ListResourceTypes( var out []*v2.ResourceType if len(b.resourceSyncers) == 0 { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: no resource builders found") + err := status.Error(codes.FailedPrecondition, "no resource builders found") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } for _, rb := range b.resourceSyncers { @@ -98,8 +99,9 @@ func (b *builder) ListResourceTypes( } if len(out) == 0 { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: no resource types found") + err := status.Error(codes.FailedPrecondition, "no resource types found") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) @@ -115,8 +117,9 @@ func (b *builder) ListResources(ctx context.Context, request *v2.ResourcesServic tt := tasks.ListResourcesType rb, ok := b.resourceSyncers[request.GetResourceTypeId()] if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: list resources with unknown resource type %s", request.GetResourceTypeId()) + err := fmt.Errorf("error: list resources with unknown resource type %s", request.GetResourceTypeId()) + 
b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } token := pagination.Token{ @@ -135,14 +138,15 @@ func (b *builder) ListResources(ctx context.Context, request *v2.ResourcesServic Annotations: retOptions.Annotations, }.Build() if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return resp, fmt.Errorf("error: listing resources failed: %w", err) } if request.GetPageToken() != "" && request.GetPageToken() == retOptions.NextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - errMsg := fmt.Sprintf(" with page token %s resource type id %s and resource parent id: %s this is most likely a connector bug", + err := status.Errorf(codes.Internal, + "listing resources failed: next page token unchanged (token=%s, type=%s, parent=%s) - likely a connector bug", request.GetPageToken(), request.GetResourceTypeId(), request.GetParentResourceId()) - return resp, fmt.Errorf("error: listing resources failed: next page token is the same as the current page token %s", errMsg) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return resp, err } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) @@ -158,17 +162,19 @@ func (b *builder) GetResource(ctx context.Context, request *v2.ResourceGetterSer resourceType := request.GetResourceId().GetResourceType() rb, ok := b.resourceTargetedSyncers[resourceType] if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Errorf(codes.Unimplemented, "error: get resource with unknown resource type %s", resourceType) + err := status.Errorf(codes.Unimplemented, "error: get resource with unknown resource type %s", resourceType) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } resource, annos, err := rb.Get(ctx, request.GetResourceId(), request.GetParentResourceId()) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: get resource failed: %w", err) } if resource == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, status.Error(codes.NotFound, "error: get resource returned nil") + err := status.Error(codes.NotFound, "error: get resource returned nil") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) @@ -188,8 +194,9 @@ func (b *builder) ListStaticEntitlements(ctx context.Context, request *v2.Entitl tt := tasks.ListStaticEntitlementsType rb, ok := b.resourceSyncers[request.GetResourceTypeId()] if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: list static entitlements with unknown resource type %s", request.GetResourceTypeId()) + err := fmt.Errorf("error: list static entitlements with unknown resource type %s", request.GetResourceTypeId()) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } rbse, ok := rb.(StaticEntitlementSyncerV2) if !ok { @@ -217,12 +224,13 @@ func (b *builder) ListStaticEntitlements(ctx context.Context, request *v2.Entitl Annotations: retOptions.Annotations, }.Build() if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: listing static entitlements failed: %w", err) } if request.GetPageToken() != "" && request.GetPageToken() == 
retOptions.NextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing static entitlements failed: next page token is the same as the current page token. this is most likely a connector bug") + err := status.Error(codes.Internal, "listing static entitlements failed: next page token unchanged - likely a connector bug") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return resp, err } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) @@ -238,8 +246,9 @@ func (b *builder) ListEntitlements(ctx context.Context, request *v2.Entitlements tt := tasks.ListEntitlementsType rb, ok := b.resourceSyncers[request.GetResource().GetId().GetResourceType()] if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: list entitlements with unknown resource type %s", request.GetResource().GetId().GetResourceType()) + err := fmt.Errorf("error: list entitlements with unknown resource type %s", request.GetResource().GetId().GetResourceType()) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } token := pagination.Token{ Size: int(request.GetPageSize()), @@ -257,12 +266,13 @@ func (b *builder) ListEntitlements(ctx context.Context, request *v2.Entitlements Annotations: retOptions.Annotations, }.Build() if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return resp, fmt.Errorf("error: listing entitlements failed: %w", err) } if request.GetPageToken() != "" && request.GetPageToken() == retOptions.NextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing entitlements failed: next page token is the same as the current page token. this is most likely a connector bug") + err := status.Error(codes.Internal, "listing entitlements failed: next page token unchanged - likely a connector bug") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return resp, err } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) @@ -279,8 +289,9 @@ func (b *builder) ListGrants(ctx context.Context, request *v2.GrantsServiceListG rid := request.GetResource().GetId() rb, ok := b.resourceSyncers[rid.GetResourceType()] if !ok { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: list grants with unknown resource type %s", rid.GetResourceType()) + err := fmt.Errorf("error: list grants with unknown resource type %s", rid.GetResourceType()) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } token := pagination.Token{ @@ -300,14 +311,15 @@ func (b *builder) ListGrants(ctx context.Context, request *v2.GrantsServiceListG }.Build() if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return resp, fmt.Errorf("error: listing grants for resource %s/%s failed: %w", rid.GetResourceType(), rid.GetResource(), err) } if request.GetPageToken() != "" && request.GetPageToken() == retOptions.NextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return resp, fmt.Errorf("error: listing grants for resource %s/%s failed: next page token is the same as the current page token. 
this is most likely a connector bug", - rid.GetResourceType(), - rid.GetResource()) + err := status.Errorf(codes.Internal, + "listing grants for resource %s/%s failed: next page token unchanged - likely a connector bug", + rid.GetResourceType(), rid.GetResource()) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return resp, err } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/tickets.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/tickets.go index 740cc3c5..e1ea8bf5 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/tickets.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/tickets.go @@ -10,6 +10,8 @@ import ( "github.com/conductorone/baton-sdk/pkg/pagination" "github.com/conductorone/baton-sdk/pkg/retry" "github.com/conductorone/baton-sdk/pkg/types/tasks" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // TicketManager extends ConnectorBuilder to add capabilities for ticket management. @@ -37,19 +39,21 @@ func (b *builder) BulkCreateTickets(ctx context.Context, request *v2.TicketsServ start := b.nowFunc() tt := tasks.BulkCreateTicketsType if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") + err := status.Error(codes.Unimplemented, "ticket manager not implemented") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } reqBody := request.GetTicketRequests() if len(reqBody) == 0 { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: request body had no items") + err := status.Error(codes.InvalidArgument, "request body had no items") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } ticketsResponse, err := b.ticketManager.BulkCreateTickets(ctx, request) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: creating tickets failed: %w", err) } @@ -66,19 +70,21 @@ func (b *builder) BulkGetTickets(ctx context.Context, request *v2.TicketsService start := b.nowFunc() tt := tasks.BulkGetTicketsType if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") + err := status.Error(codes.Unimplemented, "ticket manager not implemented") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } reqBody := request.GetTicketRequests() if len(reqBody) == 0 { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: request body had no items") + err := status.Error(codes.InvalidArgument, "request body had no items") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } ticketsResponse, err := b.ticketManager.BulkGetTickets(ctx, request) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: fetching tickets failed: %w", err) } @@ -95,8 +101,9 @@ func (b *builder) ListTicketSchemas(ctx context.Context, request *v2.TicketsServ start := b.nowFunc() tt := tasks.ListTicketSchemasType if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") + err := 
status.Error(codes.Unimplemented, "ticket manager not implemented") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } retryer := retry.NewRetryer(ctx, retry.RetryConfig{ @@ -112,8 +119,9 @@ func (b *builder) ListTicketSchemas(ctx context.Context, request *v2.TicketsServ }) if err == nil { if request.GetPageToken() != "" && request.GetPageToken() == nextPageToken { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: listing ticket schemas failed: next page token is the same as the current page token. this is most likely a connector bug") + err := status.Error(codes.Internal, "listing ticket schemas failed: next page token unchanged - likely a connector bug") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start)) @@ -126,7 +134,7 @@ func (b *builder) ListTicketSchemas(ctx context.Context, request *v2.TicketsServ if retryer.ShouldWaitAndRetry(ctx, err) { continue } - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: listing ticket schemas failed: %w", err) } } @@ -138,14 +146,16 @@ func (b *builder) CreateTicket(ctx context.Context, request *v2.TicketsServiceCr start := b.nowFunc() tt := tasks.CreateTicketType if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") + err := status.Error(codes.Unimplemented, "ticket manager not implemented") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } reqBody := request.GetRequest() if reqBody == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: request body is nil") + err := status.Error(codes.InvalidArgument, "request body is nil") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } cTicket := v2.Ticket_builder{ DisplayName: reqBody.GetDisplayName(), @@ -159,7 +169,7 @@ func (b *builder) CreateTicket(ctx context.Context, request *v2.TicketsServiceCr ticket, annos, err := b.ticketManager.CreateTicket(ctx, cTicket, request.GetSchema()) var resp *v2.TicketsServiceCreateTicketResponse if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) if ticket != nil { resp = v2.TicketsServiceCreateTicketResponse_builder{ Ticket: ticket, @@ -183,14 +193,15 @@ func (b *builder) GetTicket(ctx context.Context, request *v2.TicketsServiceGetTi start := b.nowFunc() tt := tasks.GetTicketType if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") + err := status.Error(codes.Unimplemented, "ticket manager not implemented") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } var resp *v2.TicketsServiceGetTicketResponse ticket, annos, err := b.ticketManager.GetTicket(ctx, request.GetId()) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) if ticket != nil { resp = v2.TicketsServiceGetTicketResponse_builder{ Ticket: ticket, @@ -214,13 +225,14 @@ func (b *builder) GetTicketSchema(ctx context.Context, request *v2.TicketsServic start := b.nowFunc() tt := tasks.GetTicketSchemaType if b.ticketManager == nil { - b.m.RecordTaskFailure(ctx, tt, 
b.nowFunc().Sub(start)) - return nil, fmt.Errorf("error: ticket manager not implemented") + err := status.Error(codes.Unimplemented, "ticket manager not implemented") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err } ticketSchema, annos, err := b.ticketManager.GetTicketSchema(ctx, request.GetId()) if err != nil { - b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start)) + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) return nil, fmt.Errorf("error: getting ticket metadata failed: %w", err) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go index 5b7d2148..fb5201ae 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go @@ -11,6 +11,7 @@ import ( "time" "github.com/conductorone/baton-sdk/pkg/bid" + "github.com/conductorone/baton-sdk/pkg/connectorbuilder" "github.com/conductorone/baton-sdk/pkg/synccompactor" "golang.org/x/sync/semaphore" "google.golang.org/protobuf/types/known/structpb" @@ -324,39 +325,41 @@ type syncCompactorConfig struct { } type runnerConfig struct { - rlCfg *ratelimitV1.RateLimiterConfig - rlDescriptors []*ratelimitV1.RateLimitDescriptors_Entry - onDemand bool - c1zPath string - clientAuth bool - clientID string - clientSecret string - provisioningEnabled bool - ticketingEnabled bool - actionsEnabled bool - grantConfig *grantConfig - revokeConfig *revokeConfig - eventFeedConfig *eventStreamConfig - tempDir string - createAccountConfig *createAccountConfig - invokeActionConfig *invokeActionConfig - listActionSchemasConfig *listActionSchemasConfig - deleteResourceConfig *deleteResourceConfig - rotateCredentialsConfig *rotateCredentialsConfig - createTicketConfig *createTicketConfig - bulkCreateTicketConfig *bulkCreateTicketConfig - listTicketSchemasConfig *listTicketSchemasConfig - getTicketConfig *getTicketConfig - syncDifferConfig *syncDifferConfig - syncCompactorConfig *syncCompactorConfig - skipFullSync bool - targetedSyncResourceIDs []string - externalResourceC1Z string - externalResourceEntitlementIdFilter string - skipEntitlementsAndGrants bool - skipGrants bool - sessionStoreEnabled bool - syncResourceTypeIDs []string + rlCfg *ratelimitV1.RateLimiterConfig + rlDescriptors []*ratelimitV1.RateLimitDescriptors_Entry + onDemand bool + c1zPath string + clientAuth bool + clientID string + clientSecret string + provisioningEnabled bool + ticketingEnabled bool + actionsEnabled bool + grantConfig *grantConfig + revokeConfig *revokeConfig + eventFeedConfig *eventStreamConfig + tempDir string + createAccountConfig *createAccountConfig + invokeActionConfig *invokeActionConfig + listActionSchemasConfig *listActionSchemasConfig + deleteResourceConfig *deleteResourceConfig + rotateCredentialsConfig *rotateCredentialsConfig + createTicketConfig *createTicketConfig + bulkCreateTicketConfig *bulkCreateTicketConfig + listTicketSchemasConfig *listTicketSchemasConfig + getTicketConfig *getTicketConfig + syncDifferConfig *syncDifferConfig + syncCompactorConfig *syncCompactorConfig + skipFullSync bool + targetedSyncResourceIDs []string + externalResourceC1Z string + externalResourceEntitlementIdFilter string + skipEntitlementsAndGrants bool + skipGrants bool + sessionStoreEnabled bool + syncResourceTypeIDs []string + defaultCapabilitiesConnectorBuilder connectorbuilder.ConnectorBuilder + defaultCapabilitiesConnectorBuilderV2 
connectorbuilder.ConnectorBuilderV2 } func WithSessionStoreEnabled() Option { @@ -701,6 +704,45 @@ func WithSkipGrants(skip bool) Option { } } +// WithDefaultCapabilitiesConnectorBuilder sets the default connector builder for the runner +// This is used by the "capabilities" sub-command to instantiate the connector. +func WithDefaultCapabilitiesConnectorBuilder(t connectorbuilder.ConnectorBuilder) Option { + return func(ctx context.Context, cfg *runnerConfig) error { + cfg.defaultCapabilitiesConnectorBuilder = t + return nil + } +} + +// WithDefaultCapabilitiesConnectorBuilderV2 sets the default connector builder for the runner +// This is used by the "capabilities" sub-command to instantiate the connector. +func WithDefaultCapabilitiesConnectorBuilderV2(t connectorbuilder.ConnectorBuilderV2) Option { + return func(ctx context.Context, cfg *runnerConfig) error { + cfg.defaultCapabilitiesConnectorBuilderV2 = t + return nil + } +} + +func ExtractDefaultConnector(ctx context.Context, options ...Option) (any, error) { + cfg := &runnerConfig{} + + for _, o := range options { + err := o(ctx, cfg) + if err != nil { + return nil, err + } + } + + if cfg.defaultCapabilitiesConnectorBuilder != nil { + return cfg.defaultCapabilitiesConnectorBuilder, nil + } + + if cfg.defaultCapabilitiesConnectorBuilderV2 != nil { + return cfg.defaultCapabilitiesConnectorBuilderV2, nil + } + + return nil, nil +} + func IsSessionStoreEnabled(ctx context.Context, options ...Option) (bool, error) { cfg := &runnerConfig{} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go index c557da26..65793949 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go @@ -11,10 +11,12 @@ import ( type SyncType string const ( - SyncTypeFull SyncType = "full" - SyncTypePartial SyncType = "partial" - SyncTypeResourcesOnly SyncType = "resources_only" - SyncTypeAny SyncType = "" + SyncTypeFull SyncType = "full" + SyncTypePartial SyncType = "partial" + SyncTypeResourcesOnly SyncType = "resources_only" + SyncTypePartialUpserts SyncType = "partial_upserts" // Diff sync: additions and modifications + SyncTypePartialDeletions SyncType = "partial_deletions" // Diff sync: deletions + SyncTypeAny SyncType = "" ) var AllSyncTypes = []SyncType{ @@ -22,6 +24,8 @@ var AllSyncTypes = []SyncType{ SyncTypeFull, SyncTypePartial, SyncTypeResourcesOnly, + SyncTypePartialUpserts, + SyncTypePartialDeletions, } // ConnectorStoreReader implements the ConnectorV2 API, along with getters for individual objects. diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go index 911bf095..19876916 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go @@ -60,6 +60,9 @@ type C1File struct { slowQueryLogTimesMu sync.Mutex slowQueryThreshold time.Duration slowQueryLogFrequency time.Duration + + // Sync cleanup settings + syncLimit int } var _ connectorstore.Writer = (*C1File)(nil) @@ -93,6 +96,14 @@ func WithC1FEncoderConcurrency(concurrency int) C1FOption { } } +// WithC1FSyncCountLimit sets the number of syncs to keep during cleanup. +// If not set, defaults to 2 (or BATON_KEEP_SYNC_COUNT env var if set). 
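+//
+// Hypothetical usage sketch (editorial illustration, not part of this patch);
+// dbFilePath and the value 5 are placeholders:
+//
+//	f, err := NewC1File(ctx, dbFilePath, WithC1FSyncCountLimit(5))
+//	if err != nil {
+//		return err
+//	}
+//	_ = f // during cleanup, only the 5 most recent syncs are retained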
+func WithC1FSyncCountLimit(limit int) C1FOption { + return func(o *C1File) { + o.syncLimit = limit + } +} + // Returns a C1File instance for the given db filepath. func NewC1File(ctx context.Context, dbFilePath string, opts ...C1FOption) (*C1File, error) { ctx, span := tracer.Start(ctx, "NewC1File") @@ -139,7 +150,9 @@ type c1zOptions struct { decoderOptions []DecoderOption readOnly bool encoderConcurrency int + syncLimit int } + type C1ZOption func(*c1zOptions) // WithTmpDir sets the temporary directory to extract the c1z file to. @@ -179,6 +192,14 @@ func WithEncoderConcurrency(concurrency int) C1ZOption { } } +// WithSyncLimit sets the number of syncs to keep during cleanup. +// If not set, defaults to 2 (or BATON_KEEP_SYNC_COUNT env var if set). +func WithSyncLimit(limit int) C1ZOption { + return func(o *c1zOptions) { + o.syncLimit = limit + } +} + // Returns a new C1File instance with its state stored at the provided filename. func NewC1ZFile(ctx context.Context, outputFilePath string, opts ...C1ZOption) (*C1File, error) { ctx, span := tracer.Start(ctx, "NewC1ZFile") @@ -207,6 +228,9 @@ func NewC1ZFile(ctx context.Context, outputFilePath string, opts ...C1ZOption) ( return nil, fmt.Errorf("encoder concurrency must be greater than 0") } c1fopts = append(c1fopts, WithC1FEncoderConcurrency(options.encoderConcurrency)) + if options.syncLimit > 0 { + c1fopts = append(c1fopts, WithC1FSyncCountLimit(options.syncLimit)) + } c1File, err := NewC1File(ctx, dbFilePath, c1fopts...) if err != nil { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file_attached.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file_attached.go index d7ee4c6d..54e64f4f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file_attached.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file_attached.go @@ -2,12 +2,15 @@ package dotc1z import ( "context" + "database/sql" "errors" "fmt" + "time" reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" "github.com/conductorone/baton-sdk/pkg/connectorstore" "github.com/doug-martin/goqu/v9" + "github.com/segmentio/ksuid" ) type C1FileAttached struct { @@ -37,8 +40,8 @@ func (c *C1FileAttached) CompactTable(ctx context.Context, baseSyncID string, ap selectList += ", " } columnList += col - if col == "sync_id" { - selectList += "? as sync_id" + if col == "sync_id" { //nolint:goconst,nolintlint // ... + selectList += "? as sync_id" //nolint:goconst,nolintlint // ... } else { selectList += col } @@ -174,3 +177,223 @@ func (c *C1FileAttached) UpdateSync(ctx context.Context, baseSync *reader_v2.Syn return nil } + +// GenerateSyncDiffFromFile compares the old sync (in attached) with the new sync (in main) +// and generates two new syncs in the main database. +// +// IMPORTANT: This assumes main=NEW/compacted and attached=OLD/base: +// - diffTableFromAttached: items in attached (OLD) not in main (NEW) = deletions +// - diffTableFromMain: items in main (NEW) not in attached (OLD) = upserts (additions) +// +// Parameters: +// - oldSyncID: the sync ID in the attached database (OLD/base state) +// - newSyncID: the sync ID in the main database (NEW/compacted state) +// +// Returns (upsertsSyncID, deletionsSyncID, error). 
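+//
+// Hypothetical usage sketch (editorial illustration, not part of this patch;
+// attachedFile, oldSyncID, and newSyncID are placeholders):
+//
+//	upsertsID, deletionsID, err := attachedFile.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
+//	if err != nil {
+//		return err
+//	}
+//	// upsertsID: partial_upserts sync holding items new or modified in NEW (main).
+//	// deletionsID: partial_deletions sync holding items present in OLD (attached) but absent from NEW.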
+func (c *C1FileAttached) GenerateSyncDiffFromFile(ctx context.Context, oldSyncID string, newSyncID string) (string, string, error) { + if !c.safe { + return "", "", errors.New("database has been detached") + } + + ctx, span := tracer.Start(ctx, "C1FileAttached.GenerateSyncDiffFromFile") + defer span.End() + + // Generate unique IDs for the diff syncs + deletionsSyncID := ksuid.New().String() + upsertsSyncID := ksuid.New().String() + + // Start transaction for atomicity + tx, err := c.file.rawDb.BeginTx(ctx, nil) + if err != nil { + return "", "", fmt.Errorf("failed to begin transaction: %w", err) + } + + // Ensure rollback on error + committed := false + defer func() { + if !committed { + _ = tx.Rollback() + } + }() + + now := time.Now().Format("2006-01-02 15:04:05.999999999") + + // Create the deletions sync first (so upserts is "latest") + // Link it to upserts sync bidirectionally + deletionsInsert := c.file.db.Insert(syncRuns.Name()).Rows(goqu.Record{ + "sync_id": deletionsSyncID, + "started_at": now, + "sync_token": "", + "sync_type": connectorstore.SyncTypePartialDeletions, + "parent_sync_id": oldSyncID, + "linked_sync_id": upsertsSyncID, + }) + query, args, err := deletionsInsert.ToSQL() + if err != nil { + return "", "", fmt.Errorf("failed to build deletions sync insert: %w", err) + } + if _, err = tx.ExecContext(ctx, query, args...); err != nil { + return "", "", fmt.Errorf("failed to create deletions sync: %w", err) + } + + // Create the upserts sync, linked to deletions sync + upsertsInsert := c.file.db.Insert(syncRuns.Name()).Rows(goqu.Record{ + "sync_id": upsertsSyncID, + "started_at": now, + "sync_token": "", + "sync_type": connectorstore.SyncTypePartialUpserts, + "parent_sync_id": oldSyncID, + "linked_sync_id": deletionsSyncID, + }) + query, args, err = upsertsInsert.ToSQL() + if err != nil { + return "", "", fmt.Errorf("failed to build upserts sync insert: %w", err) + } + if _, err = tx.ExecContext(ctx, query, args...); err != nil { + return "", "", fmt.Errorf("failed to create upserts sync: %w", err) + } + + // Process each table + // main=NEW, attached=OLD + // - diffTableFromAttachedTx finds items in OLD not in NEW = deletions + // - diffTableFromMainTx finds items in NEW not in OLD or modified = upserts + tables := []string{"v1_resource_types", "v1_resources", "v1_entitlements", "v1_grants"} + for _, tableName := range tables { + if err := c.diffTableFromAttachedTx(ctx, tx, tableName, oldSyncID, newSyncID, deletionsSyncID); err != nil { + return "", "", fmt.Errorf("failed to generate deletions for %s: %w", tableName, err) + } + if err := c.diffTableFromMainTx(ctx, tx, tableName, oldSyncID, newSyncID, upsertsSyncID); err != nil { + return "", "", fmt.Errorf("failed to generate upserts for %s: %w", tableName, err) + } + } + + // End the syncs (deletions first, then upserts) + endedAt := time.Now().Format("2006-01-02 15:04:05.999999999") + + endDeletions := c.file.db.Update(syncRuns.Name()). + Set(goqu.Record{"ended_at": endedAt}). + Where(goqu.C("sync_id").Eq(deletionsSyncID), goqu.C("ended_at").IsNull()) + query, args, err = endDeletions.ToSQL() + if err != nil { + return "", "", fmt.Errorf("failed to build end deletions sync: %w", err) + } + if _, err = tx.ExecContext(ctx, query, args...); err != nil { + return "", "", fmt.Errorf("failed to end deletions sync: %w", err) + } + + endUpserts := c.file.db.Update(syncRuns.Name()). + Set(goqu.Record{"ended_at": endedAt}). 
+ Where(goqu.C("sync_id").Eq(upsertsSyncID), goqu.C("ended_at").IsNull()) + query, args, err = endUpserts.ToSQL() + if err != nil { + return "", "", fmt.Errorf("failed to build end upserts sync: %w", err) + } + if _, err = tx.ExecContext(ctx, query, args...); err != nil { + return "", "", fmt.Errorf("failed to end upserts sync: %w", err) + } + + // Commit transaction + if err = tx.Commit(); err != nil { + return "", "", fmt.Errorf("failed to commit transaction: %w", err) + } + committed = true + c.file.dbUpdated = true + + return upsertsSyncID, deletionsSyncID, nil +} + +// diffTableFromAttachedTx finds items in attached (OLD) that don't exist in main (NEW). +// These are DELETIONS - items that existed before but no longer exist. +// Uses the provided transaction. +func (c *C1FileAttached) diffTableFromAttachedTx(ctx context.Context, tx *sql.Tx, tableName string, oldSyncID string, newSyncID string, targetSyncID string) error { + columns, err := c.getTableColumns(ctx, tableName) + if err != nil { + return err + } + + // Build column lists + columnList := "" + selectList := "" + for i, col := range columns { + if i > 0 { + columnList += ", " + selectList += ", " + } + columnList += col + if col == "sync_id" { + selectList += "? as sync_id" + } else { + selectList += col + } + } + + // Insert items from attached (OLD) that don't exist in main (NEW) + // oldSyncID is in attached, newSyncID is in main + //nolint:gosec // table names are from hardcoded list, not user input + query := fmt.Sprintf(` + INSERT INTO main.%s (%s) + SELECT %s + FROM attached.%s AS a + WHERE a.sync_id = ? + AND NOT EXISTS ( + SELECT 1 FROM main.%s AS m + WHERE m.external_id = a.external_id AND m.sync_id = ? + ) + `, tableName, columnList, selectList, tableName, tableName) + + _, err = tx.ExecContext(ctx, query, targetSyncID, oldSyncID, newSyncID) + return err +} + +// diffTableFromMainTx finds items in main (NEW) that are new or modified compared to attached (OLD). +// These are UPSERTS - items that are new or have changed. +// Uses the provided transaction. +func (c *C1FileAttached) diffTableFromMainTx(ctx context.Context, tx *sql.Tx, tableName string, oldSyncID string, newSyncID string, targetSyncID string) error { + columns, err := c.getTableColumns(ctx, tableName) + if err != nil { + return err + } + + // Build column lists + columnList := "" + selectList := "" + for i, col := range columns { + if i > 0 { + columnList += ", " + selectList += ", " + } + columnList += col + if col == "sync_id" { + selectList += "? as sync_id" + } else { + selectList += col + } + } + + // Insert items from main (NEW) that are: + // 1. Not in attached (OLD) - additions + // 2. In attached but with different data - modifications + // newSyncID is in main, oldSyncID is in attached + //nolint:gosec // table names are from hardcoded list, not user input + query := fmt.Sprintf(` + INSERT INTO main.%s (%s) + SELECT %s + FROM main.%s AS m + WHERE m.sync_id = ? + AND ( + NOT EXISTS ( + SELECT 1 FROM attached.%s AS a + WHERE a.external_id = m.external_id AND a.sync_id = ? + ) + OR EXISTS ( + SELECT 1 FROM attached.%s AS a + WHERE a.external_id = m.external_id + AND a.sync_id = ? 
+ AND a.data != m.data + ) + ) + `, tableName, columnList, selectList, tableName, tableName, tableName) + + _, err = tx.ExecContext(ctx, query, targetSyncID, newSyncID, oldSyncID, oldSyncID) + return err +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go index 4324a7cc..162001e4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go @@ -99,7 +99,7 @@ func (c *C1File) diffTableQuery(table tableDescriptor, baseSyncID, appliedSyncID queryColumns := []interface{}{} for _, col := range columns { - if col == "sync_id" { + if col == "sync_id" { //nolint:goconst,nolintlint // ... queryColumns = append(queryColumns, goqu.L(fmt.Sprintf("'%s' as sync_id", newSyncID))) continue } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/session_store.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/session_store.go index 796c0648..c20be9d6 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/session_store.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/session_store.go @@ -216,7 +216,8 @@ func (c *C1File) Clear(ctx context.Context, opt ...sessions.SessionStoreOption) q = q.Where(goqu.C("sync_id").Eq(bag.SyncID)) if bag.Prefix != "" { - q = q.Where(goqu.C("key").Like(escapeLike(bag.Prefix) + "%")) + pattern := escapeLike(bag.Prefix) + "%" + q = q.Where(goqu.L("key LIKE ? ESCAPE '\\'", pattern)) } sql, params, err := q.ToSQL() @@ -366,7 +367,8 @@ func (c *C1File) getAllChunk(ctx context.Context, pageToken string, sizeLimit in Limit(100) if bag.Prefix != "" { - q = q.Where(goqu.C("key").Like(escapeLike(bag.Prefix) + "%")) + pattern := escapeLike(bag.Prefix) + "%" + q = q.Where(goqu.L("key LIKE ? ESCAPE '\\'", pattern)) } if pageToken != "" { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go index 9c3efa34..296da43f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go @@ -279,7 +279,8 @@ func listConnectorObjects[T proto.Message](ctx context.Context, c *C1File, table return ret, nextPageToken, nil } -var protoMarshaler = proto.MarshalOptions{Deterministic: false} +// This is required for sync diffs to work. Its not much slower. +var protoMarshaler = proto.MarshalOptions{Deterministic: true} // prepareSingleConnectorObjectRow processes a single message and returns the prepared record. func prepareSingleConnectorObjectRow[T proto.Message]( @@ -344,8 +345,8 @@ func prepareConnectorObjectRowsParallel[T proto.Message]( protoMarshallers := make([]proto.MarshalOptions, numWorkers) for i := range numWorkers { - // Don't enable deterministic marshaling, as it sorts keys in lexicographical order which hurts performance. - protoMarshallers[i] = proto.MarshalOptions{} + // Deterministic marshaling is required for sync diffs to work. Its not much slower. 
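The deterministic setting applied here is what makes the a.data != m.data comparison in the diff queries trustworthy: repeated marshals of the same message, including map fields, produce identical bytes. A self-contained check, using structpb purely as a stand-in message type:

package main

import (
	"bytes"
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	msg, err := structpb.NewStruct(map[string]interface{}{"b": 2, "a": 1, "c": 3})
	if err != nil {
		log.Fatal(err)
	}

	opts := proto.MarshalOptions{Deterministic: true}
	first, _ := opts.Marshal(msg)
	second, _ := opts.Marshal(msg)

	// With Deterministic set, map entries are emitted in a stable order, so
	// the two byte slices always compare equal.
	fmt.Println(bytes.Equal(first, second))
}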
+ protoMarshallers[i] = proto.MarshalOptions{Deterministic: true} } rows := make([]*goqu.Record, len(msgs)) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go index e2b4ee78..4426a3e5 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go @@ -32,7 +32,8 @@ create table if not exists %s ( ended_at datetime, sync_token text not null, sync_type text not null default 'full', - parent_sync_id text not null default '' + parent_sync_id text not null default '', + linked_sync_id text not null default '' ); create unique index if not exists %s on %s (sync_id);` @@ -83,6 +84,19 @@ func (r *syncRunsTable) Migrations(ctx context.Context, db *goqu.Database) error } } + // Check if linked_sync_id column exists + var linkedSyncIDExists int + err = db.QueryRowContext(ctx, fmt.Sprintf("select count(*) from pragma_table_info('%s') where name='linked_sync_id'", r.Name())).Scan(&linkedSyncIDExists) + if err != nil { + return err + } + if linkedSyncIDExists == 0 { + _, err = db.ExecContext(ctx, fmt.Sprintf("alter table %s add column linked_sync_id text not null default ''", r.Name())) + if err != nil { + return err + } + } + return nil } @@ -93,6 +107,7 @@ type syncRun struct { SyncToken string Type connectorstore.SyncType ParentSyncID string + LinkedSyncID string } // getCachedViewSyncRun returns the cached sync run for read operations. @@ -144,7 +159,7 @@ func (c *C1File) getLatestUnfinishedSync(ctx context.Context, syncType connector oneWeekAgo := time.Now().AddDate(0, 0, -7) ret := &syncRun{} q := c.db.From(syncRuns.Name()) - q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id") + q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id") q = q.Where(goqu.C("ended_at").IsNull()) q = q.Where(goqu.C("started_at").Gte(oneWeekAgo)) q = q.Order(goqu.C("started_at").Desc()) @@ -160,7 +175,7 @@ func (c *C1File) getLatestUnfinishedSync(ctx context.Context, syncType connector row := c.db.QueryRowContext(ctx, query, args...) - err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID) + err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil @@ -187,7 +202,7 @@ func (c *C1File) getFinishedSync(ctx context.Context, offset uint, syncType conn ret := &syncRun{} q := c.db.From(syncRuns.Name()) - q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id") + q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id") q = q.Where(goqu.C("ended_at").IsNotNull()) if syncType != connectorstore.SyncTypeAny { q = q.Where(goqu.C("sync_type").Eq(syncType)) @@ -206,7 +221,7 @@ func (c *C1File) getFinishedSync(ctx context.Context, offset uint, syncType conn row := c.db.QueryRowContext(ctx, query, args...) 
- err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID) + err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil @@ -227,7 +242,7 @@ func (c *C1File) ListSyncRuns(ctx context.Context, pageToken string, pageSize ui } q := c.db.From(syncRuns.Name()).Prepared(true) - q = q.Select("id", "sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id") + q = q.Select("id", "sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id") if pageToken != "" { q = q.Where(goqu.C("id").Gte(pageToken)) @@ -262,7 +277,7 @@ func (c *C1File) ListSyncRuns(ctx context.Context, pageToken string, pageSize ui } rowId := 0 data := &syncRun{} - err := rows.Scan(&rowId, &data.ID, &data.StartedAt, &data.EndedAt, &data.SyncToken, &data.Type, &data.ParentSyncID) + err := rows.Scan(&rowId, &data.ID, &data.StartedAt, &data.EndedAt, &data.SyncToken, &data.Type, &data.ParentSyncID, &data.LinkedSyncID) if err != nil { return nil, "", err } @@ -351,7 +366,7 @@ func (c *C1File) getSync(ctx context.Context, syncID string) (*syncRun, error) { ret := &syncRun{} q := c.db.From(syncRuns.Name()) - q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id") + q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id") q = q.Where(goqu.C("sync_id").Eq(syncID)) query, args, err := q.ToSQL() @@ -359,7 +374,7 @@ func (c *C1File) getSync(ctx context.Context, syncID string) (*syncRun, error) { return nil, err } row := c.db.QueryRowContext(ctx, query, args...) - err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID) + err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID) if err != nil { return nil, err } @@ -558,6 +573,10 @@ func (c *C1File) StartNewSync(ctx context.Context, syncType connectorstore.SyncT } func (c *C1File) insertSyncRun(ctx context.Context, syncID string, syncType connectorstore.SyncType, parentSyncID string) error { + return c.insertSyncRunWithLink(ctx, syncID, syncType, parentSyncID, "") +} + +func (c *C1File) insertSyncRunWithLink(ctx context.Context, syncID string, syncType connectorstore.SyncType, parentSyncID string, linkedSyncID string) error { if c.readOnly { return ErrReadOnly } @@ -569,6 +588,7 @@ func (c *C1File) insertSyncRun(ctx context.Context, syncID string, syncType conn "sync_token": "", "sync_type": syncType, "parent_sync_id": parentSyncID, + "linked_sync_id": linkedSyncID, }) query, args, err := q.ToSQL() @@ -659,8 +679,9 @@ func (c *C1File) Cleanup(ctx context.Context) error { return nil } - var ret []*syncRun + var fullSyncs []*syncRun var partials []*syncRun + var diffSyncs []*syncRun pageToken := "" for { @@ -673,10 +694,13 @@ func (c *C1File) Cleanup(ctx context.Context) error { if sr.EndedAt == nil { continue } - if sr.Type == connectorstore.SyncTypePartial || sr.Type == connectorstore.SyncTypeResourcesOnly { + switch sr.Type { + case connectorstore.SyncTypePartial, connectorstore.SyncTypeResourcesOnly: partials = append(partials, sr) - } else { - ret = append(ret, sr) + case connectorstore.SyncTypePartialUpserts, connectorstore.SyncTypePartialDeletions: + diffSyncs = append(diffSyncs, sr) + default: + fullSyncs = append(fullSyncs, sr) } } @@ -687,27 +711,31 @@ func (c 
*C1File) Cleanup(ctx context.Context) error { } syncLimit := 2 - if customSyncLimit, err := strconv.ParseInt(os.Getenv("BATON_KEEP_SYNC_COUNT"), 10, 64); err == nil && customSyncLimit > 0 { + if c.syncLimit > 0 { + syncLimit = c.syncLimit + } else if customSyncLimit, err := strconv.ParseInt(os.Getenv("BATON_KEEP_SYNC_COUNT"), 10, 64); err == nil && customSyncLimit > 0 { syncLimit = int(customSyncLimit) } - l.Debug("found syncs", zap.Int("count", len(ret)), zap.Int("sync_limit", syncLimit)) - if len(ret) <= syncLimit { - return nil - } + l.Debug("found syncs", + zap.Int("full_count", len(fullSyncs)), + zap.Int("partial_count", len(partials)), + zap.Int("diff_count", len(diffSyncs)), + zap.Int("sync_limit", syncLimit)) - l.Info("Cleaning up old sync data...") - for i := 0; i < len(ret)-syncLimit; i++ { - err = c.DeleteSyncRun(ctx, ret[i].ID) - if err != nil { - return err + // Clean up old full syncs beyond the limit + if len(fullSyncs) > syncLimit { + l.Info("Cleaning up old sync data...") + for i := 0; i < len(fullSyncs)-syncLimit; i++ { + err = c.DeleteSyncRun(ctx, fullSyncs[i].ID) + if err != nil { + return err + } + l.Info("Removed old sync data.", zap.String("sync_date", fullSyncs[i].EndedAt.Format(time.RFC3339)), zap.String("sync_id", fullSyncs[i].ID)) } - l.Info("Removed old sync data.", zap.String("sync_date", ret[i].EndedAt.Format(time.RFC3339)), zap.String("sync_id", ret[i].ID)) - } - // Delete non-full syncs that ended before the earliest-kept full sync started - if len(ret) > syncLimit { - earliestKeptSync := ret[len(ret)-syncLimit] + // Delete partial syncs that ended before the earliest-kept full sync started + earliestKeptSync := fullSyncs[len(fullSyncs)-syncLimit] l.Debug("Earliest kept sync", zap.String("sync_id", earliestKeptSync.ID), zap.Time("started_at", *earliestKeptSync.StartedAt)) for _, partial := range partials { @@ -724,6 +752,56 @@ func (c *C1File) Cleanup(ctx context.Context) error { } } + // Clean up old diff syncs - keep only the most recent diff sync (upserts or deletions) and its linked pair (if present) + if len(diffSyncs) > 2 { + // Build a map for quick lookup by ID + syncByID := make(map[string]*syncRun) + for _, ds := range diffSyncs { + syncByID[ds.ID] = ds + } + + // Determine which syncs to keep. diffSyncs are ordered by row id (ascending), + // so the last element is the most recently created diff sync. + keepIDs := make(map[string]bool) + latestDiff := diffSyncs[len(diffSyncs)-1] + keepIDs[latestDiff.ID] = true + l.Debug("keeping latest diff sync", + zap.String("sync_id", latestDiff.ID), + zap.String("sync_type", string(latestDiff.Type))) + + // Also keep its linked pair if it exists. + // NOTE: We intentionally do NOT require a bidirectional link; if the latest diff sync exists, + // it's better to keep it and best-effort keep its linked partner (if present). 
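The cleanup path now sorts runs into full, partial, and diff buckets before deciding what to delete. A small helper mirroring that categorization (the function name is made up for illustration):

package example

import (
	"github.com/conductorone/baton-sdk/pkg/connectorstore"
)

// isDiffSync reports whether a sync run was produced by diff generation and is
// therefore retired separately from full and partial syncs during cleanup.
func isDiffSync(t connectorstore.SyncType) bool {
	switch t {
	case connectorstore.SyncTypePartialUpserts, connectorstore.SyncTypePartialDeletions:
		return true
	default:
		return false
	}
}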
+ if latestDiff.LinkedSyncID != "" { + if linkedSync := syncByID[latestDiff.LinkedSyncID]; linkedSync != nil { + keepIDs[linkedSync.ID] = true + l.Debug("keeping linked diff sync", + zap.String("sync_id", linkedSync.ID), + zap.String("sync_type", string(linkedSync.Type))) + if linkedSync.LinkedSyncID != latestDiff.ID { + l.Warn("diff sync link is not bidirectional", + zap.String("sync_id", latestDiff.ID), + zap.String("linked_sync_id", latestDiff.LinkedSyncID), + zap.String("linked_sync_linked_sync_id", linkedSync.LinkedSyncID)) + } + } + } + + // Delete all diff syncs except the ones we're keeping + for _, ds := range diffSyncs { + if keepIDs[ds.ID] { + continue + } + err = c.DeleteSyncRun(ctx, ds.ID) + if err != nil { + return err + } + l.Info("Removed old diff sync.", + zap.String("sync_type", string(ds.Type)), + zap.String("sync_id", ds.ID)) + } + } + l.Debug("vacuuming database") err = c.Vacuum(ctx) if err != nil { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/rule_builders.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/rule_builders.go index 4e7afef7..9dc2b4de 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/rule_builders.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/rule_builders.go @@ -238,3 +238,36 @@ func (b *StringSliceRuler) ItemRules(f func(stringer *StringRuler)) *StringSlice f(b.stringer) return b } + +type ResourceIdSliceRuler struct { + rules *v1_conf.RepeatedResourceIdRules +} + +func NewRepeatedResourceIdBuilder(rules *v1_conf.RepeatedResourceIdRules) *ResourceIdSliceRuler { + return &ResourceIdSliceRuler{rules: rules} +} + +func (b *ResourceIdSliceRuler) MinItems(value uint64) *ResourceIdSliceRuler { + b.rules.SetMinItems(value) + return b +} + +func (b *ResourceIdSliceRuler) MaxItems(value uint64) *ResourceIdSliceRuler { + b.rules.SetMaxItems(value) + return b +} + +func (b *ResourceIdSliceRuler) Unique(unique bool) *ResourceIdSliceRuler { + b.rules.SetUnique(unique) + return b +} + +func (b *ResourceIdSliceRuler) ValidateEmpty(value bool) *ResourceIdSliceRuler { + b.rules.SetValidateEmpty(value) + return b +} + +func (b *ResourceIdSliceRuler) AllowedResourceTypeIDs(typeIDs []string) *ResourceIdSliceRuler { + b.rules.SetAllowedResourceTypeIds(typeIDs) + return b +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go index 9ec1dff3..03c92e77 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/validation.go @@ -306,6 +306,58 @@ func ValidateStringMapRules(r *v1_conf.StringMapRules, v map[string]any, name st return nil } +func ValidateRepeatedResourceIdRules(r *v1_conf.RepeatedResourceIdRules, v []*v1_conf.ResourceId, name string) error { + if r == nil { + return nil + } + if r.GetIsRequired() && len(v) == 0 { + return fmt.Errorf("field %s of type []*ResourceId is marked as required but it has a zero-value", name) + } + + if !r.GetValidateEmpty() && len(v) == 0 { + return nil + } + + if r.HasMinItems() && uint64(len(v)) < r.GetMinItems() { + return fmt.Errorf("field %s: value must have at least %d items but got %d", name, r.GetMinItems(), len(v)) + } + if r.HasMaxItems() && uint64(len(v)) > r.GetMaxItems() { + return fmt.Errorf("field %s: value must have at most %d items but got %d", name, r.GetMaxItems(), len(v)) + } + if r.GetUnique() { + type resourceKey struct { + typeID string + id string + } + uniqueValues := 
make(map[resourceKey]struct{}) + for _, item := range v { + if item == nil { + continue + } + key := resourceKey{typeID: item.GetResourceTypeId(), id: item.GetResourceId()} + if _, exists := uniqueValues[key]; exists { + return fmt.Errorf("field %s: value must not contain duplicate items but got multiple (%s, %s)", name, key.typeID, key.id) + } + uniqueValues[key] = struct{}{} + } + } + if len(r.GetAllowedResourceTypeIds()) > 0 { + allowedTypes := make(map[string]struct{}) + for _, t := range r.GetAllowedResourceTypeIds() { + allowedTypes[t] = struct{}{} + } + for i, item := range v { + if item == nil { + continue + } + if _, ok := allowedTypes[item.GetResourceTypeId()]; !ok { + return fmt.Errorf("field %s: item at index %d has resource type '%s' which is not in the allowed list %v", name, i, item.GetResourceTypeId(), r.GetAllowedResourceTypeIds()) + } + } + } + return nil +} + func (e *ErrConfigurationMissingFields) Error() string { var messages []string diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/metrics/instrumentor.go b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/instrumentor.go index 52541d43..dd6e346f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/metrics/instrumentor.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/metrics/instrumentor.go @@ -2,9 +2,13 @@ package metrics import ( "context" + "strconv" "time" + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" "github.com/conductorone/baton-sdk/pkg/types/tasks" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) const ( @@ -12,10 +16,41 @@ const ( taskFailureCounterName = "baton_sdk.task_failure" taskDurationHistoName = "baton_sdk.task_latency" taskSuccessCounterDesc = "number of successful tasks by task type" - taskFailureCounterDesc = "number of failed tasks by task type" + taskFailureCounterDesc = "number of failed tasks by task type, grpc code, and rate limit status" taskDurationHistoDesc = "duration of all tasks by task type and status" ) +// FailureReason contains extracted information about why a task failed. +type FailureReason struct { + GrpcCode codes.Code + IsRateLimit bool +} + +// extractFailureReason extracts the gRPC status code and rate limit information from an error. 
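RecordTaskFailure now takes the error itself, so callers no longer pre-classify failures. A sketch, assuming a *metrics.M handle and task type supplied by the surrounding task runner (the wrapper name is illustrative), relying on the helper defined below:

package example

import (
	"context"
	"time"

	"github.com/conductorone/baton-sdk/pkg/metrics"
	"github.com/conductorone/baton-sdk/pkg/types/tasks"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func recordThrottledFailure(ctx context.Context, m *metrics.M, task tasks.TaskType, start time.Time) {
	// The status code becomes the grpc_code label; a RateLimitDescription
	// detail in the OVERLIMIT state would additionally set is_rate_limit=true.
	err := status.Error(codes.ResourceExhausted, "upstream API throttled the request")
	m.RecordTaskFailure(ctx, task, time.Since(start), err)

	// Plain errors without a gRPC status are labeled grpc_code="Unknown" and
	// is_rate_limit=false.
}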
+func extractFailureReason(err error) FailureReason { + if err == nil { + return FailureReason{GrpcCode: codes.Unknown} + } + + reason := FailureReason{ + GrpcCode: status.Code(err), + } + + // Check for rate limit details in gRPC status + if st, ok := status.FromError(err); ok { + for _, detail := range st.Details() { + if rl, ok := detail.(*v2.RateLimitDescription); ok { + if rl.GetStatus() == v2.RateLimitDescription_STATUS_OVERLIMIT { + reason.IsRateLimit = true + break + } + } + } + } + + return reason +} + type M struct { underlying Handler } @@ -27,11 +62,26 @@ func (m *M) RecordTaskSuccess(ctx context.Context, task tasks.TaskType, dur time h.Record(ctx, dur.Milliseconds(), map[string]string{"task_type": task.String(), "task_status": "success"}) } -func (m *M) RecordTaskFailure(ctx context.Context, task tasks.TaskType, dur time.Duration) { +func (m *M) RecordTaskFailure(ctx context.Context, task tasks.TaskType, dur time.Duration, err error) { + reason := extractFailureReason(err) + c := m.underlying.Int64Counter(taskFailureCounterName, taskFailureCounterDesc, Dimensionless) h := m.underlying.Int64Histogram(taskDurationHistoName, taskDurationHistoDesc, Milliseconds) - c.Add(ctx, 1, map[string]string{"task_type": task.String()}) - h.Record(ctx, dur.Milliseconds(), map[string]string{"task_type": task.String(), "task_status": "failure"}) + + counterAttrs := map[string]string{ + "task_type": task.String(), + "grpc_code": reason.GrpcCode.String(), + "is_rate_limit": strconv.FormatBool(reason.IsRateLimit), + } + histoAttrs := map[string]string{ + "task_type": task.String(), + "task_status": "failure", + "grpc_code": reason.GrpcCode.String(), + "is_rate_limit": strconv.FormatBool(reason.IsRateLimit), + } + + c.Add(ctx, 1, counterAttrs) + h.Record(ctx, dur.Milliseconds(), histoAttrs) } func New(handler Handler) *M { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go b/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go index 18bd2d63..e69463df 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/provisioner/provisioner.go @@ -184,7 +184,7 @@ func (p *Provisioner) grant(ctx context.Context) error { DisplayName: principal.GetResource().GetDisplayName(), Annotations: principal.GetResource().GetAnnotations(), Description: principal.GetResource().GetDescription(), - ExternalId: principal.GetResource().GetExternalId(), + ExternalId: principal.GetResource().GetExternalId(), //nolint:staticcheck // Deprecated. // Omit parent resource ID so that behavior is the same as ConductorOne's provisioning mode ParentResourceId: nil, }.Build() @@ -247,7 +247,7 @@ func (p *Provisioner) revoke(ctx context.Context) error { DisplayName: principal.GetResource().GetDisplayName(), Annotations: principal.GetResource().GetAnnotations(), Description: principal.GetResource().GetDescription(), - ExternalId: principal.GetResource().GetExternalId(), + ExternalId: principal.GetResource().GetExternalId(), //nolint:staticcheck // Deprecated. 
// Omit parent resource ID so that behavior is the same as ConductorOne's provisioning mode ParentResourceId: nil, }.Build() diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go index ecef98b7..f2d6acbd 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go @@ -1,3 +1,3 @@ package sdk -const Version = "v0.6.23" +const Version = "v0.7.3" diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go index 8e175146..d2e8be55 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go @@ -1168,9 +1168,6 @@ func (s *syncer) syncResources(ctx context.Context) error { } } - // Set the resource creation source - r.SetCreationSource(v2.Resource_CREATION_SOURCE_CONNECTOR_LIST_RESOURCES) - bulkPutResoruces = append(bulkPutResoruces, r) err = s.getSubResources(ctx, r) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go index 00291d53..4542f26d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/attached/attached.go @@ -23,25 +23,59 @@ func NewAttachedCompactor(base *dotc1z.C1File, applied *dotc1z.C1File) *Compacto } } +func latestFinishedCompactableSync(ctx context.Context, f *dotc1z.C1File) (*reader_v2.SyncRun, error) { + // Compaction must NOT operate on diff syncs (partial_upserts / partial_deletions). + // We want the latest finished "snapshot-like" sync. 
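The helper continued below walks each snapshot-like sync type and keeps the newest finished run. A reduced sketch for a single type, using the same reader request builder (the function name is an assumption):

package example

import (
	"context"
	"errors"

	reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2"
	"github.com/conductorone/baton-sdk/pkg/connectorstore"
	"github.com/conductorone/baton-sdk/pkg/dotc1z"
)

func latestFullSync(ctx context.Context, f *dotc1z.C1File) (*reader_v2.SyncRun, error) {
	resp, err := f.GetLatestFinishedSync(ctx, reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest_builder{
		SyncType: string(connectorstore.SyncTypeFull),
	}.Build())
	if err != nil {
		return nil, err
	}
	if resp.GetSync() == nil {
		return nil, errors.New("no finished full sync found")
	}
	return resp.GetSync(), nil
}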
+ candidates := []connectorstore.SyncType{ + connectorstore.SyncTypeFull, + connectorstore.SyncTypeResourcesOnly, + connectorstore.SyncTypePartial, + } + + var best *reader_v2.SyncRun + for _, st := range candidates { + resp, err := f.GetLatestFinishedSync(ctx, reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest_builder{ + SyncType: string(st), + }.Build()) + if err != nil { + return nil, err + } + s := resp.GetSync() + if s == nil { + continue + } + + if best == nil || s.GetEndedAt().AsTime().After(best.GetEndedAt().AsTime()) { + best = s + } + } + + return best, nil +} + func (c *Compactor) Compact(ctx context.Context) error { - baseSync, err := c.base.GetLatestFinishedSync(ctx, reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest_builder{ - SyncType: string(connectorstore.SyncTypeAny), - }.Build()) + baseSync, err := latestFinishedCompactableSync(ctx, c.base) if err != nil { return fmt.Errorf("failed to get base sync: %w", err) } - if baseSync == nil || baseSync.GetSync() == nil { - return fmt.Errorf("no finished sync found in base") + if baseSync == nil { + return fmt.Errorf( + "no finished compactable sync found in base (diff sync types %q/%q are not compactable)", + string(connectorstore.SyncTypePartialUpserts), + string(connectorstore.SyncTypePartialDeletions), + ) } - appliedSync, err := c.applied.GetLatestFinishedSync(ctx, reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest_builder{ - SyncType: string(connectorstore.SyncTypeAny), - }.Build()) + appliedSync, err := latestFinishedCompactableSync(ctx, c.applied) if err != nil { return fmt.Errorf("failed to get applied sync: %w", err) } - if appliedSync == nil || appliedSync.GetSync() == nil { - return fmt.Errorf("no finished sync found in applied") + if appliedSync == nil { + return fmt.Errorf( + "no finished compactable sync found in applied (diff sync types %q/%q are not compactable)", + string(connectorstore.SyncTypePartialUpserts), + string(connectorstore.SyncTypePartialDeletions), + ) } l := ctxzap.Extract(ctx) @@ -58,7 +92,7 @@ func (c *Compactor) Compact(ctx context.Context) error { } }() - if err := c.processRecords(ctx, attached, baseSync.GetSync(), appliedSync.GetSync()); err != nil { + if err := c.processRecords(ctx, attached, baseSync, appliedSync); err != nil { return fmt.Errorf("failed to process records: %w", err) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go index 2dafb58d..3a43622f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go @@ -37,6 +37,7 @@ type Compactor struct { tmpDir string destDir string runDuration time.Duration + syncLimit int } type CompactableSync struct { @@ -68,6 +69,13 @@ func WithRunDuration(runDuration time.Duration) Option { } } +// WithSyncLimit sets the number of syncs to keep after compaction cleanup. +func WithSyncLimit(limit int) Option { + return func(c *Compactor) { + c.syncLimit = limit + } +} + func NewCompactor(ctx context.Context, outputDir string, compactableSyncs []*CompactableSync, opts ...Option) (*Compactor, func() error, error) { if len(compactableSyncs) < 2 { return nil, nil, ErrNotEnoughFilesToCompact @@ -151,6 +159,9 @@ func (c *Compactor) Compact(ctx context.Context) (*CompactableSync, error) { // Use parallel encoding. 
dotc1z.WithEncoderConcurrency(0), } + if c.syncLimit > 0 { + opts = append(opts, dotc1z.WithSyncLimit(c.syncLimit)) + } fileName := fmt.Sprintf("compacted-%s.c1z", c.entries[0].SyncID) destFilePath := path.Join(c.tmpDir, fileName) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go index 5ceda817..02737057 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go @@ -3,6 +3,7 @@ package c1api import ( "context" "errors" + "fmt" "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" "go.uber.org/zap" @@ -138,6 +139,23 @@ func (c *actionInvokeTaskHandler) HandleTask(ctx context.Context) error { return c.helpers.FinishTask(ctx, nil, nil, err) } + // Check if the action itself failed and propagate the error + if resp.GetStatus() == v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED { + errMsg := "action failed" + if resp.GetResponse() != nil && resp.GetResponse().GetFields() != nil { + if errField, ok := resp.GetResponse().GetFields()["error"]; ok { + errMsg = errField.GetStringValue() + } + } + l.Error("ActionInvoke failed", + zap.String("error", errMsg), + zap.String("action_id", resp.GetId()), + zap.String("action_name", resp.GetName()), + zap.Stringer("status", resp.GetStatus()), + ) + return c.helpers.FinishTask(ctx, resp, nil, fmt.Errorf("%s", errMsg)) + } + l.Debug("ActionInvoke response", zap.Any("resp", resp)) return c.helpers.FinishTask(ctx, resp, nil, nil) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/grant/grant.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/grant/grant.go index 972c6344..b472ca99 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/grant/grant.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/grant/grant.go @@ -36,9 +36,10 @@ func WithGrantMetadata(metadata map[string]interface{}) GrantOption { } } +// WithExternalPrincipalID: Deprecated. This field is no longer used. func WithExternalPrincipalID(externalID *v2.ExternalId) GrantOption { return func(g *v2.Grant) error { - g.GetPrincipal().SetExternalId(externalID) + g.GetPrincipal().SetExternalId(externalID) //nolint:staticcheck // Deprecated. return nil } } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/resource.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/resource.go index 33d3f132..b7dae67e 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/resource.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/resource.go @@ -9,6 +9,8 @@ import ( "github.com/conductorone/baton-sdk/pkg/annotations" "github.com/conductorone/baton-sdk/pkg/pagination" "github.com/conductorone/baton-sdk/pkg/types/sessions" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" ) @@ -29,9 +31,10 @@ func WithAnnotation(msgs ...proto.Message) ResourceOption { } } +// WithExternalID: Deprecated. This field is no longer used. func WithExternalID(externalID *v2.ExternalId) ResourceOption { return func(r *v2.Resource) error { - r.SetExternalId(externalID) + r.SetExternalId(externalID) //nolint:staticcheck // Deprecated. 
return nil } } @@ -132,6 +135,39 @@ func WithRoleTrait(opts ...RoleTraitOption) ResourceOption { } } +func WithScopeBindingTrait(opts ...ScopeBindingTraitOption) ResourceOption { + return func(r *v2.Resource) error { + rt := &v2.ScopeBindingTrait{} + + annos := annotations.Annotations(r.GetAnnotations()) + _, err := annos.Pick(rt) + if err != nil { + return err + } + + for _, o := range opts { + err := o(rt) + if err != nil { + return err + } + } + + roleId := rt.GetRoleId() + scopeResourceId := rt.GetScopeResourceId() + if roleId == nil { + return status.Errorf(codes.InvalidArgument, "role ID is required for scope binding trait") + } + if scopeResourceId == nil { + return status.Errorf(codes.InvalidArgument, "scope resource ID is required for scope binding trait") + } + + annos.Update(rt) + r.SetAnnotations(annos) + + return nil + } +} + func WithAppTrait(opts ...AppTraitOption) ResourceOption { return func(r *v2.Resource) error { at := &v2.AppTrait{} @@ -304,6 +340,24 @@ func NewRoleResource( return ret, nil } +// NewScopeBindingResource returns a new resource instance with a configured scope binding trait. +func NewScopeBindingResource( + name string, + resourceType *v2.ResourceType, + objectID any, + scopeBindingOpts []ScopeBindingTraitOption, + opts ...ResourceOption, +) (*v2.Resource, error) { + opts = append(opts, WithScopeBindingTrait(scopeBindingOpts...)) + + ret, err := NewResource(name, resourceType, objectID, opts...) + if err != nil { + return nil, err + } + + return ret, nil +} + // NewAppResource returns a new resource instance with a configured app trait. // The trait is configured with the provided helpURL and profile. func NewAppResource( diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_scope_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_scope_trait.go new file mode 100644 index 00000000..0624f9ee --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_scope_trait.go @@ -0,0 +1,41 @@ +package resource + +import ( + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type ScopeBindingTraitOption func(rs *v2.ScopeBindingTrait) error + +// WithRoleScopeRoleId sets the role of role scope. +func WithRoleScopeRoleId(resourceId *v2.ResourceId) ScopeBindingTraitOption { + return func(rs *v2.ScopeBindingTrait) error { + rs.RoleId = resourceId + return nil + } +} + +// WithRoleScopeResourceId sets the resource scope of role scope. +func WithRoleScopeResourceId(resourceId *v2.ResourceId) ScopeBindingTraitOption { + return func(rs *v2.ScopeBindingTrait) error { + rs.ScopeResourceId = resourceId + return nil + } +} + +// GetScopeBindingTrait attempts to return the ScopeBindingTrait instance on a resource. 
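The new scope binding helpers compose like the other trait builders. A sketch that attaches the trait to a resource and reads it back through the accessor defined below; the resource type, IDs, and display names are illustrative:

package main

import (
	"log"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	rs "github.com/conductorone/baton-sdk/pkg/types/resource"
)

func main() {
	bindingType := &v2.ResourceType{Id: "role_binding", DisplayName: "Role Binding"}

	res, err := rs.NewScopeBindingResource(
		"admin on project-x",
		bindingType,
		"binding-123",
		[]rs.ScopeBindingTraitOption{
			rs.WithRoleScopeRoleId(&v2.ResourceId{ResourceType: "role", Resource: "admin"}),
			rs.WithRoleScopeResourceId(&v2.ResourceId{ResourceType: "project", Resource: "project-x"}),
		},
	)
	if err != nil {
		log.Fatal(err)
	}

	trait, err := rs.GetScopeBindingTrait(res)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("role=%s scope=%s", trait.GetRoleId().GetResource(), trait.GetScopeResourceId().GetResource())
}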
+func GetScopeBindingTrait(resource *v2.Resource) (*v2.ScopeBindingTrait, error) { + ret := &v2.ScopeBindingTrait{} + annos := annotations.Annotations(resource.GetAnnotations()) + ok, err := annos.Pick(ret) + if err != nil { + return nil, err + } + if !ok { + return nil, status.Errorf(codes.NotFound, "scope binding trait was not found on resource") + } + + return ret, nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_trait.go index bc534133..80b75a6d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_trait.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/role_trait.go @@ -1,10 +1,10 @@ package resource import ( - "fmt" - v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" "github.com/conductorone/baton-sdk/pkg/annotations" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/structpb" ) @@ -23,6 +23,22 @@ func WithRoleProfile(profile map[string]interface{}) RoleTraitOption { } } +func WithRoleScopeConditions(typ string, conditions []string) RoleTraitOption { + return func(rt *v2.RoleTrait) error { + rt.RoleScopeConditions = &v2.RoleScopeConditions{ + Type: typ, + Conditions: make([]*v2.RoleScopeCondition, len(conditions)), + } + for i, condition := range conditions { + rt.RoleScopeConditions.Conditions[i] = &v2.RoleScopeCondition{ + Expression: condition, + } + } + + return nil + } +} + // NewRoleTrait creates a new `RoleTrait` with the provided profile. func NewRoleTrait(opts ...RoleTraitOption) (*v2.RoleTrait, error) { groupTrait := &v2.RoleTrait{} @@ -46,7 +62,7 @@ func GetRoleTrait(resource *v2.Resource) (*v2.RoleTrait, error) { return nil, err } if !ok { - return nil, fmt.Errorf("role trait was not found on resource") + return nil, status.Errorf(codes.NotFound, "role trait was not found on resource") } return ret, nil diff --git a/vendor/modules.txt b/vendor/modules.txt index 04dbd5ed..3908fb9d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -159,7 +159,7 @@ github.com/benbjohnson/clock # github.com/cenkalti/backoff/v4 v4.3.0 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 -# github.com/conductorone/baton-sdk v0.6.24 +# github.com/conductorone/baton-sdk v0.7.4 ## explicit; go 1.25.2 github.com/conductorone/baton-sdk/internal/connector github.com/conductorone/baton-sdk/pb/c1/c1z/v1 From ef18989634b023c5ba98f69a7db5a3a087962a45 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Wed, 28 Jan 2026 18:30:45 +0530 Subject: [PATCH 09/19] lint fix --- pkg/connector/team.go | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/pkg/connector/team.go b/pkg/connector/team.go index f220f884..b1a5d6f1 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -25,6 +25,10 @@ import ( const ( teamRoleMember = "member" teamRoleMaintainer = "maintainer" + + // Team privacy levels + teamPrivacySecret = "secret" + teamPrivacyClosed = "closed" ) var teamAccessLevels = []string{ @@ -627,21 +631,19 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str if privacy, ok := actions.GetStringArg(args, "privacy"); ok && privacy != "" { if isNestedTeam { // Nested teams can only be "closed" - if privacy == "secret" { + if privacy == teamPrivacySecret { l.Warn("github-connector: secret privacy not allowed for nested teams, using closed", zap.String("requested_privacy", privacy), ) } - newTeam.Privacy = 
github.Ptr("closed") - } else { + newTeam.Privacy = github.Ptr(teamPrivacyClosed) + } else if privacy == teamPrivacySecret || privacy == teamPrivacyClosed { // Non-nested teams can be "secret" or "closed" - if privacy == "secret" || privacy == "closed" { - newTeam.Privacy = github.Ptr(privacy) - } + newTeam.Privacy = github.Ptr(privacy) } } else if isNestedTeam { // Default for nested teams is "closed" - newTeam.Privacy = github.Ptr("closed") + newTeam.Privacy = github.Ptr(teamPrivacyClosed) } // Note: Default for non-nested teams is "secret" (handled by GitHub API) @@ -752,7 +754,7 @@ func (o *teamResourceType) handleDeleteTeamAction(ctx context.Context, args *str } // Get the team to find its slug - team, resp, err := o.client.Teams.GetTeamByID(ctx, orgID, teamID) + team, resp, err := o.client.Teams.GetTeamByID(ctx, orgID, teamID) //nolint:staticcheck // TODO: migrate to GetTeamBySlug if err != nil { return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get team %d", teamID)) } @@ -819,7 +821,7 @@ func (o *teamResourceType) handleUpdateTeamAction(ctx context.Context, args *str } // Get the team to find its slug - team, resp, err := o.client.Teams.GetTeamByID(ctx, orgID, teamID) + team, resp, err := o.client.Teams.GetTeamByID(ctx, orgID, teamID) //nolint:staticcheck // TODO: migrate to GetTeamBySlug if err != nil { return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get team %d", teamID)) } @@ -851,7 +853,7 @@ func (o *teamResourceType) handleUpdateTeamAction(ctx context.Context, args *str } if privacy, ok := actions.GetStringArg(args, "privacy"); ok && privacy != "" { - if privacy == "secret" || privacy == "closed" { + if privacy == teamPrivacySecret || privacy == teamPrivacyClosed { updateTeam.Privacy = github.Ptr(privacy) hasUpdates = true } else { From 47760150593f6b7e420a4f69649e4cfaa4ca1ec0 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Fri, 30 Jan 2026 10:10:33 +0530 Subject: [PATCH 10/19] upgrade baton-sdk to 0.7.10 --- go.mod | 2 +- go.sum | 4 +- .../baton-sdk/pb/c1/config/v1/config.pb.go | 711 +++++++++++++++-- .../pb/c1/config/v1/config.pb.validate.go | 725 ++++++++++++++++++ .../pb/c1/config/v1/config_protoopaque.pb.go | 713 +++++++++++++++-- .../baton-sdk/pkg/actions/args.go | 228 ++++++ .../conductorone/baton-sdk/pkg/cli/cli.go | 8 +- .../baton-sdk/pkg/cli/commands.go | 16 +- .../baton-sdk/pkg/cli/healthcheck_command.go | 81 ++ .../baton-sdk/pkg/cli/lambda_server__added.go | 1 + .../baton-sdk/pkg/config/config.go | 27 +- .../connectorbuilder/resource_provisioner.go | 2 +- .../baton-sdk/pkg/connectorrunner/runner.go | 46 +- .../pkg/field/default_relationships.go | 4 + .../baton-sdk/pkg/field/defaults.go | 23 + .../baton-sdk/pkg/healthcheck/server.go | 214 ++++++ .../conductorone/baton-sdk/pkg/sdk/version.go | 2 +- .../conductorone/baton-sdk/pkg/sync/syncer.go | 11 + .../baton-sdk/pkg/tasks/c1api/actions.go | 4 +- .../types/resource/security_insight_trait.go | 33 + .../baton-sdk/pkg/uhttp/transport.go | 51 +- vendor/modules.txt | 3 +- 22 files changed, 2726 insertions(+), 183 deletions(-) create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/cli/healthcheck_command.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/healthcheck/server.go diff --git a/go.mod b/go.mod index e8784351..60c37994 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/conductorone/baton-github go 1.25.2 require ( - github.com/conductorone/baton-sdk v0.7.4 + github.com/conductorone/baton-sdk v0.7.10 github.com/deckarep/golang-set/v2 
v2.8.0 github.com/ennyjfrick/ruleguard-logfatal v0.0.2 github.com/golang-jwt/jwt/v5 v5.2.2 diff --git a/go.sum b/go.sum index 45ac2db2..45850e97 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/conductorone/baton-sdk v0.7.4 h1:JD79NYgIficX00ucugU/5//r2rpGPNqAsHlZsgE0GCM= -github.com/conductorone/baton-sdk v0.7.4/go.mod h1:9S5feBOuIJxlNdGmkv3ObkCNHbVyOHr6foNrIrk+d4Y= +github.com/conductorone/baton-sdk v0.7.10 h1:eK/RTU8CZyTosYSNYmDzfAahGnxqpOq6rheBcwTx7w0= +github.com/conductorone/baton-sdk v0.7.10/go.mod h1:9S5feBOuIJxlNdGmkv3ObkCNHbVyOHr6foNrIrk+d4Y= github.com/conductorone/dpop v0.2.3 h1:s91U3845GHQ6P6FWrdNr2SEOy1ES/jcFs1JtKSl2S+o= github.com/conductorone/dpop v0.2.3/go.mod h1:gyo8TtzB9SCFCsjsICH4IaLZ7y64CcrDXMOPBwfq/3s= github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 h1:kLMCNIh0Mo2vbvvkCmJ3ixsPbXEJ6HPcW53Ku9yje3s= diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.go index 06945eed..d563d18f 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.go @@ -566,6 +566,8 @@ type Field struct { // *Field_ResourceIdSliceField // *Field_ResourceField // *Field_ResourceSliceField + // *Field_EntitlementSliceField + // *Field_GrantSliceField Field isField_Field `protobuf_oneof:"field"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -733,6 +735,24 @@ func (x *Field) GetResourceSliceField() *ResourceSliceField { return nil } +func (x *Field) GetEntitlementSliceField() *EntitlementSliceField { + if x != nil { + if x, ok := x.Field.(*Field_EntitlementSliceField); ok { + return x.EntitlementSliceField + } + } + return nil +} + +func (x *Field) GetGrantSliceField() *GrantSliceField { + if x != nil { + if x, ok := x.Field.(*Field_GrantSliceField); ok { + return x.GrantSliceField + } + } + return nil +} + func (x *Field) SetName(v string) { x.Name = v } @@ -833,6 +853,22 @@ func (x *Field) SetResourceSliceField(v *ResourceSliceField) { x.Field = &Field_ResourceSliceField{v} } +func (x *Field) SetEntitlementSliceField(v *EntitlementSliceField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_EntitlementSliceField{v} +} + +func (x *Field) SetGrantSliceField(v *GrantSliceField) { + if v == nil { + x.Field = nil + return + } + x.Field = &Field_GrantSliceField{v} +} + func (x *Field) HasField() bool { if x == nil { return false @@ -912,6 +948,22 @@ func (x *Field) HasResourceSliceField() bool { return ok } +func (x *Field) HasEntitlementSliceField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_EntitlementSliceField) + return ok +} + +func (x *Field) HasGrantSliceField() bool { + if x == nil { + return false + } + _, ok := x.Field.(*Field_GrantSliceField) + return ok +} + func (x *Field) ClearField() { x.Field = nil } @@ -970,6 +1022,18 @@ func (x *Field) ClearResourceSliceField() { } } +func (x *Field) ClearEntitlementSliceField() { + if _, ok := x.Field.(*Field_EntitlementSliceField); ok { + x.Field = nil + } +} + +func (x *Field) 
ClearGrantSliceField() { + if _, ok := x.Field.(*Field_GrantSliceField); ok { + x.Field = nil + } +} + const Field_Field_not_set_case case_Field_Field = 0 const Field_StringField_case case_Field_Field = 100 const Field_IntField_case case_Field_Field = 101 @@ -980,6 +1044,8 @@ const Field_ResourceIdField_case case_Field_Field = 105 const Field_ResourceIdSliceField_case case_Field_Field = 106 const Field_ResourceField_case case_Field_Field = 107 const Field_ResourceSliceField_case case_Field_Field = 108 +const Field_EntitlementSliceField_case case_Field_Field = 109 +const Field_GrantSliceField_case case_Field_Field = 110 func (x *Field) WhichField() case_Field_Field { if x == nil { @@ -1004,6 +1070,10 @@ func (x *Field) WhichField() case_Field_Field { return Field_ResourceField_case case *Field_ResourceSliceField: return Field_ResourceSliceField_case + case *Field_EntitlementSliceField: + return Field_EntitlementSliceField_case + case *Field_GrantSliceField: + return Field_GrantSliceField_case default: return Field_Field_not_set_case } @@ -1028,8 +1098,10 @@ type Field_builder struct { ResourceIdField *ResourceIdField ResourceIdSliceField *ResourceIdSliceField // These are meant to serve as return types for actions. - ResourceField *ResourceField - ResourceSliceField *ResourceSliceField + ResourceField *ResourceField + ResourceSliceField *ResourceSliceField + EntitlementSliceField *EntitlementSliceField + GrantSliceField *GrantSliceField // -- end of Field } @@ -1071,6 +1143,12 @@ func (b0 Field_builder) Build() *Field { if b.ResourceSliceField != nil { x.Field = &Field_ResourceSliceField{b.ResourceSliceField} } + if b.EntitlementSliceField != nil { + x.Field = &Field_EntitlementSliceField{b.EntitlementSliceField} + } + if b.GrantSliceField != nil { + x.Field = &Field_GrantSliceField{b.GrantSliceField} + } return m0 } @@ -1125,6 +1203,14 @@ type Field_ResourceSliceField struct { ResourceSliceField *ResourceSliceField `protobuf:"bytes,108,opt,name=resource_slice_field,json=resourceSliceField,proto3,oneof"` } +type Field_EntitlementSliceField struct { + EntitlementSliceField *EntitlementSliceField `protobuf:"bytes,109,opt,name=entitlement_slice_field,json=entitlementSliceField,proto3,oneof"` +} + +type Field_GrantSliceField struct { + GrantSliceField *GrantSliceField `protobuf:"bytes,110,opt,name=grant_slice_field,json=grantSliceField,proto3,oneof"` +} + func (*Field_StringField) isField_Field() {} func (*Field_IntField) isField_Field() {} @@ -1143,6 +1229,10 @@ func (*Field_ResourceField) isField_Field() {} func (*Field_ResourceSliceField) isField_Field() {} +func (*Field_EntitlementSliceField) isField_Field() {} + +func (*Field_GrantSliceField) isField_Field() {} + // These are partially duplicate with the Resource proto in the connector package. 
// This is to avoid import cycles type Resource struct { @@ -1476,6 +1566,442 @@ func (b0 ResourceSliceField_builder) Build() *ResourceSliceField { return m0 } +// Simplified Entitlement for config return types (avoids import cycles with connector package) +type Entitlement struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Slug string `protobuf:"bytes,4,opt,name=slug,proto3" json:"slug,omitempty"` + Purpose string `protobuf:"bytes,5,opt,name=purpose,proto3" json:"purpose,omitempty"` // "PURPOSE_VALUE_ASSIGNMENT", "PURPOSE_VALUE_PERMISSION", or "PURPOSE_VALUE_OWNERSHIP" + GrantableToResourceTypeIds []string `protobuf:"bytes,6,rep,name=grantable_to_resource_type_ids,json=grantableToResourceTypeIds,proto3" json:"grantable_to_resource_type_ids,omitempty"` + ResourceId string `protobuf:"bytes,7,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + ResourceTypeId string `protobuf:"bytes,8,opt,name=resource_type_id,json=resourceTypeId,proto3" json:"resource_type_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Entitlement) Reset() { + *x = Entitlement{} + mi := &file_c1_config_v1_config_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Entitlement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entitlement) ProtoMessage() {} + +func (x *Entitlement) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Entitlement) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Entitlement) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Entitlement) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Entitlement) GetSlug() string { + if x != nil { + return x.Slug + } + return "" +} + +func (x *Entitlement) GetPurpose() string { + if x != nil { + return x.Purpose + } + return "" +} + +func (x *Entitlement) GetGrantableToResourceTypeIds() []string { + if x != nil { + return x.GrantableToResourceTypeIds + } + return nil +} + +func (x *Entitlement) GetResourceId() string { + if x != nil { + return x.ResourceId + } + return "" +} + +func (x *Entitlement) GetResourceTypeId() string { + if x != nil { + return x.ResourceTypeId + } + return "" +} + +func (x *Entitlement) SetId(v string) { + x.Id = v +} + +func (x *Entitlement) SetDisplayName(v string) { + x.DisplayName = v +} + +func (x *Entitlement) SetDescription(v string) { + x.Description = v +} + +func (x *Entitlement) SetSlug(v string) { + x.Slug = v +} + +func (x *Entitlement) SetPurpose(v string) { + x.Purpose = v +} + +func (x *Entitlement) SetGrantableToResourceTypeIds(v []string) { + x.GrantableToResourceTypeIds = v +} + +func (x *Entitlement) SetResourceId(v string) { + x.ResourceId = v +} + +func (x *Entitlement) SetResourceTypeId(v string) { + x.ResourceTypeId = v +} + +type Entitlement_builder struct { + _ [0]func() // Prevents comparability and use 
of unkeyed literals for the builder. + + Id string + DisplayName string + Description string + Slug string + Purpose string + GrantableToResourceTypeIds []string + ResourceId string + ResourceTypeId string +} + +func (b0 Entitlement_builder) Build() *Entitlement { + m0 := &Entitlement{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.DisplayName = b.DisplayName + x.Description = b.Description + x.Slug = b.Slug + x.Purpose = b.Purpose + x.GrantableToResourceTypeIds = b.GrantableToResourceTypeIds + x.ResourceId = b.ResourceId + x.ResourceTypeId = b.ResourceTypeId + return m0 +} + +type EntitlementSliceField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + DefaultValue []*Entitlement `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementSliceField) Reset() { + *x = EntitlementSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementSliceField) ProtoMessage() {} + +func (x *EntitlementSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementSliceField) GetDefaultValue() []*Entitlement { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *EntitlementSliceField) SetDefaultValue(v []*Entitlement) { + x.DefaultValue = v +} + +type EntitlementSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue []*Entitlement +} + +func (b0 EntitlementSliceField_builder) Build() *EntitlementSliceField { + m0 := &EntitlementSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 +} + +// Reference to an entitlement (used in Grant) +type EntitlementRef struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementRef) Reset() { + *x = EntitlementRef{} + mi := &file_c1_config_v1_config_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementRef) ProtoMessage() {} + +func (x *EntitlementRef) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementRef) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *EntitlementRef) SetId(v string) { + x.Id = v +} + +type EntitlementRef_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string +} + +func (b0 EntitlementRef_builder) Build() *EntitlementRef { + m0 := &EntitlementRef{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + return m0 +} + +// Simplified Grant for config return types (avoids import cycles with connector package) +type Grant struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Entitlement *EntitlementRef `protobuf:"bytes,2,opt,name=entitlement,proto3" json:"entitlement,omitempty"` + Principal *Resource `protobuf:"bytes,3,opt,name=principal,proto3" json:"principal,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Grant) Reset() { + *x = Grant{} + mi := &file_c1_config_v1_config_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Grant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Grant) ProtoMessage() {} + +func (x *Grant) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Grant) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Grant) GetEntitlement() *EntitlementRef { + if x != nil { + return x.Entitlement + } + return nil +} + +func (x *Grant) GetPrincipal() *Resource { + if x != nil { + return x.Principal + } + return nil +} + +func (x *Grant) SetId(v string) { + x.Id = v +} + +func (x *Grant) SetEntitlement(v *EntitlementRef) { + x.Entitlement = v +} + +func (x *Grant) SetPrincipal(v *Resource) { + x.Principal = v +} + +func (x *Grant) HasEntitlement() bool { + if x == nil { + return false + } + return x.Entitlement != nil +} + +func (x *Grant) HasPrincipal() bool { + if x == nil { + return false + } + return x.Principal != nil +} + +func (x *Grant) ClearEntitlement() { + x.Entitlement = nil +} + +func (x *Grant) ClearPrincipal() { + x.Principal = nil +} + +type Grant_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + Entitlement *EntitlementRef + Principal *Resource +} + +func (b0 Grant_builder) Build() *Grant { + m0 := &Grant{} + b, x := &b0, m0 + _, _ = b, x + x.Id = b.Id + x.Entitlement = b.Entitlement + x.Principal = b.Principal + return m0 +} + +type GrantSliceField struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + DefaultValue []*Grant `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantSliceField) Reset() { + *x = GrantSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantSliceField) ProtoMessage() {} + +func (x *GrantSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantSliceField) GetDefaultValue() []*Grant { + if x != nil { + return x.DefaultValue + } + return nil +} + +func (x *GrantSliceField) SetDefaultValue(v []*Grant) { + x.DefaultValue = v +} + +type GrantSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue []*Grant +} + +func (b0 GrantSliceField_builder) Build() *GrantSliceField { + m0 := &GrantSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.DefaultValue = b.DefaultValue + return m0 +} + type ResourceIdField struct { state protoimpl.MessageState `protogen:"hybrid.v1"` DefaultValue *ResourceId `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` @@ -1486,7 +2012,7 @@ type ResourceIdField struct { func (x *ResourceIdField) Reset() { *x = ResourceIdField{} - mi := &file_c1_config_v1_config_proto_msgTypes[8] + mi := &file_c1_config_v1_config_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1498,7 +2024,7 @@ func (x *ResourceIdField) String() string { func (*ResourceIdField) ProtoMessage() {} func (x *ResourceIdField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[8] + mi := &file_c1_config_v1_config_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1579,7 +2105,7 @@ type ResourceIdSliceField struct { func (x *ResourceIdSliceField) Reset() { *x = ResourceIdSliceField{} - mi := &file_c1_config_v1_config_proto_msgTypes[9] + mi := &file_c1_config_v1_config_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1591,7 +2117,7 @@ func (x *ResourceIdSliceField) String() string { func (*ResourceIdSliceField) ProtoMessage() {} func (x *ResourceIdSliceField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[9] + mi := &file_c1_config_v1_config_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1662,7 +2188,7 @@ type IntField struct { func (x *IntField) Reset() { *x = IntField{} - mi := &file_c1_config_v1_config_proto_msgTypes[10] + mi := &file_c1_config_v1_config_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
ms.StoreMessageInfo(mi) } @@ -1674,7 +2200,7 @@ func (x *IntField) String() string { func (*IntField) ProtoMessage() {} func (x *IntField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[10] + mi := &file_c1_config_v1_config_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1745,7 +2271,7 @@ type BoolField struct { func (x *BoolField) Reset() { *x = BoolField{} - mi := &file_c1_config_v1_config_proto_msgTypes[11] + mi := &file_c1_config_v1_config_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1757,7 +2283,7 @@ func (x *BoolField) String() string { func (*BoolField) ProtoMessage() {} func (x *BoolField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[11] + mi := &file_c1_config_v1_config_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1827,7 +2353,7 @@ type StringSliceField struct { func (x *StringSliceField) Reset() { *x = StringSliceField{} - mi := &file_c1_config_v1_config_proto_msgTypes[12] + mi := &file_c1_config_v1_config_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1839,7 +2365,7 @@ func (x *StringSliceField) String() string { func (*StringSliceField) ProtoMessage() {} func (x *StringSliceField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[12] + mi := &file_c1_config_v1_config_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1909,7 +2435,7 @@ type StringMapField struct { func (x *StringMapField) Reset() { *x = StringMapField{} - mi := &file_c1_config_v1_config_proto_msgTypes[13] + mi := &file_c1_config_v1_config_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1921,7 +2447,7 @@ func (x *StringMapField) String() string { func (*StringMapField) ProtoMessage() {} func (x *StringMapField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[13] + mi := &file_c1_config_v1_config_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1992,7 +2518,7 @@ type StringFieldOption struct { func (x *StringFieldOption) Reset() { *x = StringFieldOption{} - mi := &file_c1_config_v1_config_proto_msgTypes[14] + mi := &file_c1_config_v1_config_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2004,7 +2530,7 @@ func (x *StringFieldOption) String() string { func (*StringFieldOption) ProtoMessage() {} func (x *StringFieldOption) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[14] + mi := &file_c1_config_v1_config_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2080,7 +2606,7 @@ type StringField struct { func (x *StringField) Reset() { *x = StringField{} - mi := &file_c1_config_v1_config_proto_msgTypes[15] + mi := &file_c1_config_v1_config_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2092,7 +2618,7 @@ func (x *StringField) String() string { func (*StringField) ProtoMessage() {} func (x *StringField) ProtoReflect() protoreflect.Message { - mi := 
&file_c1_config_v1_config_proto_msgTypes[15] + mi := &file_c1_config_v1_config_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2225,7 +2751,7 @@ const file_c1_config_v1_config_proto_rawDesc = "" + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x1b\n" + "\thelp_text\x18\x03 \x01(\tR\bhelpText\x12\x16\n" + "\x06fields\x18\x04 \x03(\tR\x06fields\x12\x18\n" + - "\adefault\x18\x05 \x01(\bR\adefault\"\xf1\x06\n" + + "\adefault\x18\x05 \x01(\bR\adefault\"\x9d\b\n" + "\x05Field\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12!\n" + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12 \n" + @@ -2244,7 +2770,9 @@ const file_c1_config_v1_config_proto_rawDesc = "" + "\x11resource_id_field\x18i \x01(\v2\x1d.c1.config.v1.ResourceIdFieldH\x00R\x0fresourceIdField\x12[\n" + "\x17resource_id_slice_field\x18j \x01(\v2\".c1.config.v1.ResourceIdSliceFieldH\x00R\x14resourceIdSliceField\x12D\n" + "\x0eresource_field\x18k \x01(\v2\x1b.c1.config.v1.ResourceFieldH\x00R\rresourceField\x12T\n" + - "\x14resource_slice_field\x18l \x01(\v2 .c1.config.v1.ResourceSliceFieldH\x00R\x12resourceSliceFieldB\a\n" + + "\x14resource_slice_field\x18l \x01(\v2 .c1.config.v1.ResourceSliceFieldH\x00R\x12resourceSliceField\x12]\n" + + "\x17entitlement_slice_field\x18m \x01(\v2#.c1.config.v1.EntitlementSliceFieldH\x00R\x15entitlementSliceField\x12K\n" + + "\x11grant_slice_field\x18n \x01(\v2\x1d.c1.config.v1.GrantSliceFieldH\x00R\x0fgrantSliceFieldB\a\n" + "\x05field\"\x8a\x02\n" + "\bResource\x129\n" + "\vresource_id\x18\x01 \x01(\v2\x18.c1.config.v1.ResourceIdR\n" + @@ -2261,7 +2789,27 @@ const file_c1_config_v1_config_proto_rawDesc = "" + "\rResourceField\x12;\n" + "\rdefault_value\x18\x01 \x01(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"Q\n" + "\x12ResourceSliceField\x12;\n" + - "\rdefault_value\x18\x01 \x03(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"\x94\x01\n" + + "\rdefault_value\x18\x01 \x03(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"\x9f\x02\n" + + "\vEntitlement\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x12\n" + + "\x04slug\x18\x04 \x01(\tR\x04slug\x12\x18\n" + + "\apurpose\x18\x05 \x01(\tR\apurpose\x12B\n" + + "\x1egrantable_to_resource_type_ids\x18\x06 \x03(\tR\x1agrantableToResourceTypeIds\x12\x1f\n" + + "\vresource_id\x18\a \x01(\tR\n" + + "resourceId\x12(\n" + + "\x10resource_type_id\x18\b \x01(\tR\x0eresourceTypeId\"W\n" + + "\x15EntitlementSliceField\x12>\n" + + "\rdefault_value\x18\x01 \x03(\v2\x19.c1.config.v1.EntitlementR\fdefaultValue\" \n" + + "\x0eEntitlementRef\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\"\x8d\x01\n" + + "\x05Grant\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12>\n" + + "\ventitlement\x18\x02 \x01(\v2\x1c.c1.config.v1.EntitlementRefR\ventitlement\x124\n" + + "\tprincipal\x18\x03 \x01(\v2\x16.c1.config.v1.ResourceR\tprincipal\"K\n" + + "\x0fGrantSliceField\x128\n" + + "\rdefault_value\x18\x01 \x03(\v2\x13.c1.config.v1.GrantR\fdefaultValue\"\x94\x01\n" + "\x0fResourceIdField\x12=\n" + "\rdefault_value\x18\x01 \x01(\v2\x18.c1.config.v1.ResourceIdR\fdefaultValue\x128\n" + "\x05rules\x18\x03 \x01(\v2\x1d.c1.config.v1.ResourceIDRulesH\x00R\x05rules\x88\x01\x01B\b\n" + @@ -2314,7 +2862,7 @@ const file_c1_config_v1_config_proto_rawDesc = "" + "\x1dSTRING_FIELD_TYPE_FILE_UPLOAD\x10\x04B3Z1github.com/conductorone/baton-sdk/pb/c1/config/v1b\x06proto3" var file_c1_config_v1_config_proto_enumTypes 
= make([]protoimpl.EnumInfo, 2) -var file_c1_config_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_c1_config_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 22) var file_c1_config_v1_config_proto_goTypes = []any{ (ConstraintKind)(0), // 0: c1.config.v1.ConstraintKind (StringFieldType)(0), // 1: c1.config.v1.StringFieldType @@ -2326,61 +2874,72 @@ var file_c1_config_v1_config_proto_goTypes = []any{ (*ResourceId)(nil), // 7: c1.config.v1.ResourceId (*ResourceField)(nil), // 8: c1.config.v1.ResourceField (*ResourceSliceField)(nil), // 9: c1.config.v1.ResourceSliceField - (*ResourceIdField)(nil), // 10: c1.config.v1.ResourceIdField - (*ResourceIdSliceField)(nil), // 11: c1.config.v1.ResourceIdSliceField - (*IntField)(nil), // 12: c1.config.v1.IntField - (*BoolField)(nil), // 13: c1.config.v1.BoolField - (*StringSliceField)(nil), // 14: c1.config.v1.StringSliceField - (*StringMapField)(nil), // 15: c1.config.v1.StringMapField - (*StringFieldOption)(nil), // 16: c1.config.v1.StringFieldOption - (*StringField)(nil), // 17: c1.config.v1.StringField - nil, // 18: c1.config.v1.StringMapField.DefaultValueEntry - (*anypb.Any)(nil), // 19: google.protobuf.Any - (*ResourceIDRules)(nil), // 20: c1.config.v1.ResourceIDRules - (*RepeatedResourceIdRules)(nil), // 21: c1.config.v1.RepeatedResourceIdRules - (*Int64Rules)(nil), // 22: c1.config.v1.Int64Rules - (*BoolRules)(nil), // 23: c1.config.v1.BoolRules - (*RepeatedStringRules)(nil), // 24: c1.config.v1.RepeatedStringRules - (*StringMapRules)(nil), // 25: c1.config.v1.StringMapRules - (*StringRules)(nil), // 26: c1.config.v1.StringRules + (*Entitlement)(nil), // 10: c1.config.v1.Entitlement + (*EntitlementSliceField)(nil), // 11: c1.config.v1.EntitlementSliceField + (*EntitlementRef)(nil), // 12: c1.config.v1.EntitlementRef + (*Grant)(nil), // 13: c1.config.v1.Grant + (*GrantSliceField)(nil), // 14: c1.config.v1.GrantSliceField + (*ResourceIdField)(nil), // 15: c1.config.v1.ResourceIdField + (*ResourceIdSliceField)(nil), // 16: c1.config.v1.ResourceIdSliceField + (*IntField)(nil), // 17: c1.config.v1.IntField + (*BoolField)(nil), // 18: c1.config.v1.BoolField + (*StringSliceField)(nil), // 19: c1.config.v1.StringSliceField + (*StringMapField)(nil), // 20: c1.config.v1.StringMapField + (*StringFieldOption)(nil), // 21: c1.config.v1.StringFieldOption + (*StringField)(nil), // 22: c1.config.v1.StringField + nil, // 23: c1.config.v1.StringMapField.DefaultValueEntry + (*anypb.Any)(nil), // 24: google.protobuf.Any + (*ResourceIDRules)(nil), // 25: c1.config.v1.ResourceIDRules + (*RepeatedResourceIdRules)(nil), // 26: c1.config.v1.RepeatedResourceIdRules + (*Int64Rules)(nil), // 27: c1.config.v1.Int64Rules + (*BoolRules)(nil), // 28: c1.config.v1.BoolRules + (*RepeatedStringRules)(nil), // 29: c1.config.v1.RepeatedStringRules + (*StringMapRules)(nil), // 30: c1.config.v1.StringMapRules + (*StringRules)(nil), // 31: c1.config.v1.StringRules } var file_c1_config_v1_config_proto_depIdxs = []int32{ 5, // 0: c1.config.v1.Configuration.fields:type_name -> c1.config.v1.Field 3, // 1: c1.config.v1.Configuration.constraints:type_name -> c1.config.v1.Constraint 4, // 2: c1.config.v1.Configuration.field_groups:type_name -> c1.config.v1.FieldGroup 0, // 3: c1.config.v1.Constraint.kind:type_name -> c1.config.v1.ConstraintKind - 17, // 4: c1.config.v1.Field.string_field:type_name -> c1.config.v1.StringField - 12, // 5: c1.config.v1.Field.int_field:type_name -> c1.config.v1.IntField - 13, // 6: c1.config.v1.Field.bool_field:type_name -> 
c1.config.v1.BoolField - 14, // 7: c1.config.v1.Field.string_slice_field:type_name -> c1.config.v1.StringSliceField - 15, // 8: c1.config.v1.Field.string_map_field:type_name -> c1.config.v1.StringMapField - 10, // 9: c1.config.v1.Field.resource_id_field:type_name -> c1.config.v1.ResourceIdField - 11, // 10: c1.config.v1.Field.resource_id_slice_field:type_name -> c1.config.v1.ResourceIdSliceField + 22, // 4: c1.config.v1.Field.string_field:type_name -> c1.config.v1.StringField + 17, // 5: c1.config.v1.Field.int_field:type_name -> c1.config.v1.IntField + 18, // 6: c1.config.v1.Field.bool_field:type_name -> c1.config.v1.BoolField + 19, // 7: c1.config.v1.Field.string_slice_field:type_name -> c1.config.v1.StringSliceField + 20, // 8: c1.config.v1.Field.string_map_field:type_name -> c1.config.v1.StringMapField + 15, // 9: c1.config.v1.Field.resource_id_field:type_name -> c1.config.v1.ResourceIdField + 16, // 10: c1.config.v1.Field.resource_id_slice_field:type_name -> c1.config.v1.ResourceIdSliceField 8, // 11: c1.config.v1.Field.resource_field:type_name -> c1.config.v1.ResourceField 9, // 12: c1.config.v1.Field.resource_slice_field:type_name -> c1.config.v1.ResourceSliceField - 7, // 13: c1.config.v1.Resource.resource_id:type_name -> c1.config.v1.ResourceId - 7, // 14: c1.config.v1.Resource.parent_resource_id:type_name -> c1.config.v1.ResourceId - 19, // 15: c1.config.v1.Resource.annotations:type_name -> google.protobuf.Any - 6, // 16: c1.config.v1.ResourceField.default_value:type_name -> c1.config.v1.Resource - 6, // 17: c1.config.v1.ResourceSliceField.default_value:type_name -> c1.config.v1.Resource - 7, // 18: c1.config.v1.ResourceIdField.default_value:type_name -> c1.config.v1.ResourceId - 20, // 19: c1.config.v1.ResourceIdField.rules:type_name -> c1.config.v1.ResourceIDRules - 10, // 20: c1.config.v1.ResourceIdSliceField.default_value:type_name -> c1.config.v1.ResourceIdField - 21, // 21: c1.config.v1.ResourceIdSliceField.rules:type_name -> c1.config.v1.RepeatedResourceIdRules - 22, // 22: c1.config.v1.IntField.rules:type_name -> c1.config.v1.Int64Rules - 23, // 23: c1.config.v1.BoolField.rules:type_name -> c1.config.v1.BoolRules - 24, // 24: c1.config.v1.StringSliceField.rules:type_name -> c1.config.v1.RepeatedStringRules - 18, // 25: c1.config.v1.StringMapField.default_value:type_name -> c1.config.v1.StringMapField.DefaultValueEntry - 25, // 26: c1.config.v1.StringMapField.rules:type_name -> c1.config.v1.StringMapRules - 26, // 27: c1.config.v1.StringField.rules:type_name -> c1.config.v1.StringRules - 1, // 28: c1.config.v1.StringField.type:type_name -> c1.config.v1.StringFieldType - 16, // 29: c1.config.v1.StringField.options:type_name -> c1.config.v1.StringFieldOption - 19, // 30: c1.config.v1.StringMapField.DefaultValueEntry.value:type_name -> google.protobuf.Any - 31, // [31:31] is the sub-list for method output_type - 31, // [31:31] is the sub-list for method input_type - 31, // [31:31] is the sub-list for extension type_name - 31, // [31:31] is the sub-list for extension extendee - 0, // [0:31] is the sub-list for field type_name + 11, // 13: c1.config.v1.Field.entitlement_slice_field:type_name -> c1.config.v1.EntitlementSliceField + 14, // 14: c1.config.v1.Field.grant_slice_field:type_name -> c1.config.v1.GrantSliceField + 7, // 15: c1.config.v1.Resource.resource_id:type_name -> c1.config.v1.ResourceId + 7, // 16: c1.config.v1.Resource.parent_resource_id:type_name -> c1.config.v1.ResourceId + 24, // 17: c1.config.v1.Resource.annotations:type_name -> google.protobuf.Any + 6, // 18: 
c1.config.v1.ResourceField.default_value:type_name -> c1.config.v1.Resource + 6, // 19: c1.config.v1.ResourceSliceField.default_value:type_name -> c1.config.v1.Resource + 10, // 20: c1.config.v1.EntitlementSliceField.default_value:type_name -> c1.config.v1.Entitlement + 12, // 21: c1.config.v1.Grant.entitlement:type_name -> c1.config.v1.EntitlementRef + 6, // 22: c1.config.v1.Grant.principal:type_name -> c1.config.v1.Resource + 13, // 23: c1.config.v1.GrantSliceField.default_value:type_name -> c1.config.v1.Grant + 7, // 24: c1.config.v1.ResourceIdField.default_value:type_name -> c1.config.v1.ResourceId + 25, // 25: c1.config.v1.ResourceIdField.rules:type_name -> c1.config.v1.ResourceIDRules + 15, // 26: c1.config.v1.ResourceIdSliceField.default_value:type_name -> c1.config.v1.ResourceIdField + 26, // 27: c1.config.v1.ResourceIdSliceField.rules:type_name -> c1.config.v1.RepeatedResourceIdRules + 27, // 28: c1.config.v1.IntField.rules:type_name -> c1.config.v1.Int64Rules + 28, // 29: c1.config.v1.BoolField.rules:type_name -> c1.config.v1.BoolRules + 29, // 30: c1.config.v1.StringSliceField.rules:type_name -> c1.config.v1.RepeatedStringRules + 23, // 31: c1.config.v1.StringMapField.default_value:type_name -> c1.config.v1.StringMapField.DefaultValueEntry + 30, // 32: c1.config.v1.StringMapField.rules:type_name -> c1.config.v1.StringMapRules + 31, // 33: c1.config.v1.StringField.rules:type_name -> c1.config.v1.StringRules + 1, // 34: c1.config.v1.StringField.type:type_name -> c1.config.v1.StringFieldType + 21, // 35: c1.config.v1.StringField.options:type_name -> c1.config.v1.StringFieldOption + 24, // 36: c1.config.v1.StringMapField.DefaultValueEntry.value:type_name -> google.protobuf.Any + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name } func init() { file_c1_config_v1_config_proto_init() } @@ -2399,21 +2958,23 @@ func file_c1_config_v1_config_proto_init() { (*Field_ResourceIdSliceField)(nil), (*Field_ResourceField)(nil), (*Field_ResourceSliceField)(nil), + (*Field_EntitlementSliceField)(nil), + (*Field_GrantSliceField)(nil), } - file_c1_config_v1_config_proto_msgTypes[8].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[9].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[10].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[11].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[12].OneofWrappers = []any{} file_c1_config_v1_config_proto_msgTypes[13].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[14].OneofWrappers = []any{} file_c1_config_v1_config_proto_msgTypes[15].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[16].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[17].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[18].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[20].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_config_v1_config_proto_rawDesc), len(file_c1_config_v1_config_proto_rawDesc)), NumEnums: 2, - NumMessages: 17, + NumMessages: 22, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.validate.go 
b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.validate.go index c58e2bd2..3c99fe9b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config.pb.validate.go @@ -870,6 +870,88 @@ func (m *Field) validate(all bool) error { } } + case *Field_EntitlementSliceField: + if v == nil { + err := FieldValidationError{ + field: "Field", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetEntitlementSliceField()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FieldValidationError{ + field: "EntitlementSliceField", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FieldValidationError{ + field: "EntitlementSliceField", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEntitlementSliceField()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FieldValidationError{ + field: "EntitlementSliceField", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Field_GrantSliceField: + if v == nil { + err := FieldValidationError{ + field: "Field", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetGrantSliceField()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FieldValidationError{ + field: "GrantSliceField", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FieldValidationError{ + field: "GrantSliceField", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetGrantSliceField()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FieldValidationError{ + field: "GrantSliceField", + reason: "embedded message failed validation", + cause: err, + } + } + } + default: _ = v // ensures v is used } @@ -1514,6 +1596,649 @@ var _ interface { ErrorName() string } = ResourceSliceFieldValidationError{} +// Validate checks the field values on Entitlement with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Entitlement) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Entitlement with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in EntitlementMultiError, or +// nil if none found. 
+func (m *Entitlement) ValidateAll() error { + return m.validate(true) +} + +func (m *Entitlement) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Id + + // no validation rules for DisplayName + + // no validation rules for Description + + // no validation rules for Slug + + // no validation rules for Purpose + + // no validation rules for ResourceId + + // no validation rules for ResourceTypeId + + if len(errors) > 0 { + return EntitlementMultiError(errors) + } + + return nil +} + +// EntitlementMultiError is an error wrapping multiple validation errors +// returned by Entitlement.ValidateAll() if the designated constraints aren't met. +type EntitlementMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EntitlementMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EntitlementMultiError) AllErrors() []error { return m } + +// EntitlementValidationError is the validation error returned by +// Entitlement.Validate if the designated constraints aren't met. +type EntitlementValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EntitlementValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EntitlementValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EntitlementValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EntitlementValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EntitlementValidationError) ErrorName() string { return "EntitlementValidationError" } + +// Error satisfies the builtin error interface +func (e EntitlementValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEntitlement.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EntitlementValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EntitlementValidationError{} + +// Validate checks the field values on EntitlementSliceField with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *EntitlementSliceField) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EntitlementSliceField with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// EntitlementSliceFieldMultiError, or nil if none found. 
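// NOTE: illustrative sketch only, not part of the generated files in this patch.
// A minimal example of the new Entitlement builder API and its generated
// validation hooks, assuming
//   import config "github.com/conductorone/baton-sdk/pb/c1/config/v1"
// The function name and all IDs/values below are hypothetical.
func exampleEntitlements() (*config.EntitlementSliceField, error) {
	ent := config.Entitlement_builder{
		Id:             "team:1234:member",
		DisplayName:    "Team Member",
		Purpose:        "PURPOSE_VALUE_ASSIGNMENT",
		ResourceId:     "1234",
		ResourceTypeId: "team",
	}.Build()
	// Entitlement currently has no field rules, so ValidateAll can only surface
	// violations if rules are added to the schema in later revisions.
	if err := ent.ValidateAll(); err != nil {
		return nil, err
	}
	return config.EntitlementSliceField_builder{
		DefaultValue: []*config.Entitlement{ent},
	}.Build(), nil
}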
+func (m *EntitlementSliceField) ValidateAll() error { + return m.validate(true) +} + +func (m *EntitlementSliceField) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetDefaultValue() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EntitlementSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EntitlementSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EntitlementSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return EntitlementSliceFieldMultiError(errors) + } + + return nil +} + +// EntitlementSliceFieldMultiError is an error wrapping multiple validation +// errors returned by EntitlementSliceField.ValidateAll() if the designated +// constraints aren't met. +type EntitlementSliceFieldMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EntitlementSliceFieldMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EntitlementSliceFieldMultiError) AllErrors() []error { return m } + +// EntitlementSliceFieldValidationError is the validation error returned by +// EntitlementSliceField.Validate if the designated constraints aren't met. +type EntitlementSliceFieldValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EntitlementSliceFieldValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EntitlementSliceFieldValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EntitlementSliceFieldValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EntitlementSliceFieldValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EntitlementSliceFieldValidationError) ErrorName() string { + return "EntitlementSliceFieldValidationError" +} + +// Error satisfies the builtin error interface +func (e EntitlementSliceFieldValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEntitlementSliceField.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EntitlementSliceFieldValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EntitlementSliceFieldValidationError{} + +// Validate checks the field values on EntitlementRef with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EntitlementRef) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EntitlementRef with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in EntitlementRefMultiError, +// or nil if none found. +func (m *EntitlementRef) ValidateAll() error { + return m.validate(true) +} + +func (m *EntitlementRef) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Id + + if len(errors) > 0 { + return EntitlementRefMultiError(errors) + } + + return nil +} + +// EntitlementRefMultiError is an error wrapping multiple validation errors +// returned by EntitlementRef.ValidateAll() if the designated constraints +// aren't met. +type EntitlementRefMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EntitlementRefMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EntitlementRefMultiError) AllErrors() []error { return m } + +// EntitlementRefValidationError is the validation error returned by +// EntitlementRef.Validate if the designated constraints aren't met. +type EntitlementRefValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e EntitlementRefValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EntitlementRefValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EntitlementRefValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EntitlementRefValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EntitlementRefValidationError) ErrorName() string { return "EntitlementRefValidationError" } + +// Error satisfies the builtin error interface +func (e EntitlementRefValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEntitlementRef.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EntitlementRefValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EntitlementRefValidationError{} + +// Validate checks the field values on Grant with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Grant) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Grant with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in GrantMultiError, or nil if none found. 
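// NOTE: illustrative sketch only, not part of the generated files in this patch.
// Wiring a Grant to an EntitlementRef with the builders added above, assuming
//   import config "github.com/conductorone/baton-sdk/pb/c1/config/v1"
// The function name and IDs are hypothetical; the principal is expected to come
// from the caller's existing Resource plumbing.
func exampleGrant(principal *config.Resource) (*config.Grant, error) {
	g := config.Grant_builder{
		Id:          "grant:team:1234:member:user:42",
		Entitlement: config.EntitlementRef_builder{Id: "team:1234:member"}.Build(),
		Principal:   principal, // a nil principal is tolerated: the generated validate() is nil-safe
	}.Build()
	if err := g.ValidateAll(); err != nil {
		return nil, err
	}
	return g, nil
}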
+func (m *Grant) ValidateAll() error { + return m.validate(true) +} + +func (m *Grant) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Id + + if all { + switch v := interface{}(m.GetEntitlement()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrantValidationError{ + field: "Entitlement", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrantValidationError{ + field: "Entitlement", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEntitlement()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrantValidationError{ + field: "Entitlement", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPrincipal()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrantValidationError{ + field: "Principal", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrantValidationError{ + field: "Principal", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPrincipal()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrantValidationError{ + field: "Principal", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GrantMultiError(errors) + } + + return nil +} + +// GrantMultiError is an error wrapping multiple validation errors returned by +// Grant.ValidateAll() if the designated constraints aren't met. +type GrantMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrantMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrantMultiError) AllErrors() []error { return m } + +// GrantValidationError is the validation error returned by Grant.Validate if +// the designated constraints aren't met. +type GrantValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrantValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrantValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GrantValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrantValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GrantValidationError) ErrorName() string { return "GrantValidationError" } + +// Error satisfies the builtin error interface +func (e GrantValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrant.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrantValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrantValidationError{} + +// Validate checks the field values on GrantSliceField with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GrantSliceField) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GrantSliceField with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GrantSliceFieldMultiError, or nil if none found. +func (m *GrantSliceField) ValidateAll() error { + return m.validate(true) +} + +func (m *GrantSliceField) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetDefaultValue() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GrantSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GrantSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GrantSliceFieldValidationError{ + field: fmt.Sprintf("DefaultValue[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return GrantSliceFieldMultiError(errors) + } + + return nil +} + +// GrantSliceFieldMultiError is an error wrapping multiple validation errors +// returned by GrantSliceField.ValidateAll() if the designated constraints +// aren't met. +type GrantSliceFieldMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GrantSliceFieldMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GrantSliceFieldMultiError) AllErrors() []error { return m } + +// GrantSliceFieldValidationError is the validation error returned by +// GrantSliceField.Validate if the designated constraints aren't met. +type GrantSliceFieldValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GrantSliceFieldValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GrantSliceFieldValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e GrantSliceFieldValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GrantSliceFieldValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GrantSliceFieldValidationError) ErrorName() string { return "GrantSliceFieldValidationError" } + +// Error satisfies the builtin error interface +func (e GrantSliceFieldValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGrantSliceField.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GrantSliceFieldValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GrantSliceFieldValidationError{} + // Validate checks the field values on ResourceIdField with the rules defined // in the proto definition for this message. If any rules are violated, the // first error encountered is returned, or nil if there are no violations. diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config_protoopaque.pb.go index 5cc36692..a236f5f3 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/config/v1/config_protoopaque.pb.go @@ -721,6 +721,24 @@ func (x *Field) GetResourceSliceField() *ResourceSliceField { return nil } +func (x *Field) GetEntitlementSliceField() *EntitlementSliceField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_EntitlementSliceField); ok { + return x.EntitlementSliceField + } + } + return nil +} + +func (x *Field) GetGrantSliceField() *GrantSliceField { + if x != nil { + if x, ok := x.xxx_hidden_Field.(*field_GrantSliceField); ok { + return x.GrantSliceField + } + } + return nil +} + func (x *Field) SetName(v string) { x.xxx_hidden_Name = v } @@ -821,6 +839,22 @@ func (x *Field) SetResourceSliceField(v *ResourceSliceField) { x.xxx_hidden_Field = &field_ResourceSliceField{v} } +func (x *Field) SetEntitlementSliceField(v *EntitlementSliceField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_EntitlementSliceField{v} +} + +func (x *Field) SetGrantSliceField(v *GrantSliceField) { + if v == nil { + x.xxx_hidden_Field = nil + return + } + x.xxx_hidden_Field = &field_GrantSliceField{v} +} + func (x *Field) HasField() bool { if x == nil { return false @@ -900,6 +934,22 @@ func (x *Field) HasResourceSliceField() bool { return ok } +func (x *Field) HasEntitlementSliceField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_EntitlementSliceField) + return ok +} + +func (x *Field) HasGrantSliceField() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_Field.(*field_GrantSliceField) + return ok +} + func (x *Field) ClearField() { x.xxx_hidden_Field = nil } @@ -958,6 +1008,18 @@ func (x *Field) ClearResourceSliceField() { } } +func (x *Field) ClearEntitlementSliceField() { + if _, ok := x.xxx_hidden_Field.(*field_EntitlementSliceField); ok { + x.xxx_hidden_Field = nil + } +} + +func (x *Field) ClearGrantSliceField() { + if _, ok := x.xxx_hidden_Field.(*field_GrantSliceField); ok { + x.xxx_hidden_Field = nil + } +} + const Field_Field_not_set_case case_Field_Field = 0 const Field_StringField_case case_Field_Field = 100 const Field_IntField_case 
case_Field_Field = 101 @@ -968,6 +1030,8 @@ const Field_ResourceIdField_case case_Field_Field = 105 const Field_ResourceIdSliceField_case case_Field_Field = 106 const Field_ResourceField_case case_Field_Field = 107 const Field_ResourceSliceField_case case_Field_Field = 108 +const Field_EntitlementSliceField_case case_Field_Field = 109 +const Field_GrantSliceField_case case_Field_Field = 110 func (x *Field) WhichField() case_Field_Field { if x == nil { @@ -992,6 +1056,10 @@ func (x *Field) WhichField() case_Field_Field { return Field_ResourceField_case case *field_ResourceSliceField: return Field_ResourceSliceField_case + case *field_EntitlementSliceField: + return Field_EntitlementSliceField_case + case *field_GrantSliceField: + return Field_GrantSliceField_case default: return Field_Field_not_set_case } @@ -1016,8 +1084,10 @@ type Field_builder struct { ResourceIdField *ResourceIdField ResourceIdSliceField *ResourceIdSliceField // These are meant to serve as return types for actions. - ResourceField *ResourceField - ResourceSliceField *ResourceSliceField + ResourceField *ResourceField + ResourceSliceField *ResourceSliceField + EntitlementSliceField *EntitlementSliceField + GrantSliceField *GrantSliceField // -- end of xxx_hidden_Field } @@ -1059,6 +1129,12 @@ func (b0 Field_builder) Build() *Field { if b.ResourceSliceField != nil { x.xxx_hidden_Field = &field_ResourceSliceField{b.ResourceSliceField} } + if b.EntitlementSliceField != nil { + x.xxx_hidden_Field = &field_EntitlementSliceField{b.EntitlementSliceField} + } + if b.GrantSliceField != nil { + x.xxx_hidden_Field = &field_GrantSliceField{b.GrantSliceField} + } return m0 } @@ -1113,6 +1189,14 @@ type field_ResourceSliceField struct { ResourceSliceField *ResourceSliceField `protobuf:"bytes,108,opt,name=resource_slice_field,json=resourceSliceField,proto3,oneof"` } +type field_EntitlementSliceField struct { + EntitlementSliceField *EntitlementSliceField `protobuf:"bytes,109,opt,name=entitlement_slice_field,json=entitlementSliceField,proto3,oneof"` +} + +type field_GrantSliceField struct { + GrantSliceField *GrantSliceField `protobuf:"bytes,110,opt,name=grant_slice_field,json=grantSliceField,proto3,oneof"` +} + func (*field_StringField) isField_Field() {} func (*field_IntField) isField_Field() {} @@ -1131,6 +1215,10 @@ func (*field_ResourceField) isField_Field() {} func (*field_ResourceSliceField) isField_Field() {} +func (*field_EntitlementSliceField) isField_Field() {} + +func (*field_GrantSliceField) isField_Field() {} + // These are partially duplicate with the Resource proto in the connector package. 
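// NOTE: illustrative sketch only, not part of the generated files in this patch.
// Declaring an action return-type Field that carries the new grant_slice_field
// oneof arm, and dispatching on the oneof case, assuming
//   import config "github.com/conductorone/baton-sdk/pb/c1/config/v1"
// The function name and the defaults parameter are hypothetical.
func exampleGrantReturnField(defaults []*config.Grant) []*config.Grant {
	f := config.Field_builder{
		GrantSliceField: config.GrantSliceField_builder{DefaultValue: defaults}.Build(),
	}.Build()
	// The new case constant lets callers check which oneof arm is populated.
	if f.WhichField() == config.Field_GrantSliceField_case {
		return f.GetGrantSliceField().GetDefaultValue()
	}
	return nil
}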
// This is to avoid import cycles type Resource struct { @@ -1468,6 +1556,446 @@ func (b0 ResourceSliceField_builder) Build() *ResourceSliceField { return m0 } +// Simplified Entitlement for config return types (avoids import cycles with connector package) +type Entitlement struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3"` + xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3"` + xxx_hidden_Slug string `protobuf:"bytes,4,opt,name=slug,proto3"` + xxx_hidden_Purpose string `protobuf:"bytes,5,opt,name=purpose,proto3"` + xxx_hidden_GrantableToResourceTypeIds []string `protobuf:"bytes,6,rep,name=grantable_to_resource_type_ids,json=grantableToResourceTypeIds,proto3"` + xxx_hidden_ResourceId string `protobuf:"bytes,7,opt,name=resource_id,json=resourceId,proto3"` + xxx_hidden_ResourceTypeId string `protobuf:"bytes,8,opt,name=resource_type_id,json=resourceTypeId,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Entitlement) Reset() { + *x = Entitlement{} + mi := &file_c1_config_v1_config_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Entitlement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entitlement) ProtoMessage() {} + +func (x *Entitlement) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Entitlement) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *Entitlement) GetDisplayName() string { + if x != nil { + return x.xxx_hidden_DisplayName + } + return "" +} + +func (x *Entitlement) GetDescription() string { + if x != nil { + return x.xxx_hidden_Description + } + return "" +} + +func (x *Entitlement) GetSlug() string { + if x != nil { + return x.xxx_hidden_Slug + } + return "" +} + +func (x *Entitlement) GetPurpose() string { + if x != nil { + return x.xxx_hidden_Purpose + } + return "" +} + +func (x *Entitlement) GetGrantableToResourceTypeIds() []string { + if x != nil { + return x.xxx_hidden_GrantableToResourceTypeIds + } + return nil +} + +func (x *Entitlement) GetResourceId() string { + if x != nil { + return x.xxx_hidden_ResourceId + } + return "" +} + +func (x *Entitlement) GetResourceTypeId() string { + if x != nil { + return x.xxx_hidden_ResourceTypeId + } + return "" +} + +func (x *Entitlement) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *Entitlement) SetDisplayName(v string) { + x.xxx_hidden_DisplayName = v +} + +func (x *Entitlement) SetDescription(v string) { + x.xxx_hidden_Description = v +} + +func (x *Entitlement) SetSlug(v string) { + x.xxx_hidden_Slug = v +} + +func (x *Entitlement) SetPurpose(v string) { + x.xxx_hidden_Purpose = v +} + +func (x *Entitlement) SetGrantableToResourceTypeIds(v []string) { + x.xxx_hidden_GrantableToResourceTypeIds = v +} + +func (x *Entitlement) SetResourceId(v string) { + x.xxx_hidden_ResourceId = v +} + +func (x *Entitlement) SetResourceTypeId(v string) { + x.xxx_hidden_ResourceTypeId = v +} + +type Entitlement_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + DisplayName string + Description string + Slug string + Purpose string + GrantableToResourceTypeIds []string + ResourceId string + ResourceTypeId string +} + +func (b0 Entitlement_builder) Build() *Entitlement { + m0 := &Entitlement{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_DisplayName = b.DisplayName + x.xxx_hidden_Description = b.Description + x.xxx_hidden_Slug = b.Slug + x.xxx_hidden_Purpose = b.Purpose + x.xxx_hidden_GrantableToResourceTypeIds = b.GrantableToResourceTypeIds + x.xxx_hidden_ResourceId = b.ResourceId + x.xxx_hidden_ResourceTypeId = b.ResourceTypeId + return m0 +} + +type EntitlementSliceField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue *[]*Entitlement `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementSliceField) Reset() { + *x = EntitlementSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementSliceField) ProtoMessage() {} + +func (x *EntitlementSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementSliceField) GetDefaultValue() []*Entitlement { + if x != nil { + if x.xxx_hidden_DefaultValue != nil { + return *x.xxx_hidden_DefaultValue + } + } + return nil +} + +func (x *EntitlementSliceField) SetDefaultValue(v []*Entitlement) { + x.xxx_hidden_DefaultValue = &v +} + +type EntitlementSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue []*Entitlement +} + +func (b0 EntitlementSliceField_builder) Build() *EntitlementSliceField { + m0 := &EntitlementSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = &b.DefaultValue + return m0 +} + +// Reference to an entitlement (used in Grant) +type EntitlementRef struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EntitlementRef) Reset() { + *x = EntitlementRef{} + mi := &file_c1_config_v1_config_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EntitlementRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntitlementRef) ProtoMessage() {} + +func (x *EntitlementRef) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *EntitlementRef) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *EntitlementRef) SetId(v string) { + x.xxx_hidden_Id = v +} + +type EntitlementRef_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string +} + +func (b0 EntitlementRef_builder) Build() *EntitlementRef { + m0 := &EntitlementRef{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + return m0 +} + +// Simplified Grant for config return types (avoids import cycles with connector package) +type Grant struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Id string `protobuf:"bytes,1,opt,name=id,proto3"` + xxx_hidden_Entitlement *EntitlementRef `protobuf:"bytes,2,opt,name=entitlement,proto3"` + xxx_hidden_Principal *Resource `protobuf:"bytes,3,opt,name=principal,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Grant) Reset() { + *x = Grant{} + mi := &file_c1_config_v1_config_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Grant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Grant) ProtoMessage() {} + +func (x *Grant) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Grant) GetId() string { + if x != nil { + return x.xxx_hidden_Id + } + return "" +} + +func (x *Grant) GetEntitlement() *EntitlementRef { + if x != nil { + return x.xxx_hidden_Entitlement + } + return nil +} + +func (x *Grant) GetPrincipal() *Resource { + if x != nil { + return x.xxx_hidden_Principal + } + return nil +} + +func (x *Grant) SetId(v string) { + x.xxx_hidden_Id = v +} + +func (x *Grant) SetEntitlement(v *EntitlementRef) { + x.xxx_hidden_Entitlement = v +} + +func (x *Grant) SetPrincipal(v *Resource) { + x.xxx_hidden_Principal = v +} + +func (x *Grant) HasEntitlement() bool { + if x == nil { + return false + } + return x.xxx_hidden_Entitlement != nil +} + +func (x *Grant) HasPrincipal() bool { + if x == nil { + return false + } + return x.xxx_hidden_Principal != nil +} + +func (x *Grant) ClearEntitlement() { + x.xxx_hidden_Entitlement = nil +} + +func (x *Grant) ClearPrincipal() { + x.xxx_hidden_Principal = nil +} + +type Grant_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Id string + Entitlement *EntitlementRef + Principal *Resource +} + +func (b0 Grant_builder) Build() *Grant { + m0 := &Grant{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Id = b.Id + x.xxx_hidden_Entitlement = b.Entitlement + x.xxx_hidden_Principal = b.Principal + return m0 +} + +type GrantSliceField struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_DefaultValue *[]*Grant `protobuf:"bytes,1,rep,name=default_value,json=defaultValue,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GrantSliceField) Reset() { + *x = GrantSliceField{} + mi := &file_c1_config_v1_config_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GrantSliceField) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrantSliceField) ProtoMessage() {} + +func (x *GrantSliceField) ProtoReflect() protoreflect.Message { + mi := &file_c1_config_v1_config_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GrantSliceField) GetDefaultValue() []*Grant { + if x != nil { + if x.xxx_hidden_DefaultValue != nil { + return *x.xxx_hidden_DefaultValue + } + } + return nil +} + +func (x *GrantSliceField) SetDefaultValue(v []*Grant) { + x.xxx_hidden_DefaultValue = &v +} + +type GrantSliceField_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + DefaultValue []*Grant +} + +func (b0 GrantSliceField_builder) Build() *GrantSliceField { + m0 := &GrantSliceField{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_DefaultValue = &b.DefaultValue + return m0 +} + type ResourceIdField struct { state protoimpl.MessageState `protogen:"opaque.v1"` xxx_hidden_DefaultValue *ResourceId `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3"` @@ -1478,7 +2006,7 @@ type ResourceIdField struct { func (x *ResourceIdField) Reset() { *x = ResourceIdField{} - mi := &file_c1_config_v1_config_proto_msgTypes[8] + mi := &file_c1_config_v1_config_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1490,7 +2018,7 @@ func (x *ResourceIdField) String() string { func (*ResourceIdField) ProtoMessage() {} func (x *ResourceIdField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[8] + mi := &file_c1_config_v1_config_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1571,7 +2099,7 @@ type ResourceIdSliceField struct { func (x *ResourceIdSliceField) Reset() { *x = ResourceIdSliceField{} - mi := &file_c1_config_v1_config_proto_msgTypes[9] + mi := &file_c1_config_v1_config_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1583,7 +2111,7 @@ func (x *ResourceIdSliceField) String() string { func (*ResourceIdSliceField) ProtoMessage() {} func (x *ResourceIdSliceField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[9] + mi := &file_c1_config_v1_config_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1655,7 +2183,7 @@ type IntField struct { func (x *IntField) Reset() { *x = IntField{} - mi := &file_c1_config_v1_config_proto_msgTypes[10] + mi := 
&file_c1_config_v1_config_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1667,7 +2195,7 @@ func (x *IntField) String() string { func (*IntField) ProtoMessage() {} func (x *IntField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[10] + mi := &file_c1_config_v1_config_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1738,7 +2266,7 @@ type BoolField struct { func (x *BoolField) Reset() { *x = BoolField{} - mi := &file_c1_config_v1_config_proto_msgTypes[11] + mi := &file_c1_config_v1_config_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1750,7 +2278,7 @@ func (x *BoolField) String() string { func (*BoolField) ProtoMessage() {} func (x *BoolField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[11] + mi := &file_c1_config_v1_config_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1820,7 +2348,7 @@ type StringSliceField struct { func (x *StringSliceField) Reset() { *x = StringSliceField{} - mi := &file_c1_config_v1_config_proto_msgTypes[12] + mi := &file_c1_config_v1_config_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1832,7 +2360,7 @@ func (x *StringSliceField) String() string { func (*StringSliceField) ProtoMessage() {} func (x *StringSliceField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[12] + mi := &file_c1_config_v1_config_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1902,7 +2430,7 @@ type StringMapField struct { func (x *StringMapField) Reset() { *x = StringMapField{} - mi := &file_c1_config_v1_config_proto_msgTypes[13] + mi := &file_c1_config_v1_config_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1914,7 +2442,7 @@ func (x *StringMapField) String() string { func (*StringMapField) ProtoMessage() {} func (x *StringMapField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[13] + mi := &file_c1_config_v1_config_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1985,7 +2513,7 @@ type StringFieldOption struct { func (x *StringFieldOption) Reset() { *x = StringFieldOption{} - mi := &file_c1_config_v1_config_proto_msgTypes[14] + mi := &file_c1_config_v1_config_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1997,7 +2525,7 @@ func (x *StringFieldOption) String() string { func (*StringFieldOption) ProtoMessage() {} func (x *StringFieldOption) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[14] + mi := &file_c1_config_v1_config_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2072,7 +2600,7 @@ type StringField struct { func (x *StringField) Reset() { *x = StringField{} - mi := &file_c1_config_v1_config_proto_msgTypes[15] + mi := &file_c1_config_v1_config_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2084,7 +2612,7 @@ func (x *StringField) String() string { func (*StringField) 
ProtoMessage() {} func (x *StringField) ProtoReflect() protoreflect.Message { - mi := &file_c1_config_v1_config_proto_msgTypes[15] + mi := &file_c1_config_v1_config_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2219,7 +2747,7 @@ const file_c1_config_v1_config_proto_rawDesc = "" + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x1b\n" + "\thelp_text\x18\x03 \x01(\tR\bhelpText\x12\x16\n" + "\x06fields\x18\x04 \x03(\tR\x06fields\x12\x18\n" + - "\adefault\x18\x05 \x01(\bR\adefault\"\xf1\x06\n" + + "\adefault\x18\x05 \x01(\bR\adefault\"\x9d\b\n" + "\x05Field\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12!\n" + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12 \n" + @@ -2238,7 +2766,9 @@ const file_c1_config_v1_config_proto_rawDesc = "" + "\x11resource_id_field\x18i \x01(\v2\x1d.c1.config.v1.ResourceIdFieldH\x00R\x0fresourceIdField\x12[\n" + "\x17resource_id_slice_field\x18j \x01(\v2\".c1.config.v1.ResourceIdSliceFieldH\x00R\x14resourceIdSliceField\x12D\n" + "\x0eresource_field\x18k \x01(\v2\x1b.c1.config.v1.ResourceFieldH\x00R\rresourceField\x12T\n" + - "\x14resource_slice_field\x18l \x01(\v2 .c1.config.v1.ResourceSliceFieldH\x00R\x12resourceSliceFieldB\a\n" + + "\x14resource_slice_field\x18l \x01(\v2 .c1.config.v1.ResourceSliceFieldH\x00R\x12resourceSliceField\x12]\n" + + "\x17entitlement_slice_field\x18m \x01(\v2#.c1.config.v1.EntitlementSliceFieldH\x00R\x15entitlementSliceField\x12K\n" + + "\x11grant_slice_field\x18n \x01(\v2\x1d.c1.config.v1.GrantSliceFieldH\x00R\x0fgrantSliceFieldB\a\n" + "\x05field\"\x8a\x02\n" + "\bResource\x129\n" + "\vresource_id\x18\x01 \x01(\v2\x18.c1.config.v1.ResourceIdR\n" + @@ -2255,7 +2785,27 @@ const file_c1_config_v1_config_proto_rawDesc = "" + "\rResourceField\x12;\n" + "\rdefault_value\x18\x01 \x01(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"Q\n" + "\x12ResourceSliceField\x12;\n" + - "\rdefault_value\x18\x01 \x03(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"\x94\x01\n" + + "\rdefault_value\x18\x01 \x03(\v2\x16.c1.config.v1.ResourceR\fdefaultValue\"\x9f\x02\n" + + "\vEntitlement\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12!\n" + + "\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12 \n" + + "\vdescription\x18\x03 \x01(\tR\vdescription\x12\x12\n" + + "\x04slug\x18\x04 \x01(\tR\x04slug\x12\x18\n" + + "\apurpose\x18\x05 \x01(\tR\apurpose\x12B\n" + + "\x1egrantable_to_resource_type_ids\x18\x06 \x03(\tR\x1agrantableToResourceTypeIds\x12\x1f\n" + + "\vresource_id\x18\a \x01(\tR\n" + + "resourceId\x12(\n" + + "\x10resource_type_id\x18\b \x01(\tR\x0eresourceTypeId\"W\n" + + "\x15EntitlementSliceField\x12>\n" + + "\rdefault_value\x18\x01 \x03(\v2\x19.c1.config.v1.EntitlementR\fdefaultValue\" \n" + + "\x0eEntitlementRef\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\"\x8d\x01\n" + + "\x05Grant\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12>\n" + + "\ventitlement\x18\x02 \x01(\v2\x1c.c1.config.v1.EntitlementRefR\ventitlement\x124\n" + + "\tprincipal\x18\x03 \x01(\v2\x16.c1.config.v1.ResourceR\tprincipal\"K\n" + + "\x0fGrantSliceField\x128\n" + + "\rdefault_value\x18\x01 \x03(\v2\x13.c1.config.v1.GrantR\fdefaultValue\"\x94\x01\n" + "\x0fResourceIdField\x12=\n" + "\rdefault_value\x18\x01 \x01(\v2\x18.c1.config.v1.ResourceIdR\fdefaultValue\x128\n" + "\x05rules\x18\x03 \x01(\v2\x1d.c1.config.v1.ResourceIDRulesH\x00R\x05rules\x88\x01\x01B\b\n" + @@ -2308,7 +2858,7 @@ const file_c1_config_v1_config_proto_rawDesc = "" + 
"\x1dSTRING_FIELD_TYPE_FILE_UPLOAD\x10\x04B3Z1github.com/conductorone/baton-sdk/pb/c1/config/v1b\x06proto3" var file_c1_config_v1_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_c1_config_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_c1_config_v1_config_proto_msgTypes = make([]protoimpl.MessageInfo, 22) var file_c1_config_v1_config_proto_goTypes = []any{ (ConstraintKind)(0), // 0: c1.config.v1.ConstraintKind (StringFieldType)(0), // 1: c1.config.v1.StringFieldType @@ -2320,61 +2870,72 @@ var file_c1_config_v1_config_proto_goTypes = []any{ (*ResourceId)(nil), // 7: c1.config.v1.ResourceId (*ResourceField)(nil), // 8: c1.config.v1.ResourceField (*ResourceSliceField)(nil), // 9: c1.config.v1.ResourceSliceField - (*ResourceIdField)(nil), // 10: c1.config.v1.ResourceIdField - (*ResourceIdSliceField)(nil), // 11: c1.config.v1.ResourceIdSliceField - (*IntField)(nil), // 12: c1.config.v1.IntField - (*BoolField)(nil), // 13: c1.config.v1.BoolField - (*StringSliceField)(nil), // 14: c1.config.v1.StringSliceField - (*StringMapField)(nil), // 15: c1.config.v1.StringMapField - (*StringFieldOption)(nil), // 16: c1.config.v1.StringFieldOption - (*StringField)(nil), // 17: c1.config.v1.StringField - nil, // 18: c1.config.v1.StringMapField.DefaultValueEntry - (*anypb.Any)(nil), // 19: google.protobuf.Any - (*ResourceIDRules)(nil), // 20: c1.config.v1.ResourceIDRules - (*RepeatedResourceIdRules)(nil), // 21: c1.config.v1.RepeatedResourceIdRules - (*Int64Rules)(nil), // 22: c1.config.v1.Int64Rules - (*BoolRules)(nil), // 23: c1.config.v1.BoolRules - (*RepeatedStringRules)(nil), // 24: c1.config.v1.RepeatedStringRules - (*StringMapRules)(nil), // 25: c1.config.v1.StringMapRules - (*StringRules)(nil), // 26: c1.config.v1.StringRules + (*Entitlement)(nil), // 10: c1.config.v1.Entitlement + (*EntitlementSliceField)(nil), // 11: c1.config.v1.EntitlementSliceField + (*EntitlementRef)(nil), // 12: c1.config.v1.EntitlementRef + (*Grant)(nil), // 13: c1.config.v1.Grant + (*GrantSliceField)(nil), // 14: c1.config.v1.GrantSliceField + (*ResourceIdField)(nil), // 15: c1.config.v1.ResourceIdField + (*ResourceIdSliceField)(nil), // 16: c1.config.v1.ResourceIdSliceField + (*IntField)(nil), // 17: c1.config.v1.IntField + (*BoolField)(nil), // 18: c1.config.v1.BoolField + (*StringSliceField)(nil), // 19: c1.config.v1.StringSliceField + (*StringMapField)(nil), // 20: c1.config.v1.StringMapField + (*StringFieldOption)(nil), // 21: c1.config.v1.StringFieldOption + (*StringField)(nil), // 22: c1.config.v1.StringField + nil, // 23: c1.config.v1.StringMapField.DefaultValueEntry + (*anypb.Any)(nil), // 24: google.protobuf.Any + (*ResourceIDRules)(nil), // 25: c1.config.v1.ResourceIDRules + (*RepeatedResourceIdRules)(nil), // 26: c1.config.v1.RepeatedResourceIdRules + (*Int64Rules)(nil), // 27: c1.config.v1.Int64Rules + (*BoolRules)(nil), // 28: c1.config.v1.BoolRules + (*RepeatedStringRules)(nil), // 29: c1.config.v1.RepeatedStringRules + (*StringMapRules)(nil), // 30: c1.config.v1.StringMapRules + (*StringRules)(nil), // 31: c1.config.v1.StringRules } var file_c1_config_v1_config_proto_depIdxs = []int32{ 5, // 0: c1.config.v1.Configuration.fields:type_name -> c1.config.v1.Field 3, // 1: c1.config.v1.Configuration.constraints:type_name -> c1.config.v1.Constraint 4, // 2: c1.config.v1.Configuration.field_groups:type_name -> c1.config.v1.FieldGroup 0, // 3: c1.config.v1.Constraint.kind:type_name -> c1.config.v1.ConstraintKind - 17, // 4: c1.config.v1.Field.string_field:type_name -> 
c1.config.v1.StringField - 12, // 5: c1.config.v1.Field.int_field:type_name -> c1.config.v1.IntField - 13, // 6: c1.config.v1.Field.bool_field:type_name -> c1.config.v1.BoolField - 14, // 7: c1.config.v1.Field.string_slice_field:type_name -> c1.config.v1.StringSliceField - 15, // 8: c1.config.v1.Field.string_map_field:type_name -> c1.config.v1.StringMapField - 10, // 9: c1.config.v1.Field.resource_id_field:type_name -> c1.config.v1.ResourceIdField - 11, // 10: c1.config.v1.Field.resource_id_slice_field:type_name -> c1.config.v1.ResourceIdSliceField + 22, // 4: c1.config.v1.Field.string_field:type_name -> c1.config.v1.StringField + 17, // 5: c1.config.v1.Field.int_field:type_name -> c1.config.v1.IntField + 18, // 6: c1.config.v1.Field.bool_field:type_name -> c1.config.v1.BoolField + 19, // 7: c1.config.v1.Field.string_slice_field:type_name -> c1.config.v1.StringSliceField + 20, // 8: c1.config.v1.Field.string_map_field:type_name -> c1.config.v1.StringMapField + 15, // 9: c1.config.v1.Field.resource_id_field:type_name -> c1.config.v1.ResourceIdField + 16, // 10: c1.config.v1.Field.resource_id_slice_field:type_name -> c1.config.v1.ResourceIdSliceField 8, // 11: c1.config.v1.Field.resource_field:type_name -> c1.config.v1.ResourceField 9, // 12: c1.config.v1.Field.resource_slice_field:type_name -> c1.config.v1.ResourceSliceField - 7, // 13: c1.config.v1.Resource.resource_id:type_name -> c1.config.v1.ResourceId - 7, // 14: c1.config.v1.Resource.parent_resource_id:type_name -> c1.config.v1.ResourceId - 19, // 15: c1.config.v1.Resource.annotations:type_name -> google.protobuf.Any - 6, // 16: c1.config.v1.ResourceField.default_value:type_name -> c1.config.v1.Resource - 6, // 17: c1.config.v1.ResourceSliceField.default_value:type_name -> c1.config.v1.Resource - 7, // 18: c1.config.v1.ResourceIdField.default_value:type_name -> c1.config.v1.ResourceId - 20, // 19: c1.config.v1.ResourceIdField.rules:type_name -> c1.config.v1.ResourceIDRules - 10, // 20: c1.config.v1.ResourceIdSliceField.default_value:type_name -> c1.config.v1.ResourceIdField - 21, // 21: c1.config.v1.ResourceIdSliceField.rules:type_name -> c1.config.v1.RepeatedResourceIdRules - 22, // 22: c1.config.v1.IntField.rules:type_name -> c1.config.v1.Int64Rules - 23, // 23: c1.config.v1.BoolField.rules:type_name -> c1.config.v1.BoolRules - 24, // 24: c1.config.v1.StringSliceField.rules:type_name -> c1.config.v1.RepeatedStringRules - 18, // 25: c1.config.v1.StringMapField.default_value:type_name -> c1.config.v1.StringMapField.DefaultValueEntry - 25, // 26: c1.config.v1.StringMapField.rules:type_name -> c1.config.v1.StringMapRules - 26, // 27: c1.config.v1.StringField.rules:type_name -> c1.config.v1.StringRules - 1, // 28: c1.config.v1.StringField.type:type_name -> c1.config.v1.StringFieldType - 16, // 29: c1.config.v1.StringField.options:type_name -> c1.config.v1.StringFieldOption - 19, // 30: c1.config.v1.StringMapField.DefaultValueEntry.value:type_name -> google.protobuf.Any - 31, // [31:31] is the sub-list for method output_type - 31, // [31:31] is the sub-list for method input_type - 31, // [31:31] is the sub-list for extension type_name - 31, // [31:31] is the sub-list for extension extendee - 0, // [0:31] is the sub-list for field type_name + 11, // 13: c1.config.v1.Field.entitlement_slice_field:type_name -> c1.config.v1.EntitlementSliceField + 14, // 14: c1.config.v1.Field.grant_slice_field:type_name -> c1.config.v1.GrantSliceField + 7, // 15: c1.config.v1.Resource.resource_id:type_name -> c1.config.v1.ResourceId + 7, // 16: 
c1.config.v1.Resource.parent_resource_id:type_name -> c1.config.v1.ResourceId + 24, // 17: c1.config.v1.Resource.annotations:type_name -> google.protobuf.Any + 6, // 18: c1.config.v1.ResourceField.default_value:type_name -> c1.config.v1.Resource + 6, // 19: c1.config.v1.ResourceSliceField.default_value:type_name -> c1.config.v1.Resource + 10, // 20: c1.config.v1.EntitlementSliceField.default_value:type_name -> c1.config.v1.Entitlement + 12, // 21: c1.config.v1.Grant.entitlement:type_name -> c1.config.v1.EntitlementRef + 6, // 22: c1.config.v1.Grant.principal:type_name -> c1.config.v1.Resource + 13, // 23: c1.config.v1.GrantSliceField.default_value:type_name -> c1.config.v1.Grant + 7, // 24: c1.config.v1.ResourceIdField.default_value:type_name -> c1.config.v1.ResourceId + 25, // 25: c1.config.v1.ResourceIdField.rules:type_name -> c1.config.v1.ResourceIDRules + 15, // 26: c1.config.v1.ResourceIdSliceField.default_value:type_name -> c1.config.v1.ResourceIdField + 26, // 27: c1.config.v1.ResourceIdSliceField.rules:type_name -> c1.config.v1.RepeatedResourceIdRules + 27, // 28: c1.config.v1.IntField.rules:type_name -> c1.config.v1.Int64Rules + 28, // 29: c1.config.v1.BoolField.rules:type_name -> c1.config.v1.BoolRules + 29, // 30: c1.config.v1.StringSliceField.rules:type_name -> c1.config.v1.RepeatedStringRules + 23, // 31: c1.config.v1.StringMapField.default_value:type_name -> c1.config.v1.StringMapField.DefaultValueEntry + 30, // 32: c1.config.v1.StringMapField.rules:type_name -> c1.config.v1.StringMapRules + 31, // 33: c1.config.v1.StringField.rules:type_name -> c1.config.v1.StringRules + 1, // 34: c1.config.v1.StringField.type:type_name -> c1.config.v1.StringFieldType + 21, // 35: c1.config.v1.StringField.options:type_name -> c1.config.v1.StringFieldOption + 24, // 36: c1.config.v1.StringMapField.DefaultValueEntry.value:type_name -> google.protobuf.Any + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name } func init() { file_c1_config_v1_config_proto_init() } @@ -2393,21 +2954,23 @@ func file_c1_config_v1_config_proto_init() { (*field_ResourceIdSliceField)(nil), (*field_ResourceField)(nil), (*field_ResourceSliceField)(nil), + (*field_EntitlementSliceField)(nil), + (*field_GrantSliceField)(nil), } - file_c1_config_v1_config_proto_msgTypes[8].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[9].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[10].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[11].OneofWrappers = []any{} - file_c1_config_v1_config_proto_msgTypes[12].OneofWrappers = []any{} file_c1_config_v1_config_proto_msgTypes[13].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[14].OneofWrappers = []any{} file_c1_config_v1_config_proto_msgTypes[15].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[16].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[17].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[18].OneofWrappers = []any{} + file_c1_config_v1_config_proto_msgTypes[20].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_config_v1_config_proto_rawDesc), len(file_c1_config_v1_config_proto_rawDesc)), NumEnums: 
2, - NumMessages: 17, + NumMessages: 22, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/actions/args.go b/vendor/github.com/conductorone/baton-sdk/pkg/actions/args.go index 367b3287..995a4cb2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/actions/args.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/actions/args.go @@ -541,3 +541,231 @@ func NewReturnValues(success bool, fields ...ReturnField) *structpb.Struct { return rv } + +// entitlementToBasicEntitlement converts a v2.Entitlement to a config.Entitlement. +func entitlementToBasicEntitlement(entitlement *v2.Entitlement) *config.Entitlement { + var grantableToResourceTypeIDs []string + for _, rt := range entitlement.GetGrantableTo() { + grantableToResourceTypeIDs = append(grantableToResourceTypeIDs, rt.GetId()) + } + + var resourceId, resourceTypeId string + if entitlement.GetResource() != nil && entitlement.GetResource().GetId() != nil { + resourceId = entitlement.GetResource().GetId().GetResource() + resourceTypeId = entitlement.GetResource().GetId().GetResourceType() + } + + return config.Entitlement_builder{ + Id: entitlement.GetId(), + DisplayName: entitlement.GetDisplayName(), + Description: entitlement.GetDescription(), + Slug: entitlement.GetSlug(), + Purpose: entitlement.GetPurpose().String(), + GrantableToResourceTypeIds: grantableToResourceTypeIDs, + ResourceId: resourceId, + ResourceTypeId: resourceTypeId, + }.Build() +} + +// basicEntitlementToEntitlement converts a config.Entitlement to a v2.Entitlement. +func basicEntitlementToEntitlement(basicEntitlement *config.Entitlement) *v2.Entitlement { + var grantableTo []*v2.ResourceType + for _, rtId := range basicEntitlement.GetGrantableToResourceTypeIds() { + grantableTo = append(grantableTo, &v2.ResourceType{Id: rtId}) + } + + var resource *v2.Resource + if basicEntitlement.GetResourceId() != "" && basicEntitlement.GetResourceTypeId() != "" { + resource = &v2.Resource{ + Id: &v2.ResourceId{ + Resource: basicEntitlement.GetResourceId(), + ResourceType: basicEntitlement.GetResourceTypeId(), + }, + } + } + + // Parse purpose from string + purposeValue := v2.Entitlement_PURPOSE_VALUE_UNSPECIFIED + switch basicEntitlement.GetPurpose() { + case "PURPOSE_VALUE_ASSIGNMENT": + purposeValue = v2.Entitlement_PURPOSE_VALUE_ASSIGNMENT + case "PURPOSE_VALUE_PERMISSION": + purposeValue = v2.Entitlement_PURPOSE_VALUE_PERMISSION + case "PURPOSE_VALUE_OWNERSHIP": + purposeValue = v2.Entitlement_PURPOSE_VALUE_OWNERSHIP + } + + return &v2.Entitlement{ + Id: basicEntitlement.GetId(), + DisplayName: basicEntitlement.GetDisplayName(), + Description: basicEntitlement.GetDescription(), + Slug: basicEntitlement.GetSlug(), + Purpose: purposeValue, + GrantableTo: grantableTo, + Resource: resource, + } +} + +// grantToBasicGrant converts a v2.Grant to a config.Grant. +func grantToBasicGrant(grant *v2.Grant) *config.Grant { + var entitlementRef *config.EntitlementRef + if grant.GetEntitlement() != nil { + entitlementRef = config.EntitlementRef_builder{ + Id: grant.GetEntitlement().GetId(), + }.Build() + } + + var principal *config.Resource + if grant.GetPrincipal() != nil { + principal = resourceToBasicResource(grant.GetPrincipal()) + } + + return config.Grant_builder{ + Id: grant.GetId(), + Entitlement: entitlementRef, + Principal: principal, + }.Build() +} + +// basicGrantToGrant converts a config.Grant to a v2.Grant. 
+func basicGrantToGrant(basicGrant *config.Grant) *v2.Grant { + var entitlement *v2.Entitlement + if basicGrant.GetEntitlement() != nil { + entitlement = &v2.Entitlement{ + Id: basicGrant.GetEntitlement().GetId(), + } + } + + var principal *v2.Resource + if basicGrant.GetPrincipal() != nil { + principal = basicResourceToResource(basicGrant.GetPrincipal()) + } + + return &v2.Grant{ + Id: basicGrant.GetId(), + Entitlement: entitlement, + Principal: principal, + } +} + +// GetEntitlementListFieldArg extracts a list of Entitlement proto messages from the args struct by key. +// Each Entitlement is expected to be stored as a JSON-serialized struct value. +// Returns the list of Entitlement and true if found and valid, or nil and false otherwise. +func GetEntitlementListFieldArg(args *structpb.Struct, key string) ([]*v2.Entitlement, bool) { + if args == nil || args.Fields == nil { + return nil, false + } + value, ok := args.Fields[key] + if !ok { + return nil, false + } + listValue, ok := value.GetKind().(*structpb.Value_ListValue) + if !ok { + return nil, false + } + var entitlements []*v2.Entitlement + for _, v := range listValue.ListValue.Values { + structValue, ok := v.GetKind().(*structpb.Value_StructValue) + if !ok { + return nil, false + } + + // Marshal the struct value back to JSON, then unmarshal into the proto message + jsonBytes, err := protojson.Marshal(structValue.StructValue) + if err != nil { + return nil, false + } + + basicEntitlement := &config.Entitlement{} + if err := protojson.Unmarshal(jsonBytes, basicEntitlement); err != nil { + return nil, false + } + + entitlements = append(entitlements, basicEntitlementToEntitlement(basicEntitlement)) + } + return entitlements, true +} + +// GetGrantListFieldArg extracts a list of Grant proto messages from the args struct by key. +// Each Grant is expected to be stored as a JSON-serialized struct value. +// Returns the list of Grant and true if found and valid, or nil and false otherwise. +func GetGrantListFieldArg(args *structpb.Struct, key string) ([]*v2.Grant, bool) { + if args == nil || args.Fields == nil { + return nil, false + } + value, ok := args.Fields[key] + if !ok { + return nil, false + } + listValue, ok := value.GetKind().(*structpb.Value_ListValue) + if !ok { + return nil, false + } + var grants []*v2.Grant + for _, v := range listValue.ListValue.Values { + structValue, ok := v.GetKind().(*structpb.Value_StructValue) + if !ok { + return nil, false + } + + // Marshal the struct value back to JSON, then unmarshal into the proto message + jsonBytes, err := protojson.Marshal(structValue.StructValue) + if err != nil { + return nil, false + } + + basicGrant := &config.Grant{} + if err := protojson.Unmarshal(jsonBytes, basicGrant); err != nil { + return nil, false + } + + grants = append(grants, basicGrantToGrant(basicGrant)) + } + return grants, true +} + +// NewEntitlementListReturnField creates a return field with a list of Entitlement proto values. 
+func NewEntitlementListReturnField(key string, entitlements []*v2.Entitlement) (ReturnField, error) { + listValues := make([]*structpb.Value, len(entitlements)) + for i, entitlement := range entitlements { + if entitlement == nil { + return ReturnField{}, fmt.Errorf("entitlement at index %d cannot be nil", i) + } + basicEntitlement := entitlementToBasicEntitlement(entitlement) + jsonBytes, err := protojson.Marshal(basicEntitlement) + if err != nil { + return ReturnField{}, fmt.Errorf("failed to marshal entitlement: %w", err) + } + + structValue := &structpb.Struct{} + if err := protojson.Unmarshal(jsonBytes, structValue); err != nil { + return ReturnField{}, fmt.Errorf("failed to unmarshal entitlement to struct: %w", err) + } + + listValues[i] = structpb.NewStructValue(structValue) + } + return ReturnField{Key: key, Value: structpb.NewListValue(&structpb.ListValue{Values: listValues})}, nil +} + +// NewGrantListReturnField creates a return field with a list of Grant proto values. +func NewGrantListReturnField(key string, grants []*v2.Grant) (ReturnField, error) { + listValues := make([]*structpb.Value, len(grants)) + for i, grant := range grants { + if grant == nil { + return ReturnField{}, fmt.Errorf("grant at index %d cannot be nil", i) + } + basicGrant := grantToBasicGrant(grant) + jsonBytes, err := protojson.Marshal(basicGrant) + if err != nil { + return ReturnField{}, fmt.Errorf("failed to marshal grant: %w", err) + } + + structValue := &structpb.Struct{} + if err := protojson.Unmarshal(jsonBytes, structValue); err != nil { + return ReturnField{}, fmt.Errorf("failed to unmarshal grant to struct: %w", err) + } + + listValues[i] = structpb.NewStructValue(structValue) + } + return ReturnField{Key: key, Value: structpb.NewListValue(&structpb.ListValue{Values: listValues})}, nil +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go index 93a6e3ba..8adde283 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/cli.go @@ -16,8 +16,9 @@ import ( ) type RunTimeOpts struct { - SessionStore sessions.SessionStore - TokenSource oauth2.TokenSource + SessionStore sessions.SessionStore + TokenSource oauth2.TokenSource + SelectedAuthMethod string } // GetConnectorFunc is a function type that creates a connector instance. 
@@ -35,7 +36,8 @@ func WithSessionCache(ctx context.Context, constructor sessions.SessionStoreCons } type ConnectorOpts struct { - TokenSource oauth2.TokenSource + TokenSource oauth2.TokenSource + SelectedAuthMethod string } type NewConnector[T field.Configurable] func(ctx context.Context, cfg T, opts *ConnectorOpts) (connectorbuilder.ConnectorBuilderV2, []connectorbuilder.Opt, error) diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go index efbaff6b..1b7aadcd 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go @@ -179,6 +179,13 @@ func MakeMainCommand[T field.Configurable]( if v.GetBool("skip-full-sync") { opts = append(opts, connectorrunner.WithFullSyncDisabled()) } + if v.GetBool("health-check") { + opts = append(opts, connectorrunner.WithHealthCheck( + true, + v.GetInt("health-check-port"), + v.GetString("health-check-bind-address"), + )) + } } else { switch { case v.GetString("grant-entitlement") != "": @@ -373,7 +380,8 @@ func MakeMainCommand[T field.Configurable]( opts = append(opts, connectorrunner.WithSkipGrants(v.GetBool("skip-grants"))) } - c, err := getconnector(runCtx, t, RunTimeOpts{}) + // Save the selected authentication method and get the connector. + c, err := getconnector(runCtx, t, RunTimeOpts{SelectedAuthMethod: v.GetString("auth-method")}) if err != nil { return err } @@ -542,6 +550,7 @@ func MakeGRPCServerCommand[T field.Configurable]( otterOptions.MaximumWeight = uint64(sessionStoreMaximumSize) } }), + SelectedAuthMethod: v.GetString("auth-method"), }) if err != nil { return err @@ -643,12 +652,13 @@ func MakeCapabilitiesCommand[T field.Configurable]( if err != nil { return fmt.Errorf("failed to make configuration: %w", err) } + authMethod := v.GetString("auth-method") // validate required fields and relationship constraints - if err := field.Validate(confschema, t, field.WithAuthMethod(v.GetString("auth-method"))); err != nil { + if err := field.Validate(confschema, t, field.WithAuthMethod(authMethod)); err != nil { return err } - c, err = getconnector(runCtx, t, RunTimeOpts{}) + c, err = getconnector(runCtx, t, RunTimeOpts{SelectedAuthMethod: authMethod}) if err != nil { return err } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/healthcheck_command.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/healthcheck_command.go new file mode 100644 index 00000000..63b0f3c3 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/healthcheck_command.go @@ -0,0 +1,81 @@ +package cli + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/conductorone/baton-sdk/pkg/uhttp" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var validHealthCheckEndpoints = map[string]string{ + "health": "/health", + "ready": "/ready", + "live": "/live", +} + +func MakeHealthCheckCommand( + ctx context.Context, + v *viper.Viper, +) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + err := v.BindPFlags(cmd.Flags()) + if err != nil { + return err + } + + // Get configuration from persistent parent flags + port := v.GetInt("health-check-port") + bindAddress := v.GetString("health-check-bind-address") + + // Get subcommand-specific flags + endpoint, _ := cmd.Flags().GetString("endpoint") + timeout, _ := cmd.Flags().GetInt("timeout") + + // Validate endpoint + path, ok := 
validHealthCheckEndpoints[endpoint] + if !ok { + return fmt.Errorf("invalid endpoint: %s (valid: health, ready, live)", endpoint) + } + + // Construct URL + u := &url.URL{ + Scheme: "http", + Host: net.JoinHostPort(bindAddress, strconv.Itoa(port)), + Path: path, + } + + // Create HTTP client using baton-sdk uhttp package + client, err := uhttp.NewClient(ctx) + if err != nil { + return fmt.Errorf("failed to create http client: %w", err) + } + // Override the default 5-minute timeout with our configured timeout + client.Timeout = time.Duration(timeout) * time.Second + + // Make request + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) + if err != nil { + return fmt.Errorf("failed to create request: %w", err) + } + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("health check failed: %w", err) + } + defer resp.Body.Close() + + // Check response status + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("health check returned status %d", resp.StatusCode) + } + + return nil + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go index ae5e0c20..3d9cf060 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/lambda_server__added.go @@ -226,6 +226,7 @@ func OptionallyAddLambdaCommand[T field.Configurable]( otterOptions.MaximumWeight = uint64(sessionStoreMaximumSize) } }), + SelectedAuthMethod: authMethodStr, } if hasOauthField(schemaFields) { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go index 02cc5d18..f6ae6be2 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go @@ -30,7 +30,8 @@ func RunConnector[T field.Configurable]( ) { f := func(ctx context.Context, cfg T, runTimeOpts cli.RunTimeOpts) (types.ConnectorServer, error) { l := ctxzap.Extract(ctx) - connector, builderOpts, err := cf(ctx, cfg, &cli.ConnectorOpts{TokenSource: runTimeOpts.TokenSource}) + connector, builderOpts, err := cf(ctx, cfg, &cli.ConnectorOpts{TokenSource: runTimeOpts.TokenSource, + SelectedAuthMethod: runTimeOpts.SelectedAuthMethod}) if err != nil { return nil, err } @@ -240,6 +241,30 @@ func DefineConfigurationV2[T field.Configurable]( return nil, nil, err } + // Health check client command - doesn't need connector config validation + healthCheckCmd := &cobra.Command{ + Use: "health-check", + Short: "Check the health of a running connector", + Long: `Query the health check server of a running connector. + +This command is designed for use in container/Kubernetes health check scenarios. +It queries the specified endpoint and exits with code 0 if healthy, or non-zero otherwise. 
+ +Examples: + # Check health using defaults (localhost:8081/health) + connector-name health-check + + # Check readiness endpoint + connector-name health-check --endpoint=ready + + # Check liveness with custom port + connector-name health-check --endpoint=live --health-check-port=9090`, + RunE: cli.MakeHealthCheckCommand(ctx, v), + } + healthCheckCmd.Flags().String("endpoint", "health", "Endpoint to check: health, ready, or live") + healthCheckCmd.Flags().Int("timeout", 5, "Request timeout in seconds") + mainCMD.AddCommand(healthCheckCmd) + return v, mainCMD, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go index 01c7f238..6f09907d 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_provisioner.go @@ -46,7 +46,7 @@ type GrantProvisioner interface { // This is the recommended interface for implementing provisioning operations in new connectors. // It differs from ResourceProvisioner by returning a list of grants from the Grant method. type ResourceProvisionerV2 interface { - ResourceSyncer + ResourceSyncerV2 ResourceProvisionerV2Limited } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go index fb5201ae..2690bcdb 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go @@ -12,6 +12,7 @@ import ( "github.com/conductorone/baton-sdk/pkg/bid" "github.com/conductorone/baton-sdk/pkg/connectorbuilder" + "github.com/conductorone/baton-sdk/pkg/healthcheck" "github.com/conductorone/baton-sdk/pkg/synccompactor" "golang.org/x/sync/semaphore" "google.golang.org/protobuf/types/known/structpb" @@ -38,10 +39,11 @@ const ( ) type connectorRunner struct { - cw types.ClientWrapper - oneShot bool - tasks tasks.Manager - debugFile *os.File + cw types.ClientWrapper + oneShot bool + tasks tasks.Manager + debugFile *os.File + healthServer *healthcheck.Server } var ErrSigTerm = errors.New("context cancelled by process shutdown") @@ -240,6 +242,14 @@ func (c *connectorRunner) run(ctx context.Context) error { func (c *connectorRunner) Close(ctx context.Context) error { var retErr error + // Stop health check server if running + if c.healthServer != nil { + if err := c.healthServer.Stop(ctx); err != nil { + retErr = errors.Join(retErr, err) + } + c.healthServer = nil + } + if err := c.cw.Close(); err != nil { retErr = errors.Join(retErr, err) } @@ -360,6 +370,9 @@ type runnerConfig struct { syncResourceTypeIDs []string defaultCapabilitiesConnectorBuilder connectorbuilder.ConnectorBuilder defaultCapabilitiesConnectorBuilderV2 connectorbuilder.ConnectorBuilderV2 + healthCheckEnabled bool + healthCheckPort int + healthCheckBindAddress string } func WithSessionStoreEnabled() Option { @@ -722,6 +735,16 @@ func WithDefaultCapabilitiesConnectorBuilderV2(t connectorbuilder.ConnectorBuild } } +// WithHealthCheck enables the HTTP health check server. 
+func WithHealthCheck(enabled bool, port int, bindAddress string) Option { + return func(ctx context.Context, cfg *runnerConfig) error { + cfg.healthCheckEnabled = enabled + cfg.healthCheckPort = port + cfg.healthCheckBindAddress = bindAddress + return nil + } +} + func ExtractDefaultConnector(ctx context.Context, options ...Option) (any, error) { cfg := &runnerConfig{} @@ -916,5 +939,20 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op } runner.tasks = tm + // Start health check server if enabled (only for daemon mode) + if cfg.healthCheckEnabled { + healthCfg := healthcheck.Config{ + Enabled: true, + Port: cfg.healthCheckPort, + BindAddress: cfg.healthCheckBindAddress, + } + healthServer := healthcheck.NewServer(healthCfg, cw.C) + if err := healthServer.Start(ctx); err != nil { + _ = cw.Close() // Clean up connector wrapper on failure + return nil, fmt.Errorf("failed to start health check server: %w", err) + } + runner.healthServer = healthServer + } + return runner, nil } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go index bc995d11..6136cc84 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go @@ -39,6 +39,10 @@ var DefaultRelationships = []SchemaFieldRelationship{ []SchemaField{skipGrants}, []SchemaField{targetedSyncResourceIDs}, ), + FieldsDependentOn( + []SchemaField{healthCheckPortField, healthCheckBindAddressField}, + []SchemaField{healthCheckField}, + ), } func EnsureDefaultRelationships(original []SchemaFieldRelationship) []SchemaFieldRelationship { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go index 9259241e..d08a0c8b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go @@ -290,6 +290,25 @@ var ( WithExportTarget(ExportTargetOps), WithHidden(true), WithPersistent(true)) + + healthCheckField = BoolField("health-check", + WithDescription("Enable the HTTP health check endpoint"), + WithDefaultValue(false), + WithPersistent(true), + WithExportTarget(ExportTargetOps)) + + healthCheckPortField = IntField("health-check-port", + WithDescription("Port for the HTTP health check endpoint"), + WithDefaultValue(8081), + WithPersistent(true), + WithExportTarget(ExportTargetOps)) + + healthCheckBindAddressField = StringField("health-check-bind-address", + WithDescription("Bind address for health check server (127.0.0.1 for localhost-only)"), + WithDefaultValue("127.0.0.1"), + WithPersistent(true), + WithHidden(true), + WithExportTarget(ExportTargetOps)) ) func LambdaServerFields() []SchemaField { @@ -373,6 +392,10 @@ var DefaultFields = []SchemaField{ otelLoggingDisabled, authMethod, + + healthCheckField, + healthCheckPortField, + healthCheckBindAddressField, } func IsFieldAmongDefaultList(f SchemaField) bool { diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/healthcheck/server.go b/vendor/github.com/conductorone/baton-sdk/pkg/healthcheck/server.go new file mode 100644 index 00000000..1ffba6f4 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/healthcheck/server.go @@ -0,0 +1,214 @@ +package healthcheck + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "strconv" + "sync" + "time" + + 
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/types" +) + +const ( + defaultHealthCheckTimeout = 30 * time.Second + shutdownTimeout = 5 * time.Second +) + +// Config holds the configuration for the health check server. +type Config struct { + Enabled bool + Port int + BindAddress string +} + +// HealthResponse represents the JSON response for health check endpoints. +type HealthResponse struct { + Status string `json:"status"` + Timestamp string `json:"timestamp"` + Details map[string]string `json:"details,omitempty"` +} + +// ClientFunc is a function that returns a ConnectorClient. +type ClientFunc func(context.Context) (types.ConnectorClient, error) + +// Server manages the HTTP health check server lifecycle. +type Server struct { + cfg Config + clientFunc ClientFunc + server *http.Server + mu sync.Mutex + started bool + ctx context.Context +} + +// NewServer creates a new health check server. +func NewServer(cfg Config, clientFunc ClientFunc) *Server { + return &Server{ + cfg: cfg, + clientFunc: clientFunc, + } +} + +// Start starts the HTTP health check server. +func (s *Server) Start(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.started { + return fmt.Errorf("health check server already started") + } + + s.ctx = ctx + l := ctxzap.Extract(ctx) + + mux := http.NewServeMux() + + // Register health check endpoints + mux.HandleFunc("/health", s.healthHandler) + mux.HandleFunc("/ready", s.readyHandler) + mux.HandleFunc("/live", s.liveHandler) + + addr := net.JoinHostPort(s.cfg.BindAddress, strconv.Itoa(s.cfg.Port)) + lc := &net.ListenConfig{} + listener, err := lc.Listen(ctx, "tcp", addr) + if err != nil { + return fmt.Errorf("failed to create health check listener: %w", err) + } + + s.server = &http.Server{ + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + } + + s.started = true + + go func() { + l.Info("health check server starting", zap.String("address", addr)) + if err := s.server.Serve(listener); err != nil && err != http.ErrServerClosed { + l.Error("health check server error", zap.Error(err)) + } + }() + + return nil +} + +// Stop gracefully shuts down the health check server. +func (s *Server) Stop(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.started || s.server == nil { + return nil + } + + l := ctxzap.Extract(ctx) + l.Info("stopping health check server") + + shutdownCtx, cancel := context.WithTimeout(ctx, shutdownTimeout) + defer cancel() + + if err := s.server.Shutdown(shutdownCtx); err != nil { + return fmt.Errorf("failed to shutdown health check server: %w", err) + } + + s.started = false + return nil +} + +// healthHandler handles the /health endpoint. +// It calls Validate() on the connector and returns the health status. 
+func (s *Server) healthHandler(w http.ResponseWriter, r *http.Request) { + ctx := s.ctx + if ctx == nil { + ctx = r.Context() + } + l := ctxzap.Extract(ctx) + + response := HealthResponse{ + Timestamp: time.Now().UTC().Format(time.RFC3339), + Details: make(map[string]string), + } + + // Get the connector client + client, err := s.clientFunc(ctx) + if err != nil { + l.Warn("health check failed: could not get connector client", zap.Error(err)) + response.Status = "unhealthy" + response.Details["error"] = "failed to get connector client" + s.writeJSON(w, http.StatusServiceUnavailable, response) + return + } + + // Call Validate() on the connector with a timeout + validateCtx, cancel := context.WithTimeout(ctx, defaultHealthCheckTimeout) + defer cancel() + + _, err = client.Validate(validateCtx, &v2.ConnectorServiceValidateRequest{}) + if err != nil { + l.Warn("health check failed: connector validation failed", zap.Error(err)) + response.Status = "unhealthy" + response.Details["error"] = "connector validation failed" + response.Details["validation_error"] = err.Error() + s.writeJSON(w, http.StatusServiceUnavailable, response) + return + } + + response.Status = "healthy" + s.writeJSON(w, http.StatusOK, response) +} + +// readyHandler handles the /ready endpoint. +// It checks if the connector client can be obtained (ready to serve). +func (s *Server) readyHandler(w http.ResponseWriter, r *http.Request) { + ctx := s.ctx + if ctx == nil { + ctx = r.Context() + } + l := ctxzap.Extract(ctx) + + response := HealthResponse{ + Timestamp: time.Now().UTC().Format(time.RFC3339), + Details: make(map[string]string), + } + + // Try to get the connector client + _, err := s.clientFunc(ctx) + if err != nil { + l.Warn("readiness check failed: could not get connector client", zap.Error(err)) + response.Status = "not_ready" + response.Details["error"] = "failed to get connector client" + s.writeJSON(w, http.StatusServiceUnavailable, response) + return + } + + response.Status = "ready" + s.writeJSON(w, http.StatusOK, response) +} + +// liveHandler handles the /live endpoint. +// It always returns HTTP 200 to indicate the process is alive. +func (s *Server) liveHandler(w http.ResponseWriter, _ *http.Request) { + response := HealthResponse{ + Status: "alive", + Timestamp: time.Now().UTC().Format(time.RFC3339), + } + s.writeJSON(w, http.StatusOK, response) +} + +// writeJSON writes a JSON response with the given status code. 
+func (s *Server) writeJSON(w http.ResponseWriter, statusCode int, data interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + if err := json.NewEncoder(w).Encode(data); err != nil { + // If encoding fails, we can't do much about it + return + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go index f2d6acbd..b3cfeeac 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go @@ -1,3 +1,3 @@ package sdk -const Version = "v0.7.3" +const Version = "v0.7.9" diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go index d2e8be55..532cc8e4 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go @@ -2518,6 +2518,11 @@ func (s *syncer) listExternalEntitlementsForResource(ctx context.Context, resour break } } + for _, ent := range ents { + annos := annotations.Annotations(ent.GetAnnotations()) + annos.Update(&v2.EntitlementImmutable{}) + ent.SetAnnotations(annos) + } return ents, nil } @@ -2535,6 +2540,12 @@ func (s *syncer) listExternalGrantsForEntitlement(ctx context.Context, ent *v2.E } grants := grantsForEntitlementResp.GetList() if len(grants) > 0 { + // Add immutable annotation to external resource grants. + for _, grant := range grants { + annos := annotations.Annotations(grant.GetAnnotations()) + annos.Update(&v2.GrantImmutable{}) + grant.SetAnnotations(annos) + } if !yield(grants, err) { return } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go index 02737057..323b5665 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/actions.go @@ -139,6 +139,8 @@ func (c *actionInvokeTaskHandler) HandleTask(ctx context.Context) error { return c.helpers.FinishTask(ctx, nil, nil, err) } + l.Debug("ActionInvoke response", zap.Any("resp", resp)) + // Check if the action itself failed and propagate the error if resp.GetStatus() == v2.BatonActionStatus_BATON_ACTION_STATUS_FAILED { errMsg := "action failed" @@ -156,8 +158,6 @@ func (c *actionInvokeTaskHandler) HandleTask(ctx context.Context) error { return c.helpers.FinishTask(ctx, resp, nil, fmt.Errorf("%s", errMsg)) } - l.Debug("ActionInvoke response", zap.Any("resp", resp)) - return c.helpers.FinishTask(ctx, resp, nil, nil) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go index 939bb8f2..4aec02f7 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/resource/security_insight_trait.go @@ -93,6 +93,18 @@ func WithInsightExternalResourceTarget(externalId string, appHint string) Securi } } +// WithInsightAppUserTarget sets the app user target for the insight. +// Use this when the insight should be resolved to an AppUser by email and external ID. 
+func WithInsightAppUserTarget(email string, externalId string) SecurityInsightTraitOption { + return func(t *v2.SecurityInsightTrait) error { + t.SetAppUser(v2.SecurityInsightTrait_AppUserTarget_builder{ + Email: email, + ExternalId: externalId, + }.Build()) + return nil + } +} + // NewSecurityInsightTrait creates a new SecurityInsightTrait with the given options. // You must provide either WithRiskScore or WithIssue to set the insight type. // @@ -291,6 +303,11 @@ func IsExternalResourceTarget(trait *v2.SecurityInsightTrait) bool { return trait.GetExternalResource() != nil } +// IsAppUserTarget returns true if the insight targets an app user. +func IsAppUserTarget(trait *v2.SecurityInsightTrait) bool { + return trait.GetAppUser() != nil +} + // --- Target data extractors --- // GetUserTargetEmail returns the user email from a SecurityInsightTrait, or empty string if not a user target. @@ -321,3 +338,19 @@ func GetExternalResourceTargetAppHint(trait *v2.SecurityInsightTrait) string { } return "" } + +// GetAppUserTargetEmail returns the email from a SecurityInsightTrait, or empty string if not an app user target. +func GetAppUserTargetEmail(trait *v2.SecurityInsightTrait) string { + if appUser := trait.GetAppUser(); appUser != nil { + return appUser.GetEmail() + } + return "" +} + +// GetAppUserTargetExternalId returns the external ID from a SecurityInsightTrait, or empty string if not an app user target. +func GetAppUserTargetExternalId(trait *v2.SecurityInsightTrait) string { + if appUser := trait.GetAppUser(); appUser != nil { + return appUser.GetExternalId() + } + return "" +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go index cae9b483..7066921b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go @@ -137,24 +137,32 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { if err != nil { return nil, fmt.Errorf("uhttp: cycle failed: %w", err) } - if t.log { - t.l(ctx).Debug("Request started", - zap.String("http.method", req.Method), - zap.String("http.url_details.host", req.URL.Host), - zap.String("http.url_details.path", req.URL.Path), - zap.String("http.url_details.query", req.URL.RawQuery), - ) - } + start := time.Now() + defer func() { + if r := recover(); r != nil { + if t.log { + duration := time.Since(start) + t.l(ctx).Error("HTTP request panic", + zap.String("http.method", req.Method), + zap.String("http.url_details.host", req.URL.Host), + zap.String("http.url_details.path", req.URL.Path), + zap.String("http.url_details.query", req.URL.RawQuery), + zap.Duration("duration", duration), + zap.Any("panic", r), + ) + } + panic(r) + } + }() resp, err := rt.RoundTrip(req) if t.log { - fields := []zap.Field{zap.String("http.method", req.Method), + duration := time.Since(start) + fields := []zap.Field{ + zap.String("http.method", req.Method), zap.String("http.url_details.host", req.URL.Host), zap.String("http.url_details.path", req.URL.Path), zap.String("http.url_details.query", req.URL.RawQuery), - } - - if err != nil { - fields = append(fields, zap.Error(err)) + zap.Duration("duration", duration), } if resp != nil { @@ -170,7 +178,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { fields = append(fields, zap.Any("http.headers", headers)) } - t.l(ctx).Debug("Request complete", fields...) 
+ l := t.l(ctx) + switch { + case err != nil: + // Always log errors - request failed to complete + fields = append(fields, zap.Error(err)) + l.Error("HTTP request failed", fields...) + case resp != nil && resp.StatusCode >= 500: + // Server errors are noteworthy + l.Warn("HTTP request server error", fields...) + case resp != nil && resp.StatusCode >= 400: + // Client errors at debug - usually expected (404s, etc) + l.Debug("HTTP request client error", fields...) + default: + // Success + l.Debug("HTTP request complete", fields...) + } } return resp, err } diff --git a/vendor/modules.txt b/vendor/modules.txt index 3908fb9d..ba42c1d4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -159,7 +159,7 @@ github.com/benbjohnson/clock # github.com/cenkalti/backoff/v4 v4.3.0 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 -# github.com/conductorone/baton-sdk v0.7.4 +# github.com/conductorone/baton-sdk v0.7.10 ## explicit; go 1.25.2 github.com/conductorone/baton-sdk/internal/connector github.com/conductorone/baton-sdk/pb/c1/c1z/v1 @@ -188,6 +188,7 @@ github.com/conductorone/baton-sdk/pkg/dotc1z/manager github.com/conductorone/baton-sdk/pkg/dotc1z/manager/local github.com/conductorone/baton-sdk/pkg/dotc1z/manager/s3 github.com/conductorone/baton-sdk/pkg/field +github.com/conductorone/baton-sdk/pkg/healthcheck github.com/conductorone/baton-sdk/pkg/lambda/grpc github.com/conductorone/baton-sdk/pkg/lambda/grpc/config github.com/conductorone/baton-sdk/pkg/lambda/grpc/middleware From a0afe0afcfc68d6009bcf02f44f01da165163bfc Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Fri, 30 Jan 2026 10:14:02 +0530 Subject: [PATCH 11/19] handle create of teams and repos only --- pkg/connector/repository.go | 410 ------------------------------------ pkg/connector/team.go | 300 -------------------------- 2 files changed, 710 deletions(-) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index 88d4ae64..d47267d8 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -443,12 +443,6 @@ func (o *repositoryResourceType) ResourceActions(ctx context.Context, registry a if err := o.registerCreateRepositoryAction(ctx, registry); err != nil { return err } - if err := o.registerUpdateRepositoryAction(ctx, registry); err != nil { - return err - } - if err := o.registerDeleteRepositoryAction(ctx, registry); err != nil { - return err - } return nil } @@ -625,173 +619,6 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont }, o.handleCreateRepositoryAction) } -func (o *repositoryResourceType) registerDeleteRepositoryAction(ctx context.Context, registry actions.ActionRegistry) error { - return registry.Register(ctx, &v2.BatonActionSchema{ - Name: "delete", - DisplayName: "Delete Repository", - Description: "Delete a repository from a GitHub organization", - ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_DELETE}, - Arguments: []*config.Field{ - { - Name: "resource", - DisplayName: "Repository Resource", - Description: "The repository resource to delete", - Field: &config.Field_ResourceIdField{}, - IsRequired: true, - }, - { - Name: "parent", - DisplayName: "Parent Organization", - Description: "The organization the repository belongs to", - Field: &config.Field_ResourceIdField{}, - IsRequired: true, - }, - }, - ReturnTypes: []*config.Field{ - {Name: "success", Field: &config.Field_BoolField{}}, - }, - }, o.handleDeleteRepositoryAction) -} - -func (o *repositoryResourceType) registerUpdateRepositoryAction(ctx context.Context, registry 
actions.ActionRegistry) error { - return registry.Register(ctx, &v2.BatonActionSchema{ - Name: "update", - DisplayName: "Update Repository", - Description: "Update an existing repository in a GitHub organization", - ActionType: []v2.ActionType{}, - Arguments: []*config.Field{ - { - Name: "resource", - DisplayName: "Repository Resource", - Description: "The repository resource to update", - Field: &config.Field_ResourceIdField{}, - IsRequired: true, - }, - { - Name: "parent", - DisplayName: "Parent Organization", - Description: "The organization the repository belongs to", - Field: &config.Field_ResourceIdField{}, - IsRequired: true, - }, - { - Name: "name", - DisplayName: "Repository Name", - Description: "The new name of the repository (leave empty to keep current)", - Field: &config.Field_StringField{}, - }, - { - Name: "description", - DisplayName: "Description", - Description: "A description of the repository", - Field: &config.Field_StringField{}, - }, - { - Name: "homepage", - DisplayName: "Homepage", - Description: "A URL with more information about the repository", - Field: &config.Field_StringField{}, - }, - { - Name: "private", - DisplayName: "Private", - Description: "Whether the repository should be private (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "visibility", - DisplayName: "Visibility", - Description: "The visibility level of the repository", - Field: &config.Field_StringField{ - StringField: &config.StringField{ - Options: []*config.StringFieldOption{ - {Value: "public", DisplayName: "Public"}, - {Value: "private", DisplayName: "Private"}, - {Value: "internal", DisplayName: "Internal (Enterprise only)"}, - }, - }, - }, - }, - { - Name: "has_issues", - DisplayName: "Has Issues", - Description: "Enable issues for this repository (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "has_projects", - DisplayName: "Has Projects", - Description: "Enable projects for this repository (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "has_wiki", - DisplayName: "Has Wiki", - Description: "Enable wiki for this repository (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "has_discussions", - DisplayName: "Has Discussions", - Description: "Enable discussions for this repository (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "default_branch", - DisplayName: "Default Branch", - Description: "The default branch of the repository", - Field: &config.Field_StringField{}, - }, - { - Name: "allow_squash_merge", - DisplayName: "Allow Squash Merge", - Description: "Allow squash-merging pull requests (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "allow_merge_commit", - DisplayName: "Allow Merge Commit", - Description: "Allow merging pull requests with a merge commit (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "allow_rebase_merge", - DisplayName: "Allow Rebase Merge", - Description: "Allow rebase-merging pull requests (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "allow_auto_merge", - DisplayName: "Allow Auto Merge", - Description: "Allow auto-merge on pull requests (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "delete_branch_on_merge", - DisplayName: "Delete Branch on Merge", - Description: "Automatically delete head branches after pull requests are merged (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "archived", - DisplayName: "Archived", - Description: "Archive the repository 
(true/false). Note: You cannot unarchive repositories through the API", - Field: &config.Field_BoolField{}, - }, - { - Name: "is_template", - DisplayName: "Is Template", - Description: "Make this repository available as a template (true/false)", - Field: &config.Field_BoolField{}, - }, - }, - ReturnTypes: []*config.Field{ - {Name: "success", Field: &config.Field_BoolField{}}, - {Name: "resource", Field: &config.Field_ResourceField{}}, - }, - }, o.handleUpdateRepositoryAction) -} - func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { l := ctxzap.Extract(ctx) @@ -925,240 +752,3 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex return actions.NewReturnValues(true, resourceRv), annos, nil } - -func (o *repositoryResourceType) handleDeleteRepositoryAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { - l := ctxzap.Extract(ctx) - - // Extract the repository resource ID using SDK helper - resourceID, err := actions.RequireResourceIDArg(args, "resource") - if err != nil { - return nil, nil, err - } - - // Extract the parent org resource ID using SDK helper - parentResourceID, err := actions.RequireResourceIDArg(args, "parent") - if err != nil { - return nil, nil, err - } - - // Parse the repo ID from the resource - repoID, err := strconv.ParseInt(resourceID.Resource, 10, 64) - if err != nil { - return nil, nil, fmt.Errorf("invalid repository ID %s: %w", resourceID.Resource, err) - } - - // Get the organization name from the parent resource ID - orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) - if err != nil { - return nil, nil, fmt.Errorf("failed to get organization name: %w", err) - } - - // First, get the repository to find its name (needed for deletion) - repo, resp, err := o.client.Repositories.GetByID(ctx, repoID) - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get repository %d", repoID)) - } - - repoName := repo.GetName() - - l.Info("github-connector: deleting repository via action", - zap.Int64("repo_id", repoID), - zap.String("repo_name", repoName), - zap.String("org_name", orgName), - ) - - // Delete the repository via GitHub API - resp, err = o.client.Repositories.Delete(ctx, orgName, repoName) - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to delete repository %s in org %s", repoName, orgName)) - } - - var annos annotations.Annotations - if rateLimitData, err := extractRateLimitData(resp); err == nil { - annos.WithRateLimiting(rateLimitData) - } - - l.Info("github-connector: repository deleted successfully via action", - zap.Int64("repo_id", repoID), - zap.String("repo_name", repoName), - zap.String("org_name", orgName), - ) - - return actions.NewReturnValues(true), annos, nil -} - -func (o *repositoryResourceType) handleUpdateRepositoryAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { - l := ctxzap.Extract(ctx) - - // Extract the repository resource ID using SDK helper - resourceID, err := actions.RequireResourceIDArg(args, "resource") - if err != nil { - return nil, nil, err - } - - // Extract the parent org resource ID using SDK helper - parentResourceID, err := actions.RequireResourceIDArg(args, "parent") - if err != nil { - return nil, nil, err - } - - // Parse the repo ID from the resource - repoID, err := strconv.ParseInt(resourceID.Resource, 10, 64) 
- if err != nil { - return nil, nil, fmt.Errorf("invalid repository ID %s: %w", resourceID.Resource, err) - } - - // Get the organization name from the parent resource ID - orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) - if err != nil { - return nil, nil, fmt.Errorf("failed to get organization name: %w", err) - } - - // First, get the current repository to find its name - repo, resp, err := o.client.Repositories.GetByID(ctx, repoID) - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get repository %d", repoID)) - } - - currentRepoName := repo.GetName() - - l.Info("github-connector: updating repository via action", - zap.Int64("repo_id", repoID), - zap.String("repo_name", currentRepoName), - zap.String("org_name", orgName), - ) - - // Build the Repository update request - updateRepo := &github.Repository{} - - // Track if any updates were provided - hasUpdates := false - - // Extract optional fields using SDK helpers - if name, ok := actions.GetStringArg(args, "name"); ok && name != "" { - updateRepo.Name = github.Ptr(name) - hasUpdates = true - } - - if description, ok := actions.GetStringArg(args, "description"); ok { - updateRepo.Description = github.Ptr(description) - hasUpdates = true - } - - if homepage, ok := actions.GetStringArg(args, "homepage"); ok { - updateRepo.Homepage = github.Ptr(homepage) - hasUpdates = true - } - - if private, ok := actions.GetBoolArg(args, "private"); ok { - updateRepo.Private = github.Ptr(private) - hasUpdates = true - } - - if visibility, ok := actions.GetStringArg(args, "visibility"); ok && visibility != "" { - if visibility == "public" || visibility == "private" || visibility == "internal" { - updateRepo.Visibility = github.Ptr(visibility) - hasUpdates = true - } else { - l.Warn("github-connector: invalid visibility value, ignoring", - zap.String("provided_visibility", visibility), - ) - } - } - - if hasIssues, ok := actions.GetBoolArg(args, "has_issues"); ok { - updateRepo.HasIssues = github.Ptr(hasIssues) - hasUpdates = true - } - - if hasProjects, ok := actions.GetBoolArg(args, "has_projects"); ok { - updateRepo.HasProjects = github.Ptr(hasProjects) - hasUpdates = true - } - - if hasWiki, ok := actions.GetBoolArg(args, "has_wiki"); ok { - updateRepo.HasWiki = github.Ptr(hasWiki) - hasUpdates = true - } - - if hasDiscussions, ok := actions.GetBoolArg(args, "has_discussions"); ok { - updateRepo.HasDiscussions = github.Ptr(hasDiscussions) - hasUpdates = true - } - - if defaultBranch, ok := actions.GetStringArg(args, "default_branch"); ok && defaultBranch != "" { - updateRepo.DefaultBranch = github.Ptr(defaultBranch) - hasUpdates = true - } - - if allowSquashMerge, ok := actions.GetBoolArg(args, "allow_squash_merge"); ok { - updateRepo.AllowSquashMerge = github.Ptr(allowSquashMerge) - hasUpdates = true - } - - if allowMergeCommit, ok := actions.GetBoolArg(args, "allow_merge_commit"); ok { - updateRepo.AllowMergeCommit = github.Ptr(allowMergeCommit) - hasUpdates = true - } - - if allowRebaseMerge, ok := actions.GetBoolArg(args, "allow_rebase_merge"); ok { - updateRepo.AllowRebaseMerge = github.Ptr(allowRebaseMerge) - hasUpdates = true - } - - if allowAutoMerge, ok := actions.GetBoolArg(args, "allow_auto_merge"); ok { - updateRepo.AllowAutoMerge = github.Ptr(allowAutoMerge) - hasUpdates = true - } - - if deleteBranchOnMerge, ok := actions.GetBoolArg(args, "delete_branch_on_merge"); ok { - updateRepo.DeleteBranchOnMerge = github.Ptr(deleteBranchOnMerge) - hasUpdates = true - } - - if archived, ok := 
actions.GetBoolArg(args, "archived"); ok { - updateRepo.Archived = github.Ptr(archived) - hasUpdates = true - } - - if isTemplate, ok := actions.GetBoolArg(args, "is_template"); ok { - updateRepo.IsTemplate = github.Ptr(isTemplate) - hasUpdates = true - } - - if !hasUpdates { - return nil, nil, fmt.Errorf("no update fields provided") - } - - // Update the repository via GitHub API - updatedRepo, resp, err := o.client.Repositories.Edit(ctx, orgName, currentRepoName, updateRepo) - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to update repository %s in org %s", currentRepoName, orgName)) - } - - // Extract rate limit data for annotations - var annos annotations.Annotations - if rateLimitData, err := extractRateLimitData(resp); err == nil { - annos.WithRateLimiting(rateLimitData) - } - - l.Info("github-connector: repository updated successfully via action", - zap.Int64("repo_id", updatedRepo.GetID()), - zap.String("repo_name", updatedRepo.GetName()), - zap.String("repo_full_name", updatedRepo.GetFullName()), - ) - - // Create the resource representation of the updated repository - repoResource, err := repositoryResource(ctx, updatedRepo, parentResourceID) - if err != nil { - return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) - } - - // Build return values using SDK helpers - resourceRv, err := actions.NewResourceReturnField("resource", repoResource) - if err != nil { - return nil, annos, err - } - - return actions.NewReturnValues(true, resourceRv), annos, nil -} diff --git a/pkg/connector/team.go b/pkg/connector/team.go index b1a5d6f1..2d54e9ba 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -375,12 +375,6 @@ func (o *teamResourceType) ResourceActions(ctx context.Context, registry actions if err := o.registerCreateTeamAction(ctx, registry); err != nil { return err } - if err := o.registerUpdateTeamAction(ctx, registry); err != nil { - return err - } - if err := o.registerDeleteTeamAction(ctx, registry); err != nil { - return err - } return nil } @@ -484,101 +478,6 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr }, o.handleCreateTeamAction) } -func (o *teamResourceType) registerDeleteTeamAction(ctx context.Context, registry actions.ActionRegistry) error { - return registry.Register(ctx, &v2.BatonActionSchema{ - Name: "delete", - DisplayName: "Delete Team", - Description: "Delete a team from a GitHub organization", - ActionType: []v2.ActionType{v2.ActionType_ACTION_TYPE_RESOURCE_DELETE}, - Arguments: []*config.Field{ - { - Name: "resource", - DisplayName: "Team Resource", - Description: "The team resource to delete", - Field: &config.Field_ResourceIdField{}, - IsRequired: true, - }, - { - Name: "parent", - DisplayName: "Parent Organization", - Description: "The organization the team belongs to", - Field: &config.Field_ResourceIdField{}, - IsRequired: true, - }, - }, - ReturnTypes: []*config.Field{ - {Name: "success", Field: &config.Field_BoolField{}}, - }, - }, o.handleDeleteTeamAction) -} - -func (o *teamResourceType) registerUpdateTeamAction(ctx context.Context, registry actions.ActionRegistry) error { - return registry.Register(ctx, &v2.BatonActionSchema{ - Name: "update", - DisplayName: "Update Team", - Description: "Update an existing team in a GitHub organization", - ActionType: []v2.ActionType{}, - Arguments: []*config.Field{ - { - Name: "resource", - DisplayName: "Team Resource", - Description: "The team resource to update", - Field: &config.Field_ResourceIdField{}, 
- IsRequired: true, - }, - { - Name: "parent", - DisplayName: "Parent Organization", - Description: "The organization the team belongs to", - Field: &config.Field_ResourceIdField{}, - IsRequired: true, - }, - { - Name: "name", - DisplayName: "Team Name", - Description: "The new name of the team (leave empty to keep current)", - Field: &config.Field_StringField{}, - }, - { - Name: "description", - DisplayName: "Description", - Description: "A description of the team", - Field: &config.Field_StringField{}, - }, - { - Name: "privacy", - DisplayName: "Privacy", - Description: "The privacy level of the team", - Field: &config.Field_StringField{ - StringField: &config.StringField{ - Options: []*config.StringFieldOption{ - {Value: "secret", DisplayName: "Secret (only visible to org owners and team members)"}, - {Value: "closed", DisplayName: "Closed (visible to all org members)"}, - }, - }, - }, - }, - { - Name: "notification_setting", - DisplayName: "Notification Setting", - Description: "The notification setting for the team", - Field: &config.Field_StringField{ - StringField: &config.StringField{ - Options: []*config.StringFieldOption{ - {Value: "notifications_enabled", DisplayName: "Enabled"}, - {Value: "notifications_disabled", DisplayName: "Disabled"}, - }, - }, - }, - }, - }, - ReturnTypes: []*config.Field{ - {Name: "success", Field: &config.Field_BoolField{}}, - {Name: "resource", Field: &config.Field_ResourceField{}}, - }, - }, o.handleUpdateTeamAction) -} - func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { l := ctxzap.Extract(ctx) @@ -720,205 +619,6 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str return actions.NewReturnValues(true, resourceRv), annos, nil } -func (o *teamResourceType) handleDeleteTeamAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { - l := ctxzap.Extract(ctx) - - // Extract the team resource ID using SDK helper - resourceID, err := actions.RequireResourceIDArg(args, "resource") - if err != nil { - return nil, nil, err - } - - // Extract the parent org resource ID using SDK helper - parentResourceID, err := actions.RequireResourceIDArg(args, "parent") - if err != nil { - return nil, nil, err - } - - // Parse the team ID from the resource - teamID, err := strconv.ParseInt(resourceID.Resource, 10, 64) - if err != nil { - return nil, nil, fmt.Errorf("invalid team ID %s: %w", resourceID.Resource, err) - } - - // Parse the org ID from the parent resource - orgID, err := strconv.ParseInt(parentResourceID.Resource, 10, 64) - if err != nil { - return nil, nil, fmt.Errorf("invalid org ID %s: %w", parentResourceID.Resource, err) - } - - // Get the organization name from the cache - orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) - if err != nil { - return nil, nil, fmt.Errorf("failed to get organization name: %w", err) - } - - // Get the team to find its slug - team, resp, err := o.client.Teams.GetTeamByID(ctx, orgID, teamID) //nolint:staticcheck // TODO: migrate to GetTeamBySlug - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get team %d", teamID)) - } - - teamSlug := team.GetSlug() - - l.Info("github-connector: deleting team via action", - zap.Int64("team_id", teamID), - zap.String("team_slug", teamSlug), - zap.String("org_name", orgName), - ) - - // Delete the team using slug - resp, err = o.client.Teams.DeleteTeamBySlug(ctx, orgName, 
teamSlug) - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to delete team %s in org %s", teamSlug, orgName)) - } - - var annos annotations.Annotations - if rateLimitData, err := extractRateLimitData(resp); err == nil { - annos.WithRateLimiting(rateLimitData) - } - - l.Info("github-connector: team deleted successfully via action", - zap.Int64("team_id", teamID), - zap.String("team_slug", teamSlug), - zap.String("org_name", orgName), - ) - - return actions.NewReturnValues(true), annos, nil -} - -func (o *teamResourceType) handleUpdateTeamAction(ctx context.Context, args *structpb.Struct) (*structpb.Struct, annotations.Annotations, error) { - l := ctxzap.Extract(ctx) - - // Extract the team resource ID using SDK helper - resourceID, err := actions.RequireResourceIDArg(args, "resource") - if err != nil { - return nil, nil, err - } - - // Extract the parent org resource ID using SDK helper - parentResourceID, err := actions.RequireResourceIDArg(args, "parent") - if err != nil { - return nil, nil, err - } - - // Parse the team ID from the resource - teamID, err := strconv.ParseInt(resourceID.Resource, 10, 64) - if err != nil { - return nil, nil, fmt.Errorf("invalid team ID %s: %w", resourceID.Resource, err) - } - - // Parse the org ID from the parent resource - orgID, err := strconv.ParseInt(parentResourceID.Resource, 10, 64) - if err != nil { - return nil, nil, fmt.Errorf("invalid org ID %s: %w", parentResourceID.Resource, err) - } - - // Get the organization name from the cache - orgName, err := o.orgCache.GetOrgName(ctx, parentResourceID) - if err != nil { - return nil, nil, fmt.Errorf("failed to get organization name: %w", err) - } - - // Get the team to find its slug - team, resp, err := o.client.Teams.GetTeamByID(ctx, orgID, teamID) //nolint:staticcheck // TODO: migrate to GetTeamBySlug - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get team %d", teamID)) - } - - teamSlug := team.GetSlug() - - l.Info("github-connector: updating team via action", - zap.Int64("team_id", teamID), - zap.String("team_slug", teamSlug), - zap.String("org_name", orgName), - ) - - // Build the NewTeam update request - // Note: GitHub API uses NewTeam for both create and edit operations - updateTeam := github.NewTeam{} - - // Track if any updates were provided - hasUpdates := false - - // Extract optional fields using SDK helpers - if name, ok := actions.GetStringArg(args, "name"); ok && name != "" { - updateTeam.Name = name - hasUpdates = true - } - - if description, ok := actions.GetStringArg(args, "description"); ok { - updateTeam.Description = github.Ptr(description) - hasUpdates = true - } - - if privacy, ok := actions.GetStringArg(args, "privacy"); ok && privacy != "" { - if privacy == teamPrivacySecret || privacy == teamPrivacyClosed { - updateTeam.Privacy = github.Ptr(privacy) - hasUpdates = true - } else { - l.Warn("github-connector: invalid privacy value, ignoring", - zap.String("provided_privacy", privacy), - ) - } - } - - if notificationSetting, ok := actions.GetStringArg(args, "notification_setting"); ok && notificationSetting != "" { - if notificationSetting == "notifications_enabled" || notificationSetting == "notifications_disabled" { - updateTeam.NotificationSetting = github.Ptr(notificationSetting) - hasUpdates = true - } else { - l.Warn("github-connector: invalid notification_setting value, ignoring", - zap.String("provided_notification_setting", notificationSetting), - ) - } - } - - if parentTeamID, ok := 
actions.GetIntArg(args, "parent_team_id"); ok { - if parentTeamID > 0 { - updateTeam.ParentTeamID = github.Ptr(parentTeamID) - hasUpdates = true - } - // Note: Setting to 0 would remove the parent, but GitHub API requires omitting the field entirely - } - - if !hasUpdates { - return nil, nil, fmt.Errorf("no update fields provided") - } - - // Update the team via GitHub API using slug - updatedTeam, resp, err := o.client.Teams.EditTeamBySlug(ctx, orgName, teamSlug, updateTeam, false) - if err != nil { - return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to update team %s in org %s", teamSlug, orgName)) - } - - // Extract rate limit data for annotations - var annos annotations.Annotations - if rateLimitData, err := extractRateLimitData(resp); err == nil { - annos.WithRateLimiting(rateLimitData) - } - - l.Info("github-connector: team updated successfully via action", - zap.Int64("team_id", updatedTeam.GetID()), - zap.String("team_name", updatedTeam.GetName()), - zap.String("team_slug", updatedTeam.GetSlug()), - ) - - // Create the resource representation of the updated team - resource, err := teamResource(updatedTeam, parentResourceID) - if err != nil { - return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) - } - - // Build return values using SDK helpers - resourceRv, err := actions.NewResourceReturnField("resource", resource) - if err != nil { - return nil, annos, err - } - - return actions.NewReturnValues(true, resourceRv), annos, nil -} - func teamBuilder(client *github.Client, orgCache *orgNameCache) *teamResourceType { return &teamResourceType{ resourceType: resourceTypeTeam, From ae712cb706ab5c938e228b83226cf94462045ffd Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Fri, 30 Jan 2026 12:21:23 +0530 Subject: [PATCH 12/19] updates repo and team groups creation --- pkg/connector/repository.go | 237 ++++++++++++++++-------------------- pkg/connector/team.go | 83 ++++++++++--- 2 files changed, 172 insertions(+), 148 deletions(-) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index d47267d8..19c319d3 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -455,14 +455,20 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont Arguments: []*config.Field{ { Name: "name", - DisplayName: "Repository Name", + DisplayName: "Repository name", Description: "The name of the repository to create", Field: &config.Field_StringField{}, IsRequired: true, }, { - Name: "parent", - DisplayName: "Parent Organization", + Name: "description", + DisplayName: "Description", + Description: "A description of the repository", + Field: &config.Field_StringField{}, + }, + { + Name: "org", + DisplayName: "Organization", Description: "The organization to create the repository in", Field: &config.Field_ResourceIdField{ ResourceIdField: &config.ResourceIdField{ @@ -473,18 +479,6 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont }, IsRequired: true, }, - { - Name: "description", - DisplayName: "Description", - Description: "A description of the repository", - Field: &config.Field_StringField{}, - }, - { - Name: "private", - DisplayName: "Private", - Description: "Whether the repository should be private (true/false)", - Field: &config.Field_BoolField{}, - }, { Name: "visibility", DisplayName: "Visibility", @@ -492,42 +486,21 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont Field: &config.Field_StringField{ StringField: &config.StringField{ 
Options: []*config.StringFieldOption{ - {Value: "public", DisplayName: "Public"}, - {Value: "private", DisplayName: "Private"}, - {Value: "internal", DisplayName: "Internal (Enterprise only)"}, + {Value: "public", DisplayName: "Public", Name: "Anyone on the internet can view this repository"}, + {Value: "private", DisplayName: "Private", Name: "You can choose who can see this repository"}, }, }, }, }, { - Name: "has_issues", - DisplayName: "Has Issues", - Description: "Enable issues for this repository (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "has_projects", - DisplayName: "Has Projects", - Description: "Enable projects for this repository (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "has_wiki", - DisplayName: "Has Wiki", - Description: "Enable wiki for this repository (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "has_discussions", - DisplayName: "Has Discussions", - Description: "Enable discussions for this repository (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "auto_init", - DisplayName: "Auto Initialize", - Description: "Create an initial commit with empty README (true/false)", - Field: &config.Field_BoolField{}, + Name: "add_readme", + DisplayName: "Add README.md", + Description: "Add a README.md file to the repository", + Field: &config.Field_BoolField{ + BoolField: &config.BoolField{ + DefaultValue: true, + }, + }, }, { Name: "gitignore_template", @@ -536,7 +509,7 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont Field: &config.Field_StringField{ StringField: &config.StringField{ Options: []*config.StringFieldOption{ - {Value: "", DisplayName: "None"}, + {Value: "", DisplayName: "No .gitignore template"}, {Value: "Go", DisplayName: "Go"}, {Value: "Python", DisplayName: "Python"}, {Value: "Node", DisplayName: "Node"}, @@ -560,7 +533,7 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont Field: &config.Field_StringField{ StringField: &config.StringField{ Options: []*config.StringFieldOption{ - {Value: "", DisplayName: "None"}, + {Value: "", DisplayName: "No license"}, {Value: "mit", DisplayName: "MIT License"}, {Value: "apache-2.0", DisplayName: "Apache License 2.0"}, {Value: "gpl-3.0", DisplayName: "GNU GPLv3"}, @@ -575,42 +548,6 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont }, }, }, - { - Name: "allow_squash_merge", - DisplayName: "Allow Squash Merge", - Description: "Allow squash-merging pull requests (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "allow_merge_commit", - DisplayName: "Allow Merge Commit", - Description: "Allow merging pull requests with a merge commit (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "allow_rebase_merge", - DisplayName: "Allow Rebase Merge", - Description: "Allow rebase-merging pull requests (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "allow_auto_merge", - DisplayName: "Allow Auto Merge", - Description: "Allow auto-merge on pull requests (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "delete_branch_on_merge", - DisplayName: "Delete Branch on Merge", - Description: "Automatically delete head branches after pull requests are merged (true/false)", - Field: &config.Field_BoolField{}, - }, - { - Name: "is_template", - DisplayName: "Is Template", - Description: "Make this repository available as a template (true/false)", - Field: &config.Field_BoolField{}, - }, }, ReturnTypes: 
[]*config.Field{ {Name: "success", Field: &config.Field_BoolField{}}, @@ -628,7 +565,7 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex return nil, nil, err } - parentResourceID, err := actions.RequireResourceIDArg(args, "parent") + parentResourceID, err := actions.RequireResourceIDArg(args, "org") if err != nil { return nil, nil, err } @@ -654,12 +591,8 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex newRepo.Description = github.Ptr(description) } - if private, ok := actions.GetBoolArg(args, "private"); ok { - newRepo.Private = github.Ptr(private) - } - if visibility, ok := actions.GetStringArg(args, "visibility"); ok && visibility != "" { - if visibility == "public" || visibility == "private" || visibility == "internal" { + if visibility == "public" || visibility == "private" { newRepo.Visibility = github.Ptr(visibility) } else { l.Warn("github-connector: invalid visibility value, using default", @@ -668,24 +601,9 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex } } - if hasIssues, ok := actions.GetBoolArg(args, "has_issues"); ok { - newRepo.HasIssues = github.Ptr(hasIssues) - } - - if hasProjects, ok := actions.GetBoolArg(args, "has_projects"); ok { - newRepo.HasProjects = github.Ptr(hasProjects) - } - - if hasWiki, ok := actions.GetBoolArg(args, "has_wiki"); ok { - newRepo.HasWiki = github.Ptr(hasWiki) - } - - if hasDiscussions, ok := actions.GetBoolArg(args, "has_discussions"); ok { - newRepo.HasDiscussions = github.Ptr(hasDiscussions) - } - - if autoInit, ok := actions.GetBoolArg(args, "auto_init"); ok { - newRepo.AutoInit = github.Ptr(autoInit) + // add_readme maps to AutoInit in GitHub API + if addReadme, ok := actions.GetBoolArg(args, "add_readme"); ok { + newRepo.AutoInit = github.Ptr(addReadme) } if gitignoreTemplate, ok := actions.GetStringArg(args, "gitignore_template"); ok && gitignoreTemplate != "" { @@ -696,30 +614,6 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex newRepo.LicenseTemplate = github.Ptr(licenseTemplate) } - if allowSquashMerge, ok := actions.GetBoolArg(args, "allow_squash_merge"); ok { - newRepo.AllowSquashMerge = github.Ptr(allowSquashMerge) - } - - if allowMergeCommit, ok := actions.GetBoolArg(args, "allow_merge_commit"); ok { - newRepo.AllowMergeCommit = github.Ptr(allowMergeCommit) - } - - if allowRebaseMerge, ok := actions.GetBoolArg(args, "allow_rebase_merge"); ok { - newRepo.AllowRebaseMerge = github.Ptr(allowRebaseMerge) - } - - if allowAutoMerge, ok := actions.GetBoolArg(args, "allow_auto_merge"); ok { - newRepo.AllowAutoMerge = github.Ptr(allowAutoMerge) - } - - if deleteBranchOnMerge, ok := actions.GetBoolArg(args, "delete_branch_on_merge"); ok { - newRepo.DeleteBranchOnMerge = github.Ptr(deleteBranchOnMerge) - } - - if isTemplate, ok := actions.GetBoolArg(args, "is_template"); ok { - newRepo.IsTemplate = github.Ptr(isTemplate) - } - // Create the repository via GitHub API createdRepo, resp, err := o.client.Repositories.Create(ctx, orgName, newRepo) if err != nil { @@ -744,11 +638,88 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) } + // Generate entitlements for the newly created repository + entitlements := make([]*v2.Entitlement, 0, len(repoAccessLevels)) + for _, level := range repoAccessLevels { + entitlements = append(entitlements, entitlement.NewPermissionEntitlement(repoResource, 
level, + entitlement.WithDisplayName(fmt.Sprintf("%s Repo %s", repoResource.DisplayName, titleCase(level))), + entitlement.WithDescription(fmt.Sprintf("Access to %s repository in GitHub", repoResource.DisplayName)), + entitlement.WithAnnotation(&v2.V1Identifier{ + Id: fmt.Sprintf("repo:%s:role:%s", repoResource.Id.Resource, level), + }), + entitlement.WithGrantableTo(resourceTypeUser, resourceTypeTeam), + )) + } + + // Fetch grants for the newly created repository by listing collaborators + var grants []*v2.Grant + + // List user collaborators + collabOpts := &github.ListCollaboratorsOptions{ + Affiliation: "all", + ListOptions: github.ListOptions{ + PerPage: maxPageSize, + }, + } + users, _, err := o.client.Repositories.ListCollaborators(ctx, orgName, createdRepo.GetName(), collabOpts) + if err != nil { + l.Warn("github-connector: failed to list collaborators for grants", zap.Error(err)) + } else { + for _, user := range users { + for permission, hasPermission := range user.Permissions { + if !hasPermission { + continue + } + ur, err := userResource(ctx, user, user.GetEmail(), nil) + if err != nil { + continue + } + g := grant.NewGrant(repoResource, permission, ur.Id, grant.WithAnnotation(&v2.V1Identifier{ + Id: fmt.Sprintf("repo-grant:%s:%d:%s", repoResource.Id.Resource, user.GetID(), permission), + })) + g.Principal = ur + grants = append(grants, g) + } + } + } + + // List team collaborators + teamOpts := &github.ListOptions{ + PerPage: maxPageSize, + } + teams, _, err := o.client.Repositories.ListTeams(ctx, orgName, createdRepo.GetName(), teamOpts) + if err != nil { + l.Warn("github-connector: failed to list teams for grants", zap.Error(err)) + } else { + for _, team := range teams { + permission := team.GetPermission() + tr, err := teamResource(team, parentResourceID) + if err != nil { + continue + } + g := grant.NewGrant(repoResource, permission, tr.Id, grant.WithAnnotation(&v2.V1Identifier{ + Id: fmt.Sprintf("repo-grant:%s:%d:%s", repoResource.Id.Resource, team.GetID(), permission), + })) + g.Principal = tr + grants = append(grants, g) + } + } + // Build return values using SDK helpers resourceRv, err := actions.NewResourceReturnField("resource", repoResource) if err != nil { return nil, annos, err } - return actions.NewReturnValues(true, resourceRv), annos, nil + entitlementsRv, err := actions.NewEntitlementListReturnField("entitlements", entitlements) + if err != nil { + return nil, annos, err + } + + grantsRv, err := actions.NewGrantListReturnField("grants", grants) + if err != nil { + return nil, annos, err + } + + return actions.NewReturnValues(true, resourceRv, entitlementsRv, grantsRv), annos, nil } diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 2d54e9ba..3c9e5130 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -387,21 +387,21 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr Arguments: []*config.Field{ { Name: "name", - DisplayName: "Team Name", - Description: "The name of the team.", + DisplayName: "Team name", + Description: "You’ll use this name to mention this team in conversations.", Field: &config.Field_StringField{}, IsRequired: true, }, { Name: "description", DisplayName: "Description", - Description: "The description of the team.", + Description: "What is this team all about?", Field: &config.Field_StringField{}, }, { Name: "org", DisplayName: "Organization", - Description: "The organization name. 
The name is not case sensitive.", + Description: "The organization name.", Field: &config.Field_ResourceIdField{ ResourceIdField: &config.ResourceIdField{ Rules: &config.ResourceIDRules{ @@ -413,8 +413,8 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr }, { Name: "parent", - DisplayName: "Parent Team ID", - Description: "The name of a team to set as the parent team.", + DisplayName: "Parent team", + Description: "The team to set as the parent team.", Field: &config.Field_ResourceIdField{ ResourceIdField: &config.ResourceIdField{ Rules: &config.ResourceIDRules{ @@ -426,20 +426,20 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr { Name: "privacy", DisplayName: "Privacy", - Description: "The privacy level of the team", + Description: "The level of privacy this team should have.", Field: &config.Field_StringField{ StringField: &config.StringField{ Options: []*config.StringFieldOption{ - {Value: "secret", DisplayName: "Secret (only visible to org owners and team members)"}, - {Value: "closed", DisplayName: "Closed (visible to all org members)"}, + {Value: "secret", Name: "Secret is only visible to org owners and team members", DisplayName: "Secret"}, + {Value: "closed", Name: "Closed is visible to all org members. When parent team is set, this is the only allowed privacy level.", DisplayName: "Closed"}, }, }, }, }, { Name: "notifications_enabled", - DisplayName: "Team Notifications", - Description: "Enable team notifications. When enabled, team members receive notifications when the team is @mentioned. Default: enabled", + DisplayName: "Team notifications", + Description: "When enabled, team members receive notifications when the team is @mentioned.", Field: &config.Field_BoolField{ BoolField: &config.BoolField{ DefaultValue: true, @@ -460,7 +460,7 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr }, { Name: "repo_names", - DisplayName: "Repository Names", + DisplayName: "Repository names", Description: "The full name (e.g., organization-name/repository-name) of repositories to add the team to.", Field: &config.Field_ResourceIdSliceField{ ResourceIdSliceField: &config.ResourceIdSliceField{ @@ -605,18 +605,71 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str ) // Create the resource representation of the newly created team - resource, err := teamResource(createdTeam, parentResourceID) + teamRes, err := teamResource(createdTeam, parentResourceID) if err != nil { return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) } + // Generate entitlements for the newly created team + entitlements := make([]*v2.Entitlement, 0, len(teamAccessLevels)) + for _, level := range teamAccessLevels { + entitlements = append(entitlements, entitlement.NewPermissionEntitlement(teamRes, level, + entitlement.WithAnnotation(&v2.V1Identifier{ + Id: fmt.Sprintf("team:%s:role:%s", teamRes.Id.Resource, level), + }), + entitlement.WithDisplayName(fmt.Sprintf("%s Team %s", teamRes.DisplayName, titleCase(level))), + entitlement.WithDescription(fmt.Sprintf("Access to %s team in GitHub", teamRes.DisplayName)), + entitlement.WithGrantableTo(resourceTypeUser), + )) + } + + // Fetch grants for the newly created team by listing members + var grants []*v2.Grant + for _, role := range teamAccessLevels { + opts := &github.TeamListTeamMembersOptions{ + Role: role, + ListOptions: github.ListOptions{ + PerPage: maxPageSize, + }, + } + members, _, err := 
o.client.Teams.ListTeamMembersByID(ctx, createdTeam.GetOrganization().GetID(), createdTeam.GetID(), opts) + if err != nil { + l.Warn("github-connector: failed to list team members for grants", + zap.Error(err), + zap.String("role", role), + ) + continue + } + for _, member := range members { + ur, err := userResource(ctx, member, member.GetEmail(), nil) + if err != nil { + continue + } + grants = append(grants, grant.NewGrant(teamRes, role, ur.Id, + grant.WithAnnotation(&v2.V1Identifier{ + Id: fmt.Sprintf("team-grant:%s:%d:%s", teamRes.Id.Resource, member.GetID(), role), + }), + )) + } + } + // Build return values using SDK helpers - resourceRv, err := actions.NewResourceReturnField("resource", resource) + resourceRv, err := actions.NewResourceReturnField("resource", teamRes) + if err != nil { + return nil, annos, err + } + + entitlementsRv, err := actions.NewEntitlementListReturnField("entitlements", entitlements) + if err != nil { + return nil, annos, err + } + + grantsRv, err := actions.NewGrantListReturnField("grants", grants) if err != nil { return nil, annos, err } - return actions.NewReturnValues(true, resourceRv), annos, nil + return actions.NewReturnValues(true, resourceRv, entitlementsRv, grantsRv), annos, nil } func teamBuilder(client *github.Client, orgCache *orgNameCache) *teamResourceType { From fef640fe941591d4e74c3c81126174e6d528a9f8 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Fri, 30 Jan 2026 12:29:11 +0530 Subject: [PATCH 13/19] lint fix --- pkg/connector/team.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 3c9e5130..1172e848 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -26,7 +26,6 @@ const ( teamRoleMember = "member" teamRoleMaintainer = "maintainer" - // Team privacy levels teamPrivacySecret = "secret" teamPrivacyClosed = "closed" ) From 9f975099a97a26587c12907a7710fac9bf044d24 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Fri, 30 Jan 2026 13:52:09 +0530 Subject: [PATCH 14/19] improvements --- pkg/connector/repository.go | 36 +++++++++++++++++++++++++++--------- pkg/connector/team.go | 7 +++++++ 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index 19c319d3..55586443 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -489,6 +489,7 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont {Value: "public", DisplayName: "Public", Name: "Anyone on the internet can view this repository"}, {Value: "private", DisplayName: "Private", Name: "You can choose who can see this repository"}, }, + DefaultValue: "private", }, }, }, @@ -552,6 +553,12 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont ReturnTypes: []*config.Field{ {Name: "success", Field: &config.Field_BoolField{}}, {Name: "resource", Field: &config.Field_ResourceField{}}, + {Name: "entitlements", DisplayName: "Entitlements", Field: &config.Field_EntitlementSliceField{ + EntitlementSliceField: &config.EntitlementSliceField{}, + }}, + {Name: "grants", DisplayName: "Grants", Field: &config.Field_GrantSliceField{ + GrantSliceField: &config.GrantSliceField{}, + }}, }, }, o.handleCreateRepositoryAction) } @@ -692,16 +699,27 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex l.Warn("github-connector: failed to list teams for grants", zap.Error(err)) } else { for _, team := range teams { - permission := team.GetPermission() - tr, err := teamResource(team, 
parentResourceID) - if err != nil { - continue + for permission, hasPermission := range team.Permissions { + if !hasPermission { + continue + } + tr, err := teamResource(team, parentResourceID) + if err != nil { + continue + } + grants = append(grants, grant.NewGrant(repoResource, permission, tr.Id, grant.WithAnnotation( + &v2.V1Identifier{ + Id: fmt.Sprintf("repo-grant:%s:%d:%s", repoResource.Id.Resource, team.GetID(), permission), + }, + &v2.GrantExpandable{ + EntitlementIds: []string{ + entitlement.NewEntitlementID(tr, teamRoleMaintainer), + entitlement.NewEntitlementID(tr, teamRoleMember), + }, + Shallow: true, + }, + ))) } - g := grant.NewGrant(repoResource, permission, tr.Id, grant.WithAnnotation(&v2.V1Identifier{ - Id: fmt.Sprintf("repo-grant:%s:%d:%s", repoResource.Id.Resource, team.GetID(), permission), - })) - g.Principal = tr - grants = append(grants, g) } } diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 1172e848..caf5acb2 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -432,6 +432,7 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr {Value: "secret", Name: "Secret is only visible to org owners and team members", DisplayName: "Secret"}, {Value: "closed", Name: "Closed is visible to all org members. When parent team is set, this is the only allowed privacy level.", DisplayName: "Closed"}, }, + DefaultValue: "closed", }, }, }, @@ -473,6 +474,12 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr ReturnTypes: []*config.Field{ {Name: "success", Field: &config.Field_BoolField{}}, {Name: "resource", Field: &config.Field_ResourceField{}}, + {Name: "entitlements", DisplayName: "Entitlements", Field: &config.Field_EntitlementSliceField{ + EntitlementSliceField: &config.EntitlementSliceField{}, + }}, + {Name: "grants", DisplayName: "Grants", Field: &config.Field_GrantSliceField{ + GrantSliceField: &config.GrantSliceField{}, + }}, }, }, o.handleCreateTeamAction) } From e5880dfaf6d01459913134548815d656c10bb3f2 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Fri, 30 Jan 2026 14:15:47 +0530 Subject: [PATCH 15/19] refactor --- pkg/connector/repository.go | 15 ++++----------- pkg/connector/team.go | 15 ++++----------- 2 files changed, 8 insertions(+), 22 deletions(-) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index 55586443..807538d5 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -645,17 +645,10 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) } - // Generate entitlements for the newly created repository - entitlements := make([]*v2.Entitlement, 0, len(repoAccessLevels)) - for _, level := range repoAccessLevels { - entitlements = append(entitlements, entitlement.NewPermissionEntitlement(repoResource, level, - entitlement.WithDisplayName(fmt.Sprintf("%s Repo %s", repoResource.DisplayName, titleCase(level))), - entitlement.WithDescription(fmt.Sprintf("Access to %s repository in GitHub", repoResource.DisplayName)), - entitlement.WithAnnotation(&v2.V1Identifier{ - Id: fmt.Sprintf("repo:%s:role:%s", repoResource.Id.Resource, level), - }), - entitlement.WithGrantableTo(resourceTypeUser, resourceTypeTeam), - )) + // Generate entitlements for the newly created repository (reuse existing method) + entitlements, _, _, err := o.Entitlements(ctx, repoResource, nil) + if err != nil { + return nil, annos, fmt.Errorf("failed to generate 
entitlements: %w", err) } // Fetch grants for the newly created repository by listing collaborators diff --git a/pkg/connector/team.go b/pkg/connector/team.go index caf5acb2..02b01900 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -616,17 +616,10 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str return nil, annos, fmt.Errorf("failed to create resource representation: %w", err) } - // Generate entitlements for the newly created team - entitlements := make([]*v2.Entitlement, 0, len(teamAccessLevels)) - for _, level := range teamAccessLevels { - entitlements = append(entitlements, entitlement.NewPermissionEntitlement(teamRes, level, - entitlement.WithAnnotation(&v2.V1Identifier{ - Id: fmt.Sprintf("team:%s:role:%s", teamRes.Id.Resource, level), - }), - entitlement.WithDisplayName(fmt.Sprintf("%s Team %s", teamRes.DisplayName, titleCase(level))), - entitlement.WithDescription(fmt.Sprintf("Access to %s team in GitHub", teamRes.DisplayName)), - entitlement.WithGrantableTo(resourceTypeUser), - )) + // Generate entitlements for the newly created team (reuse existing method) + entitlements, _, _, err := o.Entitlements(ctx, teamRes, nil) + if err != nil { + return nil, annos, fmt.Errorf("failed to generate entitlements: %w", err) } // Fetch grants for the newly created team by listing members From 4d833f0277981aa312e6587357d568fb1a109e78 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Fri, 30 Jan 2026 15:07:45 +0530 Subject: [PATCH 16/19] grants refactor --- pkg/connector/repository.go | 76 +++++++------------------------------ pkg/connector/team.go | 35 ++++++----------- 2 files changed, 24 insertions(+), 87 deletions(-) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index 807538d5..a821a1a8 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -602,9 +602,7 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex if visibility == "public" || visibility == "private" { newRepo.Visibility = github.Ptr(visibility) } else { - l.Warn("github-connector: invalid visibility value, using default", - zap.String("provided_visibility", visibility), - ) + return nil, nil, fmt.Errorf("invalid visibility: %q (must be \"public\" or \"private\")", visibility) } } @@ -651,69 +649,21 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex return nil, annos, fmt.Errorf("failed to generate entitlements: %w", err) } - // Fetch grants for the newly created repository by listing collaborators + // Fetch grants for the newly created repository by reusing the existing Grants method var grants []*v2.Grant - - // List user collaborators - collabOpts := &github.ListCollaboratorsOptions{ - Affiliation: "all", - ListOptions: github.ListOptions{ - PerPage: maxPageSize, - }, - } - users, _, err := o.client.Repositories.ListCollaborators(ctx, orgName, createdRepo.GetName(), collabOpts) - if err != nil { - l.Warn("github-connector: failed to list collaborators for grants", zap.Error(err)) - } else { - for _, user := range users { - for permission, hasPermission := range user.Permissions { - if !hasPermission { - continue - } - ur, err := userResource(ctx, user, user.GetEmail(), nil) - if err != nil { - continue - } - g := grant.NewGrant(repoResource, permission, ur.Id, grant.WithAnnotation(&v2.V1Identifier{ - Id: fmt.Sprintf("repo-grant:%s:%d:%s", repoResource.Id.Resource, user.GetID(), permission), - })) - g.Principal = ur - grants = append(grants, g) - } + pageToken := "" + for 
{ + pToken := &pagination.Token{Token: pageToken} + pageGrants, nextToken, _, err := o.Grants(ctx, repoResource, pToken) + if err != nil { + l.Warn("github-connector: failed to fetch grants for repository", zap.Error(err)) + break } - } - - // List team collaborators - teamOpts := &github.ListOptions{ - PerPage: maxPageSize, - } - teams, _, err := o.client.Repositories.ListTeams(ctx, orgName, createdRepo.GetName(), teamOpts) - if err != nil { - l.Warn("github-connector: failed to list teams for grants", zap.Error(err)) - } else { - for _, team := range teams { - for permission, hasPermission := range team.Permissions { - if !hasPermission { - continue - } - tr, err := teamResource(team, parentResourceID) - if err != nil { - continue - } - grants = append(grants, grant.NewGrant(repoResource, permission, tr.Id, grant.WithAnnotation( - &v2.V1Identifier{ - Id: fmt.Sprintf("repo-grant:%s:%d:%s", repoResource.Id.Resource, team.GetID(), permission), - }, - &v2.GrantExpandable{ - EntitlementIds: []string{ - entitlement.NewEntitlementID(tr, teamRoleMaintainer), - entitlement.NewEntitlementID(tr, teamRoleMember), - }, - Shallow: true, - }, - ))) - } + grants = append(grants, pageGrants...) + if nextToken == "" { + break } + pageToken = nextToken } // Build return values using SDK helpers diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 02b01900..9c1f213a 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -622,34 +622,21 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str return nil, annos, fmt.Errorf("failed to generate entitlements: %w", err) } - // Fetch grants for the newly created team by listing members + // Fetch grants for the newly created team by reusing the existing Grants method var grants []*v2.Grant - for _, role := range teamAccessLevels { - opts := &github.TeamListTeamMembersOptions{ - Role: role, - ListOptions: github.ListOptions{ - PerPage: maxPageSize, - }, - } - members, _, err := o.client.Teams.ListTeamMembersByID(ctx, createdTeam.GetOrganization().GetID(), createdTeam.GetID(), opts) + pageToken := "" + for { + pToken := &pagination.Token{Token: pageToken} + pageGrants, nextToken, _, err := o.Grants(ctx, teamRes, pToken) if err != nil { - l.Warn("github-connector: failed to list team members for grants", - zap.Error(err), - zap.String("role", role), - ) - continue + l.Warn("github-connector: failed to fetch grants for team", zap.Error(err)) + break } - for _, member := range members { - ur, err := userResource(ctx, member, member.GetEmail(), nil) - if err != nil { - continue - } - grants = append(grants, grant.NewGrant(teamRes, role, ur.Id, - grant.WithAnnotation(&v2.V1Identifier{ - Id: fmt.Sprintf("team-grant:%s:%d:%s", teamRes.Id.Resource, member.GetID(), role), - }), - )) + grants = append(grants, pageGrants...) 
+ if nextToken == "" { + break } + pageToken = nextToken } // Build return values using SDK helpers From f6357070f9fcaa5115d15dc364d69c76b0f7668e Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Tue, 3 Feb 2026 23:27:58 +0530 Subject: [PATCH 17/19] more improvements --- pkg/connector/repository.go | 21 +++++++++++++++++---- pkg/connector/team.go | 26 ++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/pkg/connector/repository.go b/pkg/connector/repository.go index a821a1a8..8f23b7be 100644 --- a/pkg/connector/repository.go +++ b/pkg/connector/repository.go @@ -488,6 +488,7 @@ func (o *repositoryResourceType) registerCreateRepositoryAction(ctx context.Cont Options: []*config.StringFieldOption{ {Value: "public", DisplayName: "Public", Name: "Anyone on the internet can view this repository"}, {Value: "private", DisplayName: "Private", Name: "You can choose who can see this repository"}, + {Value: "internal", DisplayName: "Internal", Name: "Members of the enterprise can view this repository (enterprise only)"}, }, DefaultValue: "private", }, @@ -599,23 +600,35 @@ func (o *repositoryResourceType) handleCreateRepositoryAction(ctx context.Contex } if visibility, ok := actions.GetStringArg(args, "visibility"); ok && visibility != "" { - if visibility == "public" || visibility == "private" { + if visibility == "public" || visibility == "private" || visibility == "internal" { newRepo.Visibility = github.Ptr(visibility) } else { - return nil, nil, fmt.Errorf("invalid visibility: %q (must be \"public\" or \"private\")", visibility) + return nil, nil, fmt.Errorf("invalid visibility: %q (must be \"public\", \"private\", or \"internal\")", visibility) } } + // Extract template options first to validate AutoInit requirements + gitignoreTemplate, hasGitignore := actions.GetStringArg(args, "gitignore_template") + licenseTemplate, hasLicense := actions.GetStringArg(args, "license_template") + hasTemplates := (hasGitignore && gitignoreTemplate != "") || (hasLicense && licenseTemplate != "") + // add_readme maps to AutoInit in GitHub API + // GitHub requires AutoInit=true when using gitignore_template or license_template if addReadme, ok := actions.GetBoolArg(args, "add_readme"); ok { + if !addReadme && hasTemplates { + return nil, nil, fmt.Errorf("add_readme must be true when gitignore_template or license_template is provided (GitHub requires auto_init=true for templates)") + } newRepo.AutoInit = github.Ptr(addReadme) + } else if hasTemplates { + // If templates are provided but add_readme wasn't explicitly set, enable AutoInit + newRepo.AutoInit = github.Ptr(true) } - if gitignoreTemplate, ok := actions.GetStringArg(args, "gitignore_template"); ok && gitignoreTemplate != "" { + if hasGitignore && gitignoreTemplate != "" { newRepo.GitignoreTemplate = github.Ptr(gitignoreTemplate) } - if licenseTemplate, ok := actions.GetStringArg(args, "license_template"); ok && licenseTemplate != "" { + if hasLicense && licenseTemplate != "" { newRepo.LicenseTemplate = github.Ptr(licenseTemplate) } diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 9c1f213a..7836aca2 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -448,8 +448,8 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr }, { Name: "maintainers", - DisplayName: "Maintainers", - Description: "List GitHub usernames for organization members who will become team maintainers.", + DisplayName: "Team Maintainers", + Description: "List of user resource IDs for organization 
members who will become team maintainers.", Field: &config.Field_ResourceIdSliceField{ ResourceIdSliceField: &config.ResourceIdSliceField{ Rules: &config.RepeatedResourceIdRules{ @@ -460,8 +460,8 @@ func (o *teamResourceType) registerCreateTeamAction(ctx context.Context, registr }, { Name: "repo_names", - DisplayName: "Repository names", - Description: "The full name (e.g., organization-name/repository-name) of repositories to add the team to.", + DisplayName: "Repositories", + Description: "List of repository resource IDs to add the team to.", Field: &config.Field_ResourceIdSliceField{ ResourceIdSliceField: &config.ResourceIdSliceField{ Rules: &config.RepeatedResourceIdRules{ @@ -526,6 +526,21 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str if err != nil { return nil, nil, fmt.Errorf("invalid parent team ID: %w", err) } + + // Fetch the parent team to validate it's not a secret team + // GitHub does not allow child teams under secret parent teams + org, resp, err := o.client.Organizations.Get(ctx, orgName) + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get organization %s", orgName)) + } + parentTeam, resp, err := o.client.Teams.GetTeamByID(ctx, org.GetID(), parentTeamID) //nolint:staticcheck // TODO: migrate to GetTeamBySlug + if err != nil { + return nil, nil, wrapGitHubError(err, resp, fmt.Sprintf("failed to get parent team %d", parentTeamID)) + } + if parentTeam.GetPrivacy() == teamPrivacySecret { + return nil, nil, fmt.Errorf("cannot create child team: parent team %q has privacy set to \"secret\"; GitHub does not allow child teams under secret parent teams", parentTeam.GetName()) + } + newTeam.ParentTeamID = github.Ptr(parentTeamID) isNestedTeam = true } @@ -545,6 +560,9 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str } else if privacy == teamPrivacySecret || privacy == teamPrivacyClosed { // Non-nested teams can be "secret" or "closed" newTeam.Privacy = github.Ptr(privacy) + } else { + // Invalid privacy value for non-nested team + return nil, nil, fmt.Errorf("invalid privacy value: %q (must be \"secret\" or \"closed\")", privacy) } } else if isNestedTeam { // Default for nested teams is "closed" From 58a8c95409c14f430f121351731cf72dfcb3ebd5 Mon Sep 17 00:00:00 2001 From: vipulgowda Date: Tue, 3 Feb 2026 23:32:10 +0530 Subject: [PATCH 18/19] lint fix --- pkg/connector/team.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/connector/team.go b/pkg/connector/team.go index 7836aca2..7d307c42 100644 --- a/pkg/connector/team.go +++ b/pkg/connector/team.go @@ -549,7 +549,8 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str // - For non-nested teams: "secret" (default) or "closed" // - For nested/child teams: only "closed" is allowed (default: closed) if privacy, ok := actions.GetStringArg(args, "privacy"); ok && privacy != "" { - if isNestedTeam { + switch { + case isNestedTeam: // Nested teams can only be "closed" if privacy == teamPrivacySecret { l.Warn("github-connector: secret privacy not allowed for nested teams, using closed", @@ -557,10 +558,10 @@ func (o *teamResourceType) handleCreateTeamAction(ctx context.Context, args *str ) } newTeam.Privacy = github.Ptr(teamPrivacyClosed) - } else if privacy == teamPrivacySecret || privacy == teamPrivacyClosed { + case privacy == teamPrivacySecret || privacy == teamPrivacyClosed: // Non-nested teams can be "secret" or "closed" newTeam.Privacy = github.Ptr(privacy) - } 
else { + default: // Invalid privacy value for non-nested team return nil, nil, fmt.Errorf("invalid privacy value: %q (must be \"secret\" or \"closed\")", privacy) } From 4e7150013da551125e002fcb81673544a1eb5cf5 Mon Sep 17 00:00:00 2001 From: Muhammad Kumail Date: Tue, 3 Feb 2026 23:11:02 +0000 Subject: [PATCH 19/19] rebase: main branch and baton sdk --- go.mod | 65 +- go.sum | 156 +- .../aws/aws-sdk-go-v2/aws/config.go | 12 + .../aws/aws-sdk-go-v2/aws/credentials.go | 4 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/middleware/user_agent.go | 7 + .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 2 +- .../aws/aws-sdk-go-v2/aws/signer/v4/stream.go | 2 +- .../aws/transport/http/client.go | 2 + .../aws/transport/http/timeout_read_closer.go | 5 + .../endpoints/awsrulesfn/partitions.go | 157 +- .../endpoints/awsrulesfn/partitions.json | 82 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 69 + vendor/github.com/aws/smithy-go/Makefile | 51 +- vendor/github.com/aws/smithy-go/README.md | 17 +- .../aws/smithy-go/endpoints/endpoint.go | 2 +- .../aws/smithy-go/go_module_metadata.go | 2 +- .../github.com/aws/smithy-go/metrics/nop.go | 93 +- .../aws/smithy-go/middleware/ordered_group.go | 14 +- .../aws/smithy-go/middleware/step_build.go | 185 +- .../smithy-go/middleware/step_deserialize.go | 175 +- .../aws/smithy-go/middleware/step_finalize.go | 167 +- .../smithy-go/middleware/step_initialize.go | 160 +- .../smithy-go/middleware/step_serialize.go | 166 +- vendor/github.com/aws/smithy-go/modman.toml | 1 - .../smithy-go/transport/http/interceptor.go | 321 + .../transport/http/interceptor_middleware.go | 325 + .../aws/smithy-go/transport/http/metrics.go | 6 + .../github.com/cenkalti/backoff/v4/context.go | 62 - .../cenkalti/backoff/v4/exponential.go | 216 - .../github.com/cenkalti/backoff/v4/retry.go | 146 - .../github.com/cenkalti/backoff/v4/tries.go | 38 - .../cenkalti/backoff/{v4 => v5}/.gitignore | 0 .../cenkalti/backoff/v5/CHANGELOG.md | 29 + .../cenkalti/backoff/{v4 => v5}/LICENSE | 0 .../cenkalti/backoff/{v4 => v5}/README.md | 15 +- .../cenkalti/backoff/{v4 => v5}/backoff.go | 14 +- .../github.com/cenkalti/backoff/v5/error.go | 46 + .../cenkalti/backoff/v5/exponential.go | 118 + .../github.com/cenkalti/backoff/v5/retry.go | 139 + .../cenkalti/backoff/{v4 => v5}/ticker.go | 18 +- .../cenkalti/backoff/{v4 => v5}/timer.go | 2 +- .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 + vendor/github.com/cespare/xxhash/v2/README.md | 74 + .../github.com/cespare/xxhash/v2/testall.sh | 10 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 243 + .../cespare/xxhash/v2/xxhash_amd64.s | 209 + .../cespare/xxhash/v2/xxhash_arm64.s | 183 + .../cespare/xxhash/v2/xxhash_asm.go | 15 + .../cespare/xxhash/v2/xxhash_other.go | 76 + .../cespare/xxhash/v2/xxhash_safe.go | 16 + .../cespare/xxhash/v2/xxhash_unsafe.go | 58 + .../pb/c1/connectorapi/baton/v1/baton.pb.go | 674 +- .../baton/v1/baton.pb.validate.go | 402 + .../baton/v1/baton_protoopaque.pb.go | 676 +- .../pkg/connectorbuilder/resource_manager.go | 4 +- .../baton-sdk/pkg/dotc1z/sql_helpers.go | 13 + .../conductorone/baton-sdk/pkg/sdk/version.go | 2 +- .../pkg/tasks/c1api/list_event_feeds.go | 56 + .../baton-sdk/pkg/tasks/c1api/list_events.go | 63 + .../baton-sdk/pkg/tasks/c1api/manager.go | 4 + .../conductorone/baton-sdk/pkg/tasks/tasks.go | 8 + .../go-jose/go-jose/v4/CHANGELOG.md | 96 - .../github.com/go-jose/go-jose/v4/README.md | 76 +- .../github.com/go-jose/go-jose/v4/crypter.go | 20 +- vendor/github.com/go-jose/go-jose/v4/jwe.go | 19 +- 
vendor/github.com/go-jose/go-jose/v4/jwk.go | 59 +- vendor/github.com/go-jose/go-jose/v4/jws.go | 74 +- .../github.com/go-jose/go-jose/v4/shared.go | 33 +- .../github.com/go-jose/go-jose/v4/signing.go | 44 +- .../go-jose/go-jose/v4/symmetric.go | 13 +- vendor/github.com/go-logr/logr/.golangci.yaml | 16 +- vendor/github.com/go-logr/logr/funcr/funcr.go | 8 +- .../grpc-gateway/v2/runtime/BUILD.bazel | 1 + .../grpc-gateway/v2/runtime/context.go | 6 +- .../grpc-gateway/v2/runtime/errors.go | 32 +- .../grpc-gateway/v2/runtime/handler.go | 10 +- .../grpc-gateway/v2/runtime/marshal_jsonpb.go | 6 +- .../grpc-gateway/v2/runtime/mux.go | 16 +- .../auto/sdk/internal/telemetry/id.go | 2 +- .../auto/sdk/internal/telemetry/number.go | 2 +- .../auto/sdk/internal/telemetry/span.go | 70 +- .../auto/sdk/internal/telemetry/status.go | 10 +- .../auto/sdk/internal/telemetry/traces.go | 4 +- .../auto/sdk/internal/telemetry/value.go | 14 +- vendor/go.opentelemetry.io/auto/sdk/span.go | 25 +- vendor/go.opentelemetry.io/auto/sdk/tracer.go | 29 +- .../contrib/bridges/otelzap/LICENSE | 30 + .../contrib/bridges/otelzap/convert.go | 7 +- .../contrib/bridges/otelzap/core.go | 36 +- .../contrib/bridges/otelzap/encoder.go | 11 +- .../google.golang.org/grpc/otelgrpc/LICENSE | 30 + .../google.golang.org/grpc/otelgrpc/config.go | 130 +- .../grpc/otelgrpc/interceptor.go | 501 +- .../grpc/otelgrpc/internal/parse.go | 3 +- .../grpc/otelgrpc/metadata_supplier.go | 25 +- .../grpc/otelgrpc/semconv.go | 41 - .../grpc/otelgrpc/stats_handler.go | 302 +- .../grpc/otelgrpc/version.go | 9 +- .../go.opentelemetry.io/otel/.clomonitor.yml | 3 + .../go.opentelemetry.io/otel/.codespellignore | 2 + vendor/go.opentelemetry.io/otel/.golangci.yml | 467 +- vendor/go.opentelemetry.io/otel/.lycheeignore | 7 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 271 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 2 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 523 +- vendor/go.opentelemetry.io/otel/LICENSE | 30 + vendor/go.opentelemetry.io/otel/Makefile | 29 +- vendor/go.opentelemetry.io/otel/README.md | 25 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 63 +- .../otel/SECURITY-INSIGHTS.yml | 203 + vendor/go.opentelemetry.io/otel/VERSIONING.md | 2 +- .../otel/attribute/encoder.go | 14 +- .../otel/attribute/filter.go | 12 +- .../otel/attribute/hash.go | 92 + .../internal}/attribute.go | 18 +- .../otel/attribute/internal/xxhash/xxhash.go | 64 + .../otel/attribute/iterator.go | 7 +- .../go.opentelemetry.io/otel/attribute/key.go | 2 +- .../go.opentelemetry.io/otel/attribute/kv.go | 2 +- .../otel/attribute/rawhelpers.go | 37 + .../go.opentelemetry.io/otel/attribute/set.go | 151 +- .../otel/attribute/type_string.go | 5 +- .../otel/attribute/value.go | 23 +- .../otel/baggage/baggage.go | 16 +- .../go.opentelemetry.io/otel/codes/codes.go | 4 +- .../otel/dependencies.Dockerfile | 5 +- .../otlp/otlplog/otlploggrpc/LICENSE | 30 + .../otlp/otlplog/otlploggrpc/client.go | 58 +- .../otlp/otlplog/otlploggrpc/config.go | 35 +- .../otlp/otlplog/otlploggrpc/exporter.go | 5 +- .../otlp/otlplog/otlploggrpc/internal/gen.go | 20 + .../internal/observ/instrumentation.go | 309 + .../otlploggrpc/internal/observ/target.go | 143 + .../otlploggrpc/internal/partialsuccess.go | 43 + .../otlploggrpc/internal/retry/retry.go | 35 +- .../otlploggrpc/internal/transform/log.go | 4 +- .../otlplog/otlploggrpc/internal/version.go | 8 + .../otlplog/otlploggrpc/internal/x/README.md | 36 + .../otlploggrpc/internal/x/features.go | 23 + .../otlp/otlplog/otlploggrpc/internal/x/x.go | 58 + 
.../otlp/otlplog/otlploggrpc/version.go | 2 +- .../otel/exporters/otlp/otlptrace/LICENSE | 30 + .../otel/exporters/otlp/otlptrace/exporter.go | 2 +- .../internal/tracetransform/attribute.go | 5 +- .../tracetransform/instrumentation.go | 3 +- .../internal/tracetransform/resource.go | 3 +- .../otlptrace/internal/tracetransform/span.go | 22 +- .../otlp/otlptrace/otlptracegrpc/LICENSE | 30 + .../otlp/otlptrace/otlptracegrpc/client.go | 47 +- .../otlptracegrpc/internal/counter/counter.go | 31 + .../internal/envconfig/envconfig.go | 4 +- .../otlptrace/otlptracegrpc/internal/gen.go | 10 + .../otlptracegrpc/internal/observ/doc.go | 6 + .../internal/observ/instrumentation.go | 341 + .../otlptracegrpc/internal/observ/target.go | 143 + .../internal/otlpconfig/envconfig.go | 14 +- .../internal/otlpconfig/options.go | 21 +- .../internal/otlpconfig/optiontypes.go | 2 +- .../otlptracegrpc/internal/otlpconfig/tls.go | 2 +- .../otlptracegrpc/internal/partialsuccess.go | 13 +- .../otlptracegrpc/internal/retry/retry.go | 35 +- .../otlptracegrpc/internal/version.go | 8 + .../otlptracegrpc/internal/x/README.md | 36 + .../otlptracegrpc/internal/x/observ.go | 22 + .../otlptrace/otlptracegrpc/internal/x/x.go | 58 + .../otlp/otlptrace/otlptracegrpc/options.go | 5 +- .../otel/exporters/otlp/otlptrace/version.go | 2 +- .../go.opentelemetry.io/otel/get_main_pkgs.sh | 30 - .../go.opentelemetry.io/otel/internal/gen.go | 18 - .../otel/internal/global/handler.go | 1 + .../otel/internal/global/internal_logging.go | 8 +- .../otel/internal/global/meter.go | 47 +- .../otel/internal/global/trace.go | 14 +- .../otel/internal/rawhelpers.go | 48 - vendor/go.opentelemetry.io/otel/log/LICENSE | 30 + vendor/go.opentelemetry.io/otel/log/doc.go | 17 +- .../otel/log/embedded/embedded.go | 18 +- .../otel/log/global/log.go | 2 +- .../otel/log/internal/global/log.go | 2 + .../go.opentelemetry.io/otel/log/keyvalue.go | 8 +- .../otel/log/kind_string.go | 5 +- vendor/go.opentelemetry.io/otel/log/logger.go | 49 +- .../go.opentelemetry.io/otel/log/noop/noop.go | 6 +- vendor/go.opentelemetry.io/otel/log/record.go | 8 + .../otel/log/severity_string.go | 5 +- vendor/go.opentelemetry.io/otel/metric.go | 2 +- .../go.opentelemetry.io/otel/metric/LICENSE | 30 + .../otel/metric/asyncfloat64.go | 12 +- .../otel/metric/asyncint64.go | 8 +- .../go.opentelemetry.io/otel/metric/config.go | 38 +- .../otel/metric/instrument.go | 16 +- .../go.opentelemetry.io/otel/metric/meter.go | 10 +- .../otel/metric/noop/noop.go | 25 +- .../otel/propagation/baggage.go | 40 +- .../otel/propagation/propagation.go | 34 +- .../otel/propagation/trace_context.go | 8 +- vendor/go.opentelemetry.io/otel/renovate.json | 7 +- vendor/go.opentelemetry.io/otel/sdk/LICENSE | 30 + .../otel/sdk/internal/x/features.go | 39 + .../otel/sdk/internal/x/x.go | 48 +- .../go.opentelemetry.io/otel/sdk/log/LICENSE | 30 + .../go.opentelemetry.io/otel/sdk/log/batch.go | 43 +- .../go.opentelemetry.io/otel/sdk/log/doc.go | 5 +- .../otel/sdk/log/exporter.go | 15 +- .../otel/sdk/log/filter_processor.go | 62 - .../otel/sdk/log/instrumentation.go | 39 + .../otel/sdk/log/internal/observ/doc.go | 6 + .../internal/observ/simple_log_processor.go | 126 + .../otel/sdk/log/internal/x/README.md | 34 + .../otel/sdk/log/internal/x/features.go | 23 + .../otel/sdk/log/internal/x/x.go | 58 + .../otel/sdk/log/logger.go | 43 +- .../otel/sdk/log/processor.go | 44 +- .../otel/sdk/log/provider.go | 39 +- .../otel/sdk/log/record.go | 382 +- .../otel/sdk/log/simple.go | 29 +- .../otel/sdk/resource/builtin.go | 4 +- 
.../otel/sdk/resource/container.go | 4 +- .../otel/sdk/resource/env.go | 2 +- .../otel/sdk/resource/host_id.go | 4 +- .../otel/sdk/resource/host_id_bsd.go | 1 - .../otel/sdk/resource/host_id_linux.go | 1 - .../otel/sdk/resource/host_id_unsupported.go | 1 - .../otel/sdk/resource/host_id_windows.go | 1 - .../otel/sdk/resource/os.go | 6 +- .../otel/sdk/resource/os_release_unix.go | 7 +- .../otel/sdk/resource/os_unix.go | 1 - .../otel/sdk/resource/os_unsupported.go | 1 - .../otel/sdk/resource/process.go | 18 +- .../otel/sdk/resource/resource.go | 4 +- .../otel/sdk/trace/batch_span_processor.go | 58 +- .../go.opentelemetry.io/otel/sdk/trace/doc.go | 3 + .../otel/sdk/trace/id_generator.go | 30 +- .../otel/sdk/{ => trace}/internal/env/env.go | 4 +- .../internal/observ/batch_span_processor.go | 119 + .../otel/sdk/trace/internal/observ/doc.go | 6 + .../internal/observ/simple_span_processor.go | 97 + .../otel/sdk/trace/internal/observ/tracer.go | 223 + .../otel/sdk/trace/provider.go | 26 +- .../otel/sdk/trace/sampling.go | 8 +- .../otel/sdk/trace/simple_span_processor.go | 37 +- .../otel/sdk/trace/snapshot.go | 2 +- .../otel/sdk/trace/span.go | 30 +- .../otel/sdk/trace/span_limits.go | 2 +- .../otel/sdk/trace/tracer.go | 45 +- .../otel/sdk/trace/version.go | 9 - .../go.opentelemetry.io/otel/sdk/version.go | 3 +- .../otel/semconv/v1.17.0/README.md | 3 - .../otel/semconv/v1.17.0/doc.go | 9 - .../otel/semconv/v1.17.0/event.go | 188 - .../otel/semconv/v1.17.0/http.go | 10 - .../otel/semconv/v1.17.0/resource.go | 1999 -- .../otel/semconv/v1.17.0/trace.go | 3364 ---- .../otel/semconv/v1.26.0/README.md | 3 - .../otel/semconv/v1.26.0/attribute_group.go | 8996 --------- .../otel/semconv/v1.26.0/exception.go | 9 - .../otel/semconv/v1.26.0/metric.go | 1307 -- .../otel/semconv/v1.26.0/schema.go | 9 - .../otel/semconv/v1.37.0/MIGRATION.md | 41 + .../otel/semconv/v1.37.0/README.md | 3 + .../otel/semconv/v1.37.0/attribute_group.go | 15193 ++++++++++++++++ .../otel/semconv/{v1.26.0 => v1.37.0}/doc.go | 4 +- .../otel/semconv/v1.37.0/error_type.go | 56 + .../semconv/{v1.17.0 => v1.37.0}/exception.go | 2 +- .../otel/semconv/v1.37.0/otelconv/metric.go | 2264 +++ .../otel/semconv/v1.37.0/rpcconv/metric.go | 1010 + .../semconv/{v1.17.0 => v1.37.0}/schema.go | 4 +- vendor/go.opentelemetry.io/otel/trace/LICENSE | 30 + vendor/go.opentelemetry.io/otel/trace/auto.go | 11 +- .../go.opentelemetry.io/otel/trace/config.go | 49 +- vendor/go.opentelemetry.io/otel/trace/hex.go | 38 + .../otel/trace/internal/telemetry/attr.go | 2 +- .../otel/trace/internal/telemetry/id.go | 6 +- .../otel/trace/internal/telemetry/span.go | 56 +- .../otel/trace/internal/telemetry/status.go | 12 +- .../otel/trace/internal/telemetry/traces.go | 4 +- .../otel/trace/internal/telemetry/value.go | 6 +- vendor/go.opentelemetry.io/otel/trace/noop.go | 6 +- .../otel/trace/noop/noop.go | 2 +- vendor/go.opentelemetry.io/otel/trace/span.go | 4 + .../go.opentelemetry.io/otel/trace/trace.go | 156 +- .../otel/trace/tracestate.go | 6 +- .../otel/verify_readmes.sh | 21 - vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 30 +- .../collector/logs/v1/logs_service_grpc.pb.go | 4 - .../trace/v1/trace_service_grpc.pb.go | 4 - .../proto/otlp/common/v1/common.pb.go | 161 +- .../proto/otlp/logs/v1/logs.pb.go | 6 +- .../proto/otlp/resource/v1/resource.pb.go | 59 +- .../proto/otlp/trace/v1/trace.pb.go | 36 +- vendor/go.uber.org/zap/.golangci.yml | 2 +- vendor/go.uber.org/zap/CHANGELOG.md | 10 + 
vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 4 +- vendor/go.uber.org/zap/LICENSE | 2 +- vendor/go.uber.org/zap/Makefile | 2 +- vendor/go.uber.org/zap/field.go | 10 + vendor/go.uber.org/zap/http_handler.go | 2 +- vendor/go.uber.org/zap/logger.go | 6 +- vendor/go.uber.org/zap/options.go | 6 +- vendor/go.uber.org/zap/sink.go | 2 +- .../zap/zapcore/buffered_write_syncer.go | 23 +- .../zap/zapcore/console_encoder.go | 2 +- vendor/go.uber.org/zap/zapcore/entry.go | 14 +- vendor/go.uber.org/zap/zapcore/lazy_with.go | 35 +- vendor/go.uber.org/zap/zapcore/level.go | 14 +- .../x/crypto/chacha20/chacha_arm64.s | 2 +- .../chacha20poly1305_amd64.go | 10 +- .../chacha20poly1305_generic.go | 10 +- .../x/crypto/curve25519/curve25519.go | 13 +- .../x/crypto/internal/poly1305/mac_noasm.go | 2 +- .../poly1305/{sum_amd64.go => sum_asm.go} | 2 +- .../x/crypto/internal/poly1305/sum_loong64.s | 123 + .../x/crypto/internal/poly1305/sum_ppc64x.go | 47 - vendor/golang.org/x/crypto/ssh/certs.go | 67 +- vendor/golang.org/x/crypto/ssh/cipher.go | 80 +- vendor/golang.org/x/crypto/ssh/client.go | 1 + vendor/golang.org/x/crypto/ssh/client_auth.go | 26 +- vendor/golang.org/x/crypto/ssh/common.go | 477 +- vendor/golang.org/x/crypto/ssh/connection.go | 12 + vendor/golang.org/x/crypto/ssh/doc.go | 11 + vendor/golang.org/x/crypto/ssh/handshake.go | 83 +- vendor/golang.org/x/crypto/ssh/kex.go | 155 +- vendor/golang.org/x/crypto/ssh/keys.go | 96 +- vendor/golang.org/x/crypto/ssh/mac.go | 42 +- vendor/golang.org/x/crypto/ssh/messages.go | 10 +- vendor/golang.org/x/crypto/ssh/mlkem.go | 168 + vendor/golang.org/x/crypto/ssh/server.go | 46 +- vendor/golang.org/x/crypto/ssh/tcpip.go | 2 +- vendor/golang.org/x/crypto/ssh/transport.go | 19 +- .../x/net/context/ctxhttp/ctxhttp.go | 2 +- vendor/golang.org/x/net/http2/config.go | 63 +- vendor/golang.org/x/net/http2/config_go124.go | 61 - vendor/golang.org/x/net/http2/config_go125.go | 15 + vendor/golang.org/x/net/http2/config_go126.go | 15 + .../x/net/http2/config_pre_go124.go | 16 - vendor/golang.org/x/net/http2/frame.go | 124 +- vendor/golang.org/x/net/http2/gotrack.go | 17 +- vendor/golang.org/x/net/http2/http2.go | 37 +- vendor/golang.org/x/net/http2/server.go | 269 +- vendor/golang.org/x/net/http2/timer.go | 20 - vendor/golang.org/x/net/http2/transport.go | 248 +- vendor/golang.org/x/net/http2/writesched.go | 67 +- ...rity.go => writesched_priority_rfc7540.go} | 109 +- .../net/http2/writesched_priority_rfc9218.go | 209 + .../x/net/http2/writesched_roundrobin.go | 2 +- .../x/net/internal/httpcommon/headermap.go | 6 +- .../x/net/internal/httpcommon/request.go | 168 +- vendor/golang.org/x/net/trace/events.go | 2 +- .../clientcredentials/clientcredentials.go | 10 +- vendor/golang.org/x/oauth2/internal/doc.go | 2 +- vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +- vendor/golang.org/x/oauth2/internal/token.go | 50 +- .../golang.org/x/oauth2/internal/transport.go | 4 +- vendor/golang.org/x/oauth2/jws/jws.go | 6 +- vendor/golang.org/x/oauth2/jwt/jwt.go | 13 +- vendor/golang.org/x/oauth2/oauth2.go | 58 +- vendor/golang.org/x/oauth2/pkce.go | 15 +- vendor/golang.org/x/oauth2/token.go | 15 +- vendor/golang.org/x/oauth2/transport.go | 24 +- vendor/golang.org/x/sys/cpu/cpu.go | 3 - vendor/golang.org/x/sys/cpu/cpu_arm64.go | 20 +- vendor/golang.org/x/sys/cpu/cpu_arm64.s | 7 - vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 1 - .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 1 - .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 2 +- .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 2 +- 
vendor/golang.org/x/sys/unix/mkerrors.sh | 3 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 2 + .../x/sys/unix/zerrors_linux_386.go | 2 + .../x/sys/unix/zerrors_linux_amd64.go | 2 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 2 + .../x/sys/unix/zerrors_linux_loong64.go | 2 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + .../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + .../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../x/sys/unix/ztypes_netbsd_arm.go | 2 +- vendor/golang.org/x/term/term_windows.go | 4 +- vendor/golang.org/x/term/terminal.go | 15 +- vendor/golang.org/x/text/unicode/bidi/core.go | 11 +- .../googleapis/api/httpbody/httpbody.pb.go | 2 +- .../rpc/errdetails/error_details.pb.go | 315 +- .../googleapis/rpc/status/status.pb.go | 2 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 184 +- vendor/google.golang.org/grpc/MAINTAINERS.md | 8 +- vendor/google.golang.org/grpc/README.md | 1 + .../grpc/balancer/balancer.go | 8 +- .../grpc/balancer/base/balancer.go | 12 +- .../endpointsharding/endpointsharding.go | 77 +- .../grpc/balancer/pickfirst/pickfirst.go | 867 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 932 - .../grpc/balancer/roundrobin/roundrobin.go | 13 +- .../grpc/balancer_wrapper.go | 9 +- .../grpc_binarylog_v1/binarylog.pb.go | 191 +- vendor/google.golang.org/grpc/clientconn.go | 194 +- .../grpc/credentials/credentials.go | 62 +- .../grpc/credentials/insecure/insecure.go | 8 +- .../google.golang.org/grpc/credentials/tls.go | 46 +- vendor/google.golang.org/grpc/dialoptions.go | 25 +- .../grpc/encoding/encoding.go | 20 + .../grpc/encoding/internal/internal.go | 28 + .../grpc/encoding/proto/proto.go | 20 +- .../grpc/experimental/stats/metricregistry.go | 72 + .../grpc/experimental/stats/metrics.go | 10 + .../grpc/health/grpc_health_v1/health.pb.go | 173 +- .../health/grpc_health_v1/health_grpc.pb.go | 70 +- .../balancer/gracefulswitch/gracefulswitch.go | 22 +- .../grpc/internal/buffer/unbounded.go | 1 + .../grpc/internal/channelz/trace.go | 2 +- .../grpc/internal/credentials/credentials.go | 14 - .../grpc/internal/envconfig/envconfig.go | 55 +- .../grpc/internal/envconfig/xds.go | 16 + .../grpc/internal/experimental.go | 4 + .../internal/grpcsync/callback_serializer.go | 22 +- .../grpc/internal/grpcsync/event.go | 19 +- .../grpc/internal/idle/idle.go | 77 +- .../grpc/internal/internal.go | 53 +- .../grpc/internal/metadata/metadata.go | 26 +- .../delegatingresolver/delegatingresolver.go | 246 +- .../internal/resolver/dns/dns_resolver.go | 20 +- .../internal/stats/metrics_recorder_list.go | 10 + .../grpc/internal/stats/stats.go | 70 + .../grpc/internal/status/status.go | 8 + .../grpc/internal/transport/client_stream.go | 34 +- .../grpc/internal/transport/controlbuf.go | 102 +- .../grpc/internal/transport/flowcontrol.go | 23 +- .../grpc/internal/transport/handler_server.go | 50 +- .../grpc/internal/transport/http2_client.go | 279 +- .../grpc/internal/transport/http2_server.go | 146 +- .../grpc/internal/transport/http_util.go | 176 +- .../grpc/internal/transport/server_stream.go | 19 +- .../grpc/internal/transport/transport.go | 109 +- .../google.golang.org/grpc/mem/buffer_pool.go | 29 +- .../grpc/mem/buffer_slice.go | 104 +- 
.../google.golang.org/grpc/picker_wrapper.go | 36 +- vendor/google.golang.org/grpc/preloader.go | 3 - vendor/google.golang.org/grpc/resolver/map.go | 81 +- .../grpc/resolver/resolver.go | 5 + .../grpc/resolver_wrapper.go | 1 + vendor/google.golang.org/grpc/rpc_util.go | 138 +- vendor/google.golang.org/grpc/server.go | 127 +- .../google.golang.org/grpc/stats/handlers.go | 9 + vendor/google.golang.org/grpc/stats/stats.go | 55 +- vendor/google.golang.org/grpc/stream.go | 261 +- vendor/google.golang.org/grpc/version.go | 2 +- .../protobuf/encoding/protowire/wire.go | 26 +- .../editiondefaults/editions_defaults.binpb | Bin 146 -> 154 bytes .../internal/editionssupport/editions.go | 2 +- .../protobuf/internal/filedesc/desc.go | 52 +- .../protobuf/internal/filedesc/desc_init.go | 14 + .../protobuf/internal/filedesc/desc_lazy.go | 20 + .../protobuf/internal/filedesc/editions.go | 15 +- .../protobuf/internal/filedesc/presence.go | 33 + .../protobuf/internal/genid/api_gen.go | 6 + .../protobuf/internal/genid/descriptor_gen.go | 90 +- .../internal/impl/codec_message_opaque.go | 3 +- .../protobuf/internal/impl/message_opaque.go | 45 +- .../protobuf/internal/impl/presence.go | 3 - .../protobuf/internal/version/version.go | 2 +- .../protobuf/reflect/protodesc/desc.go | 22 + .../protobuf/reflect/protodesc/desc_init.go | 2 + .../protobuf/reflect/protodesc/proto.go | 37 +- .../reflect/protoreflect/source_gen.go | 8 + .../types/descriptorpb/descriptor.pb.go | 643 +- vendor/modules.txt | 147 +- 474 files changed, 36171 insertions(+), 23269 deletions(-) create mode 100644 vendor/github.com/aws/smithy-go/transport/http/interceptor.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/context.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/exponential.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/retry.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/tries.go rename vendor/github.com/cenkalti/backoff/{v4 => v5}/.gitignore (100%) create mode 100644 vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md rename vendor/github.com/cenkalti/backoff/{v4 => v5}/LICENSE (100%) rename vendor/github.com/cenkalti/backoff/{v4 => v5}/README.md (64%) rename vendor/github.com/cenkalti/backoff/{v4 => v5}/backoff.go (87%) create mode 100644 vendor/github.com/cenkalti/backoff/v5/error.go create mode 100644 vendor/github.com/cenkalti/backoff/v5/exponential.go create mode 100644 vendor/github.com/cenkalti/backoff/v5/retry.go rename vendor/github.com/cenkalti/backoff/{v4 => v5}/ticker.go (80%) rename vendor/github.com/cenkalti/backoff/{v4 => v5}/timer.go (96%) create mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt create mode 100644 vendor/github.com/cespare/xxhash/v2/README.md create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_asm.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go create mode 100644 vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_event_feeds.go create mode 100644 
vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_events.go delete mode 100644 vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go create mode 100644 vendor/go.opentelemetry.io/otel/.clomonitor.yml create mode 100644 vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml create mode 100644 vendor/go.opentelemetry.io/otel/attribute/hash.go rename vendor/go.opentelemetry.io/otel/{internal/attribute => attribute/internal}/attribute.go (84%) create mode 100644 vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/gen.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/instrumentation.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/target.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/partialsuccess.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/version.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/features.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/x.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go delete mode 100644 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh delete mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/instrumentation.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/simple_log_processor.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/x/features.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go rename vendor/go.opentelemetry.io/otel/sdk/{ => trace}/internal/env/env.go (97%) create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go 
create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/version.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.26.0 => v1.37.0}/doc.go (80%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.37.0}/exception.go (76%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.37.0}/schema.go (72%) create mode 100644 vendor/go.opentelemetry.io/otel/trace/hex.go delete mode 100644 vendor/go.opentelemetry.io/otel/verify_readmes.sh rename vendor/golang.org/x/crypto/internal/poly1305/{sum_amd64.go => sum_asm.go} (94%) create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go create mode 100644 vendor/golang.org/x/crypto/ssh/mlkem.go delete mode 100644 vendor/golang.org/x/net/http2/config_go124.go create mode 100644 vendor/golang.org/x/net/http2/config_go125.go create mode 100644 vendor/golang.org/x/net/http2/config_go126.go delete mode 100644 vendor/golang.org/x/net/http2/config_pre_go124.go delete mode 100644 vendor/golang.org/x/net/http2/timer.go rename vendor/golang.org/x/net/http2/{writesched_priority.go => writesched_priority_rfc7540.go} (77%) create mode 100644 vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go delete mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go create mode 100644 vendor/google.golang.org/grpc/encoding/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/stats/stats.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/presence.go diff --git a/go.mod b/go.mod index 60c37994..5957a2c4 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/conductorone/baton-github go 1.25.2 require ( - github.com/conductorone/baton-sdk v0.7.10 + github.com/conductorone/baton-sdk v0.7.12 github.com/deckarep/golang-set/v2 v2.8.0 
github.com/ennyjfrick/ruleguard-logfatal v0.0.2 github.com/golang-jwt/jwt/v5 v5.2.2 @@ -13,11 +13,11 @@ require ( github.com/quasilyte/go-ruleguard/dsl v0.3.22 github.com/shurcooL/githubv4 v0.0.0-20240727222349-48295856cce7 github.com/stretchr/testify v1.11.1 - go.uber.org/zap v1.27.0 - golang.org/x/oauth2 v0.29.0 - golang.org/x/text v0.24.0 - google.golang.org/grpc v1.71.1 - google.golang.org/protobuf v1.36.6 + go.uber.org/zap v1.27.1 + golang.org/x/oauth2 v0.32.0 + golang.org/x/text v0.31.0 + google.golang.org/grpc v1.78.0 + google.golang.org/protobuf v1.36.10 ) require ( @@ -25,7 +25,7 @@ require ( filippo.io/edwards25519 v1.1.0 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/aws/aws-lambda-go v1.47.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect + github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect github.com/aws/aws-sdk-go-v2/config v1.29.2 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.55 // indirect @@ -44,9 +44,10 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 // indirect - github.com/aws/smithy-go v1.22.2 // indirect + github.com/aws/smithy-go v1.24.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/conductorone/dpop v0.2.3 // indirect github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 // indirect github.com/conductorone/dpop/integrations/dpop_oauth2 v0.2.3 // indirect @@ -57,8 +58,8 @@ require ( github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/glebarez/go-sqlite v1.22.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -66,7 +67,7 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jellydator/ttlcache/v3 v3.3.0 // indirect @@ -99,30 +100,30 @@ require ( github.com/tklauser/go-sysconf v0.3.16 // indirect github.com/tklauser/numcpus v0.11.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect - go.opentelemetry.io/otel/log v0.11.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/sdk v1.35.0 // indirect - 
go.opentelemetry.io/otel/sdk/log v0.11.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect - go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.14.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 // indirect + go.opentelemetry.io/otel/log v0.15.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk v1.39.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.15.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/ratelimit v0.3.1 // indirect - golang.org/x/crypto v0.34.0 // indirect + golang.org/x/crypto v0.44.0 // indirect golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect - golang.org/x/net v0.35.0 // indirect - golang.org/x/sync v0.13.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.33.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.37.0 // indirect golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 45850e97..9f27ec62 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= +github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= github.com/aws/aws-sdk-go-v2/config v1.29.2 h1:JuIxOEPcSKpMB0J+khMjznG9LIhIBdmqNiEcPclnwqc= @@ -50,18 +50,20 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 h1:mUwIpAvILeKFnRx4h1dEgGE github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11/go.mod h1:JDJtD+b8HNVv71axz8+S5492KM8wTzHRFpMKQbPlYxw= github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 h1:g9d+TOsu3ac7SgmY2dUf1qMgu/uJVTlQ4VCbH6hRxSw= github.com/aws/aws-sdk-go-v2/service/sts v1.33.10/go.mod 
h1:WZfNmntu92HO44MVZAubQaz3qCuIdeOdog2sADfU6hU= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/conductorone/baton-sdk v0.7.10 h1:eK/RTU8CZyTosYSNYmDzfAahGnxqpOq6rheBcwTx7w0= -github.com/conductorone/baton-sdk v0.7.10/go.mod h1:9S5feBOuIJxlNdGmkv3ObkCNHbVyOHr6foNrIrk+d4Y= +github.com/conductorone/baton-sdk v0.7.12 h1:LU9MZaYoxVZyAWMtTJE/i74FKp5Xp1kX2s7p0iBbrog= +github.com/conductorone/baton-sdk v0.7.12/go.mod h1:agmFrml6APUw4ZlqMEBrnXYj3aAOGKOJ6gztiNj64h0= github.com/conductorone/dpop v0.2.3 h1:s91U3845GHQ6P6FWrdNr2SEOy1ES/jcFs1JtKSl2S+o= github.com/conductorone/dpop v0.2.3/go.mod h1:gyo8TtzB9SCFCsjsICH4IaLZ7y64CcrDXMOPBwfq/3s= github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 h1:kLMCNIh0Mo2vbvvkCmJ3ixsPbXEJ6HPcW53Ku9yje3s= @@ -96,13 +98,13 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/ github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -145,8 +147,8 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -210,8 +212,8 @@ github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4l github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= @@ -260,36 +262,40 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU= -go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod 
h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 h1:HMUytBT3uGhPKYY/u/G5MR9itrlSO2SMOsSD3Tk3k7A= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0/go.mod h1:hdDXsiNLmdW/9BF2jQpnHHlhFajpWCEYfM6e5m2OAZg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y= -go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/log v0.11.0 h1:7bAOpjpGglWhdEzP8z0VXc4jObOiDEwr3IYbhBnjk2c= -go.opentelemetry.io/otel/sdk/log v0.11.0/go.mod h1:dndLTxZbwBstZoqsJB3kGsRPkpAgaJrWfQg3lhlHFFY= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/bridges/otelzap v0.14.0 h1:2nKw2ZXZOC0N8RBsBbYwGwfKR7kJWzzyCZ6QfUGW/es= +go.opentelemetry.io/contrib/bridges/otelzap v0.14.0/go.mod h1:kvyVt0WEI5BB6XaIStXPIkCSQ2nSkyd8IZnAHLEXge4= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0 h1:W+m0g+/6v3pa5PgVf2xoFMi5YtNR06WtS7ve5pcvLtM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0/go.mod h1:JM31r0GGZ/GU94mX8hN4D8v6e40aFlUECSQ48HaLgHM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod 
h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 h1:5gn2urDL/FBnK8OkCfD1j3/ER79rUuTYmCvlXBKeYL8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0/go.mod h1:0fBG6ZJxhqByfFZDwSwpZGzJU671HkwpWaNe2t4VUPI= +go.opentelemetry.io/otel/log v0.15.0 h1:0VqVnc3MgyYd7QqNVIldC3dsLFKgazR6P3P3+ypkyDY= +go.opentelemetry.io/otel/log v0.15.0/go.mod h1:9c/G1zbyZfgu1HmQD7Qj84QMmwTp2QCQsZH1aeoWDE4= +go.opentelemetry.io/otel/log/logtest v0.15.0 h1:porNFuxAjodl6LhePevOc3n7bo3Wi3JhGXNWe7KP8iU= +go.opentelemetry.io/otel/log/logtest v0.15.0/go.mod h1:c8epqBXGHgS1LiNgmD+LuNYK9lSS3mqvtMdxLsfJgLg= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/log v0.15.0 h1:WgMEHOUt5gjJE93yqfqJOkRflApNif84kxoHWS9VVHE= +go.opentelemetry.io/otel/sdk/log v0.15.0/go.mod h1:qDC/FlKQCXfH5hokGsNg9aUBGMJQsrUyeOiW5u+dKBQ= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -302,15 +308,15 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0= go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.34.0 h1:+/C6tk6rf/+t5DhUketUbD1aNGqiSX3j15Z6xuIDlBA= -golang.org/x/crypto v0.34.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc= golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= @@ -322,8 +328,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -332,18 +338,18 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= -golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync 
v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -354,14 +360,14 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -373,30 +379,32 @@ golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= 
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= -google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 h1:DMTIbak9GhdaSxEjvVzAeNZvyc03I61duqNbnm3SU0M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= -google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index a015cc5b..3219517d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -6,6 +6,7 @@ import ( smithybearer "github.com/aws/smithy-go/auth/bearer" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" ) // 
HTTPClient provides the interface to provide custom HTTPClients. Generally @@ -192,6 +193,17 @@ type Config struct { // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or // the shared config profile attribute "response_checksum_validation". ResponseChecksumValidation ResponseChecksumValidation + + // Registry of HTTP interceptors. + Interceptors smithyhttp.InterceptorRegistry + + // Priority list of preferred auth scheme IDs. + AuthSchemePreference []string + + // ServiceOptions provides service specific configuration options that will be applied + // when constructing clients for specific services. Each callback function receives the service ID + // and the service's Options struct, allowing for dynamic configuration based on the service. + ServiceOptions []func(string, any) } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go index 4ad2ee44..9f94cfe0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -118,6 +118,10 @@ const ( CredentialSourceHTTP // CredentialSourceIMDS credentials resolved from the instance metadata service (IMDS) CredentialSourceIMDS + // CredentialSourceProfileLogin credentials resolved from an `aws login` session sourced from a profile + CredentialSourceProfileLogin + // CredentialSourceLogin credentials resolved from an `aws login` session + CredentialSourceLogin ) // A Credentials is the AWS credentials value for individual credential fields. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 8e930fc6..c9d0bdc4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.36.3" +const goModuleVersion = "1.41.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index 6ee3391b..157a7150 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -135,6 +135,11 @@ const ( UserAgentFeatureCredentialsAwsSdkStore = "y" // n/a (this is used by .NET based sdk) UserAgentFeatureCredentialsHTTP = "z" UserAgentFeatureCredentialsIMDS = "0" + + UserAgentFeatureBearerServiceEnvVars = "3" + + UserAgentFeatureCredentialsProfileLogin = "AC" + UserAgentFeatureCredentialsLogin = "AD" ) var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{ @@ -158,6 +163,8 @@ var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{ aws.CredentialSourceProcess: UserAgentFeatureCredentialsProcess, aws.CredentialSourceHTTP: UserAgentFeatureCredentialsHTTP, aws.CredentialSourceIMDS: UserAgentFeatureCredentialsIMDS, + aws.CredentialSourceProfileLogin: UserAgentFeatureCredentialsProfileLogin, + aws.CredentialSourceLogin: UserAgentFeatureCredentialsLogin, } // RequestUserAgent is a build middleware that set the User-Agent for the request. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index 52d59b04..5549922a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -260,7 +260,7 @@ func (r *Attempt) handleAttempt( // Get a retry token that will be released after the releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err) if retryTokenErr != nil { - return out, attemptResult, nopRelease, retryTokenErr + return out, attemptResult, nopRelease, errors.Join(err, retryTokenErr) } //------------------------------ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go index 66aa2bd6..32875e07 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go @@ -59,7 +59,7 @@ func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte prevSignature := s.prevSignature - st := v4Internal.NewSigningTime(signingTime) + st := v4Internal.NewSigningTime(signingTime.UTC()) sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go index 8d7c35a9..c7ef0acc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go @@ -18,6 +18,7 @@ var ( // Default connection pool options DefaultHTTPTransportMaxIdleConns = 100 DefaultHTTPTransportMaxIdleConnsPerHost = 10 + DefaultHTTPTransportMaxConnsPerHost = 2048 // Default connection timeouts DefaultHTTPTransportIdleConnTimeout = 90 * time.Second @@ -186,6 +187,7 @@ func defaultHTTPTransport() *http.Transport { TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout, MaxIdleConns: DefaultHTTPTransportMaxIdleConns, MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost, + MaxConnsPerHost: DefaultHTTPTransportMaxConnsPerHost, IdleConnTimeout: DefaultHTTPTransportIdleConnTimeout, ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout, ForceAttemptHTTP2: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go index 993929bd..4881ae14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go @@ -64,6 +64,11 @@ func (r *timeoutReadCloser) Close() error { // AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the // response body so that a read that takes too long will return an error. +// +// Deprecated: This API was previously exposed to customize behavior of the +// Kinesis service. That customization has been removed and this middleware's +// implementation can cause panics within the standard library networking loop. +// See #2752. 
func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error { return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 5f077999..6ab4d966 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -3,7 +3,8 @@ package awsrulesfn // GetPartition returns an AWS [Partition] for the region provided. If the -// partition cannot be determined nil will be returned. +// partition cannot be determined then the default partition (AWS commercial) +// will be returned. func GetPartition(region string) *PartitionConfig { return getPartition(partitions, region) } @@ -11,7 +12,7 @@ func GetPartition(region string) *PartitionConfig { var partitions = []Partition{ { ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + RegionRegex: "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ Name: "aws", DnsSuffix: "amazonaws.com", @@ -35,6 +36,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-east-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "ap-northeast-1": { Name: nil, DnsSuffix: nil, @@ -98,6 +106,27 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-5": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-6": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-7": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "aws-global": { Name: nil, DnsSuffix: nil, @@ -196,6 +225,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "mx-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "sa-east-1": { Name: nil, DnsSuffix: nil, @@ -269,32 +305,18 @@ var partitions = []Partition{ }, }, { - ID: "aws-us-gov", - RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + ID: "aws-eusc", + RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", + Name: "aws-eusc", + DnsSuffix: "amazonaws.eu", + DualStackDnsSuffix: "api.amazonwebservices.eu", SupportsFIPS: true, SupportsDualStack: true, - ImplicitGlobalRegion: "us-gov-west-1", + ImplicitGlobalRegion: "eusc-de-east-1", }, Regions: map[string]RegionOverrides{ - "aws-us-gov-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": { + "eusc-de-east-1": { Name: nil, DnsSuffix: nil, DualStackDnsSuffix: nil, @@ -309,9 +331,9 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso", DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "api.aws.ic.gov", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-iso-east-1", }, 
Regions: map[string]RegionOverrides{ @@ -344,9 +366,9 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-b", DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "api.aws.scloud", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-isob-east-1", }, Regions: map[string]RegionOverrides{ @@ -364,6 +386,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "us-isob-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, }, }, { @@ -372,12 +401,19 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-e", DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "api.cloud-aws.adc-e.uk", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "eu-isoe-west-1", }, Regions: map[string]RegionOverrides{ + "aws-iso-e-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-isoe-west-1": { Name: nil, DnsSuffix: nil, @@ -393,11 +429,68 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-f", DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "api.aws.hci.ic.gov", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-isof-south-1", }, - Regions: map[string]RegionOverrides{}, + Regions: map[string]RegionOverrides{ + "aws-iso-f-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", + }, + Regions: map[string]RegionOverrides{ + "aws-us-gov-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index e19224f1..c789264d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -17,6 +17,9 @@ "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, + "ap-east-2" : { + "description" : "Asia Pacific (Taipei)" + }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, @@ -47,11 +50,14 @@ "ap-southeast-5" : { "description" : "Asia Pacific (Malaysia)" }, + "ap-southeast-6" : { + "description" : "Asia Pacific (New Zealand)" + }, "ap-southeast-7" : { "description" : "Asia Pacific 
(Thailand)" }, "aws-global" : { - "description" : "AWS Standard global region" + "description" : "aws global region" }, "ca-central-1" : { "description" : "Canada (Central)" @@ -124,7 +130,7 @@ "regionRegex" : "^cn\\-\\w+\\-\\d+$", "regions" : { "aws-cn-global" : { - "description" : "AWS China global region" + "description" : "aws-cn global region" }, "cn-north-1" : { "description" : "China (Beijing)" @@ -134,41 +140,35 @@ } } }, { - "id" : "aws-us-gov", + "id" : "aws-eusc", "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-gov-west-1", - "name" : "aws-us-gov", + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "api.amazonwebservices.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", "regions" : { - "aws-us-gov-global" : { - "description" : "AWS GovCloud (US) global region" - }, - "us-gov-east-1" : { - "description" : "AWS GovCloud (US-East)" - }, - "us-gov-west-1" : { - "description" : "AWS GovCloud (US-West)" + "eusc-de-east-1" : { + "description" : "EU (Germany)" } } }, { "id" : "aws-iso", "outputs" : { "dnsSuffix" : "c2s.ic.gov", - "dualStackDnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "api.aws.ic.gov", "implicitGlobalRegion" : "us-iso-east-1", "name" : "aws-iso", - "supportsDualStack" : false, + "supportsDualStack" : true, "supportsFIPS" : true }, "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", "regions" : { "aws-iso-global" : { - "description" : "AWS ISO (US) global region" + "description" : "aws-iso global region" }, "us-iso-east-1" : { "description" : "US ISO East" @@ -181,33 +181,39 @@ "id" : "aws-iso-b", "outputs" : { "dnsSuffix" : "sc2s.sgov.gov", - "dualStackDnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "api.aws.scloud", "implicitGlobalRegion" : "us-isob-east-1", "name" : "aws-iso-b", - "supportsDualStack" : false, + "supportsDualStack" : true, "supportsFIPS" : true }, "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", "regions" : { "aws-iso-b-global" : { - "description" : "AWS ISOB (US) global region" + "description" : "aws-iso-b global region" }, "us-isob-east-1" : { "description" : "US ISOB East (Ohio)" + }, + "us-isob-west-1" : { + "description" : "US ISOB West" } } }, { "id" : "aws-iso-e", "outputs" : { "dnsSuffix" : "cloud.adc-e.uk", - "dualStackDnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "api.cloud-aws.adc-e.uk", "implicitGlobalRegion" : "eu-isoe-west-1", "name" : "aws-iso-e", - "supportsDualStack" : false, + "supportsDualStack" : true, "supportsFIPS" : true }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { + "aws-iso-e-global" : { + "description" : "aws-iso-e global region" + }, "eu-isoe-west-1" : { "description" : "EU ISOE West" } @@ -216,16 +222,16 @@ "id" : "aws-iso-f", "outputs" : { "dnsSuffix" : "csp.hci.ic.gov", - "dualStackDnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "api.aws.hci.ic.gov", "implicitGlobalRegion" : "us-isof-south-1", "name" : "aws-iso-f", - "supportsDualStack" : false, + "supportsDualStack" : true, "supportsFIPS" : true }, "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", "regions" : { "aws-iso-f-global" : { - "description" : "AWS ISOF global region" + "description" : "aws-iso-f global region" }, "us-isof-east-1" : { "description" : "US ISOF EAST" @@ -234,6 +240,28 @@ "description" : "US ISOF SOUTH" } } + }, { + "id" : "aws-us-gov", + "outputs" : { + "dnsSuffix" : 
"amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "aws-us-gov-global" : { + "description" : "aws-us-gov global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + } } ], "version" : "1.1" } \ No newline at end of file diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index de39171c..80af245f 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,72 @@ +# Release (2025-12-01) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.24.0 + * **Feature**: Improve allocation footprint of the middleware stack. This should convey a ~10% reduction in allocations per SDK request. + +# Release (2025-11-03) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.23.2 + * **Bug Fix**: Adjust the initial sizes of each middleware phase to avoid some unnecessary reallocation. + * **Bug Fix**: Avoid unnecessary allocation overhead from the metrics system when not in use. + +# Release (2025-10-15) + +## General Highlights +* **Dependency Update**: Bump minimum go version to 1.23. +* **Dependency Update**: Updated to the latest SDK module versions + +# Release (2025-09-18) + +## Module Highlights +* `github.com/aws/smithy-go/aws-http-auth`: [v1.1.0](aws-http-auth/CHANGELOG.md#v110-2025-09-18) + * **Feature**: Added support for SIG4/SIGV4A querystring authentication. + +# Release (2025-08-27) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.23.0 + * **Feature**: Sort map keys in JSON Document types. + +# Release (2025-07-24) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.5 + * **Feature**: Add HTTP interceptors. + +# Release (2025-06-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.4 + * **Bug Fix**: Fix CBOR serd empty check for string and enum fields + * **Bug Fix**: Fix HTTP metrics data race. + * **Bug Fix**: Replace usages of deprecated ioutil package. + +# Release (2025-02-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.3 + * **Dependency Update**: Bump minimum Go version to 1.22 per our language support policy. 
+ # Release (2025-01-21) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile index a3c2cf17..a12b124d 100644 --- a/vendor/github.com/aws/smithy-go/Makefile +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -13,6 +13,7 @@ REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${R REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_EACHMODULE = ${REPOTOOLS_MODULE}/cmd/eachmodule@${REPOTOOLS_VERSION} UNIT_TEST_TAGS= BUILD_TAGS= @@ -30,6 +31,24 @@ smithy-build: smithy-clean: cd codegen && ./gradlew clean +GRADLE_RETRIES := 3 +GRADLE_SLEEP := 2 + +# We're making a call to ./gradlew to trigger downloading Gradle and +# starting the daemon. Any call works, so using `./gradlew help` +ensure-gradle-up: + @cd codegen && for i in $(shell seq 1 $(GRADLE_RETRIES)); do \ + echo "Checking if Gradle daemon is up, attempt $$i..."; \ + if ./gradlew help; then \ + echo "Gradle daemon is up!"; \ + exit 0; \ + fi; \ + echo "Failed to start Gradle, retrying in $(GRADLE_SLEEP) seconds..."; \ + sleep $(GRADLE_SLEEP); \ + done; \ + echo "Failed to start Gradle after $(GRADLE_RETRIES) attempts."; \ + exit 1 + ################## # Linting/Verify # ################## @@ -37,8 +56,11 @@ smithy-clean: verify: vet -vet: - go vet ${BUILD_TAGS} --all ./... +vet: vet-modules-. + +vet-modules-%: + go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst vet-modules-,,$@) \ + "go vet ${BUILD_TAGS} --all ./..." cover: go test ${BUILD_TAGS} -coverprofile c.out ./... @@ -48,23 +70,22 @@ cover: ################ # Unit Testing # ################ -.PHONY: unit unit-race unit-test unit-race-test +.PHONY: test unit unit-race + +test: unit-race + +unit: verify unit-modules-. -unit: verify - go vet ${BUILD_TAGS} --all ./... && \ - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} ./... +unit-modules-%: + go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst unit-modules-,,$@) \ + "go test -timeout=1m ${UNIT_TEST_TAGS} ./..." -unit-race: verify - go vet ${BUILD_TAGS} --all ./... && \ - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... +unit-race: verify unit-race-modules-. -unit-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} ./... +unit-race-modules-%: + go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst unit-race-modules-,,$@) \ + "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." -unit-race-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... ##################### # Release Process # diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index 08df7458..ddce37b9 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -4,19 +4,21 @@ [Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. -The smithy-go runtime requires a minimum version of Go 1.20. +The smithy-go runtime requires a minimum version of Go 1.23. **WARNING: All interfaces are subject to change.** -## Can I use the code generators? 
+## :no_entry_sign: DO NOT use the code generators in this repository + +**The code generators in this repository do not generate working clients at +this time.** In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), in order to generate transport mechanisms and serialization/deserialization code ("serde") accordingly. -The code generator does not currently support any protocols out of the box other than the new `smithy.protocols#rpcv2Cbor`, -therefore the useability of this project on its own is currently limited. +The code generator does not currently support any protocols out of the box. Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are tracking the movement of those out of the SDK into smithy-go in @@ -31,6 +33,7 @@ This repository implements the following Smithy build plugins: |----|------------|-------------| | `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. | | `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. | +| `go-shape-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go shape code generation (types only) for Smithy models. | **NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.** @@ -77,7 +80,7 @@ example created from `smithy init`: "service": "example.weather#Weather", "module": "github.com/example/weather", "generateGoMod": true, - "goDirective": "1.20" + "goDirective": "1.23" } } } @@ -87,6 +90,10 @@ example created from `smithy init`: This plugin is a work-in-progress and is currently undocumented. +## `go-shape-codegen` + +This plugin is a work-in-progress and is currently undocumented. + ## License This project is licensed under the Apache-2.0 License. diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go index a9352839..f778272b 100644 --- a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go +++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go @@ -9,7 +9,7 @@ import ( // Endpoint is the endpoint object returned by Endpoint resolution V2 type Endpoint struct { - // The complete URL minimally specfiying the scheme and host. + // The complete URL minimally specifying the scheme and host. // May optionally specify the port and base path component. 
URI url.URL diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index a51ceca4..b6c4c2f5 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.2" +const goModuleVersion = "1.24.0" diff --git a/vendor/github.com/aws/smithy-go/metrics/nop.go b/vendor/github.com/aws/smithy-go/metrics/nop.go index fb374e1f..444126df 100644 --- a/vendor/github.com/aws/smithy-go/metrics/nop.go +++ b/vendor/github.com/aws/smithy-go/metrics/nop.go @@ -9,54 +9,82 @@ var _ MeterProvider = (*NopMeterProvider)(nil) // Meter returns a meter which creates no-op instruments. func (NopMeterProvider) Meter(string, ...MeterOption) Meter { - return nopMeter{} + return NopMeter{} } -type nopMeter struct{} +// NopMeter creates no-op instruments. +type NopMeter struct{} -var _ Meter = (*nopMeter)(nil) +var _ Meter = (*NopMeter)(nil) -func (nopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) { - return nopInstrument[int64]{}, nil +// Int64Counter creates a no-op instrument. +func (NopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) { + return nopInstrumentInt64, nil } -func (nopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) { - return nopInstrument[int64]{}, nil + +// Int64UpDownCounter creates a no-op instrument. +func (NopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) { + return nopInstrumentInt64, nil } -func (nopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) { - return nopInstrument[int64]{}, nil + +// Int64Gauge creates a no-op instrument. +func (NopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) { + return nopInstrumentInt64, nil } -func (nopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) { - return nopInstrument[int64]{}, nil + +// Int64Histogram creates a no-op instrument. +func (NopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) { + return nopInstrumentInt64, nil } -func (nopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil + +// Int64AsyncCounter creates a no-op instrument. +func (NopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrumentInt64, nil } -func (nopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil + +// Int64AsyncUpDownCounter creates a no-op instrument. +func (NopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrumentInt64, nil } -func (nopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil + +// Int64AsyncGauge creates a no-op instrument. +func (NopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrumentInt64, nil } -func (nopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) { - return nopInstrument[float64]{}, nil + +// Float64Counter creates a no-op instrument. 
+func (NopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) { + return nopInstrumentFloat64, nil } -func (nopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) { - return nopInstrument[float64]{}, nil + +// Float64UpDownCounter creates a no-op instrument. +func (NopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) { + return nopInstrumentFloat64, nil } -func (nopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) { - return nopInstrument[float64]{}, nil + +// Float64Gauge creates a no-op instrument. +func (NopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) { + return nopInstrumentFloat64, nil } -func (nopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) { - return nopInstrument[float64]{}, nil + +// Float64Histogram creates a no-op instrument. +func (NopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) { + return nopInstrumentFloat64, nil } -func (nopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil + +// Float64AsyncCounter creates a no-op instrument. +func (NopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrumentFloat64, nil } -func (nopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil + +// Float64AsyncUpDownCounter creates a no-op instrument. +func (NopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrumentFloat64, nil } -func (nopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil + +// Float64AsyncGauge creates a no-op instrument. 
+func (NopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrumentFloat64, nil } type nopInstrument[N any] struct{} @@ -65,3 +93,6 @@ func (nopInstrument[N]) Add(context.Context, N, ...RecordMetricOption) {} func (nopInstrument[N]) Sample(context.Context, N, ...RecordMetricOption) {} func (nopInstrument[N]) Record(context.Context, N, ...RecordMetricOption) {} func (nopInstrument[_]) Stop() {} + +var nopInstrumentInt64 = nopInstrument[int64]{} +var nopInstrumentFloat64 = nopInstrument[float64]{} diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go index 4b195308..daf90136 100644 --- a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go +++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go @@ -23,12 +23,14 @@ type orderedIDs struct { items map[string]ider } -const baseOrderedItems = 5 +// selected based on the general upper bound of # of middlewares in each step +// in the downstream aws-sdk-go-v2 +const baseOrderedItems = 8 -func newOrderedIDs() *orderedIDs { +func newOrderedIDs(cap int) *orderedIDs { return &orderedIDs{ - order: newRelativeOrder(), - items: make(map[string]ider, baseOrderedItems), + order: newRelativeOrder(cap), + items: make(map[string]ider, cap), } } @@ -141,9 +143,9 @@ type relativeOrder struct { order []string } -func newRelativeOrder() *relativeOrder { +func newRelativeOrder(cap int) *relativeOrder { return &relativeOrder{ - order: make([]string, 0, baseOrderedItems), + order: make([]string, 0, cap), } } diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go index 7e1d94ca..db8c2671 100644 --- a/vendor/github.com/aws/smithy-go/middleware/step_build.go +++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go @@ -1,7 +1,9 @@ +// Code generated by smithy-go/middleware/generate.go DO NOT EDIT. package middleware import ( "context" + "fmt" ) // BuildInput provides the input parameters for the BuildMiddleware to consume. @@ -25,14 +27,14 @@ type BuildHandler interface { } // BuildMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next BuildHandler for further +// build step. Delegates to the next BuildHandler for further // processing. type BuildMiddleware interface { - // Unique ID for the middleware in theBuildStep. The step does not allow - // duplicate IDs. + // ID returns a unique ID for the middleware in the BuildStep. The step does not + // allow duplicate IDs. ID() string - // Invokes the middleware behavior which must delegate to the next handler + // HandleBuild invokes the middleware behavior which must delegate to the next handler // for the middleware chain to continue. The method must return a result or // error to its caller. HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( @@ -54,7 +56,9 @@ type buildMiddlewareFunc struct { id string // Middleware function to be called. - fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error) + fn func(context.Context, BuildInput, BuildHandler) ( + BuildOutput, Metadata, error, + ) } // ID returns the unique ID for the middleware. @@ -69,23 +73,22 @@ func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, nex var _ BuildMiddleware = (buildMiddlewareFunc{}) -// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on -// a handler. 
+// BuildStep provides the ordered grouping of BuildMiddleware to be +// invoked on a handler. type BuildStep struct { - ids *orderedIDs + head *decoratedBuildHandler + tail *decoratedBuildHandler } -// NewBuildStep returns a BuildStep ready to have middleware for -// initialization added to it. +// NewBuildStep returns an BuildStep ready to have middleware for +// build added to it. func NewBuildStep() *BuildStep { - return &BuildStep{ - ids: newOrderedIDs(), - } + return &BuildStep{} } var _ Middleware = (*BuildStep)(nil) -// ID returns the unique name of the step as a middleware. +// ID returns the unique ID of the step as a middleware. func (s *BuildStep) ID() string { return "Build stack step" } @@ -97,77 +100,161 @@ func (s *BuildStep) ID() string { func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( out interface{}, metadata Metadata, err error, ) { - order := s.ids.GetOrder() - - var h BuildHandler = buildWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedBuildHandler{ - Next: h, - With: order[i].(BuildMiddleware), - } - } - sIn := BuildInput{ Request: in, } - res, metadata, err := h.HandleBuild(ctx, sIn) + wh := &buildWrapHandler{next} + if s.head == nil { + res, metadata, err := wh.HandleBuild(ctx, sIn) + return res.Result, metadata, err + } + + s.tail.Next = wh + res, metadata, err := s.head.HandleBuild(ctx, sIn) return res.Result, metadata, err } // Get retrieves the middleware identified by id. If the middleware is not present, returns false. func (s *BuildStep) Get(id string) (BuildMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { + found, _ := s.get(id) + if found == nil { return nil, false } - return get.(BuildMiddleware), ok + + return found.With, true } // Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. +// +// Add never returns an error. It used to for duplicate phases but this +// behavior has since been removed as part of a performance optimization. The +// return value from Add can be ignored. func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) + if s.head == nil { + s.head = &decoratedBuildHandler{nil, m} + s.tail = s.head + return nil + } + + if pos == Before { + s.head = &decoratedBuildHandler{s.head, m} + } else { + tail := &decoratedBuildHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } + + return nil } -// Insert injects the middleware relative to an existing middleware id. -// Returns an error if the original middleware does not exist, or the middleware +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware // being added already exists. 
func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) + found, prev := s.get(relativeTo) + if found == nil { + return fmt.Errorf("not found: %s", m.ID()) + } + + if pos == Before { + if prev == nil { // at the front + s.head = &decoratedBuildHandler{s.head, m} + } else { // somewhere in the middle + prev.Next = &decoratedBuildHandler{found, m} + } + } else { + if found.Next == nil { // at the end + tail := &decoratedBuildHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } else { // somewhere in the middle + found.Next = &decoratedBuildHandler{found.Next, m} + } + } + + return nil } // Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or an error if the middleware to be removed +// Returns the middleware removed, or error if the middleware to be removed // doesn't exist. func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err + found, _ := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", m.ID()) } - return removed.(BuildMiddleware), nil + swapped := found.With + found.With = m + return swapped, nil } // Remove removes the middleware by id. Returns error if the middleware // doesn't exist. func (s *BuildStep) Remove(id string) (BuildMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err + found, prev := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", id) } - return removed.(BuildMiddleware), nil + if s.head == s.tail { // it's the only one + s.head = nil + s.tail = nil + } else if found == s.head { // at the front + s.head = s.head.Next.(*decoratedBuildHandler) + } else if found == s.tail { // at the end + prev.Next = nil + s.tail = prev + } else { + prev.Next = found.Next // somewhere in the middle + } + + return found.With, nil } // List returns a list of the middleware in the step. func (s *BuildStep) List() []string { - return s.ids.List() + var ids []string + for h := s.head; h != nil; { + ids = append(ids, h.With.ID()) + if h.Next == nil { + break + } + + // once executed, tail.Next of the list will be set to an + // *buildWrapHandler, make sure to check for that + if hnext, ok := h.Next.(*decoratedBuildHandler); ok { + h = hnext + } else { + break + } + } + return ids } // Clear removes all middleware in the step. func (s *BuildStep) Clear() { - s.ids.Clear() + s.head = nil + s.tail = nil +} + +func (s *BuildStep) get(id string) (found, prev *decoratedBuildHandler) { + for h := s.head; h != nil; { + if h.With.ID() == id { + found = h + return + } + prev = h + if h.Next == nil { + return + } + + // once executed, tail.Next of the list will be set to an + // *buildWrapHandler + h, _ = h.Next.(*decoratedBuildHandler) + } + return } type buildWrapHandler struct { @@ -176,7 +263,7 @@ type buildWrapHandler struct { var _ BuildHandler = (*buildWrapHandler)(nil) -// Implements BuildHandler, converts types and delegates to underlying +// HandleBuild implements BuildHandler, converts types and delegates to underlying // generic handler. 
func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) ( out BuildOutput, metadata Metadata, err error, @@ -200,12 +287,12 @@ func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) ( return h.With.HandleBuild(ctx, in, h.Next) } -// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler. +// BuildHandlerFunc provides a wrapper around a function to be used as buildMiddleware. type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error) -// HandleBuild invokes the wrapped function with the provided arguments. -func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) { - return b(ctx, in) +// HandleBuild calls the wrapped function with the provided arguments. +func (f BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) { + return f(ctx, in) } var _ BuildHandler = BuildHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go index 44860721..1f337f2d 100644 --- a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go +++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go @@ -1,7 +1,9 @@ +// Code generated by smithy-go/middleware/generate.go DO NOT EDIT. package middleware import ( "context" + "fmt" ) // DeserializeInput provides the input parameters for the DeserializeInput to @@ -11,10 +13,7 @@ type DeserializeInput struct { Request interface{} } -// DeserializeOutput provides the result returned by the next -// DeserializeHandler. The DeserializeMiddleware should deserialize the -// RawResponse into a Result that can be consumed by middleware higher up in -// the stack. +// DeserializeOutput provides the result returned by the next DeserializeHandler. type DeserializeOutput struct { RawResponse interface{} Result interface{} @@ -29,7 +28,7 @@ type DeserializeHandler interface { } // DeserializeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next DeserializeHandler for further +// deserialize step. Delegates to the next DeserializeHandler for further // processing. type DeserializeMiddleware interface { // ID returns a unique ID for the middleware in the DeserializeStep. The step does not @@ -44,8 +43,8 @@ type DeserializeMiddleware interface { ) } -// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID -// provided, and the func to be invoked. +// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID provided, +// and the func to be invoked. func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { return deserializeMiddlewareFunc{ id: id, @@ -78,15 +77,14 @@ var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) // DeserializeStep provides the ordered grouping of DeserializeMiddleware to be // invoked on a handler. type DeserializeStep struct { - ids *orderedIDs + head *decoratedDeserializeHandler + tail *decoratedDeserializeHandler } -// NewDeserializeStep returns a DeserializeStep ready to have middleware for -// initialization added to it. +// NewDeserializeStep returns an DeserializeStep ready to have middleware for +// deserialize added to it. 
func NewDeserializeStep() *DeserializeStep { - return &DeserializeStep{ - ids: newOrderedIDs(), - } + return &DeserializeStep{} } var _ Middleware = (*DeserializeStep)(nil) @@ -103,77 +101,161 @@ func (s *DeserializeStep) ID() string { func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( out interface{}, metadata Metadata, err error, ) { - order := s.ids.GetOrder() - - var h DeserializeHandler = deserializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedDeserializeHandler{ - Next: h, - With: order[i].(DeserializeMiddleware), - } - } - sIn := DeserializeInput{ Request: in, } - res, metadata, err := h.HandleDeserialize(ctx, sIn) + wh := &deserializeWrapHandler{next} + if s.head == nil { + res, metadata, err := wh.HandleDeserialize(ctx, sIn) + return res.Result, metadata, err + } + + s.tail.Next = wh + res, metadata, err := s.head.HandleDeserialize(ctx, sIn) return res.Result, metadata, err } // Get retrieves the middleware identified by id. If the middleware is not present, returns false. func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { + found, _ := s.get(id) + if found == nil { return nil, false } - return get.(DeserializeMiddleware), ok + + return found.With, true } // Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. +// +// Add never returns an error. It used to for duplicate phases but this +// behavior has since been removed as part of a performance optimization. The +// return value from Add can be ignored. func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) + if s.head == nil { + s.head = &decoratedDeserializeHandler{nil, m} + s.tail = s.head + return nil + } + + if pos == Before { + s.head = &decoratedDeserializeHandler{s.head, m} + } else { + tail := &decoratedDeserializeHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } + + return nil } // Insert injects the middleware relative to an existing middleware ID. // Returns error if the original middleware does not exist, or the middleware // being added already exists. func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) + found, prev := s.get(relativeTo) + if found == nil { + return fmt.Errorf("not found: %s", m.ID()) + } + + if pos == Before { + if prev == nil { // at the front + s.head = &decoratedDeserializeHandler{s.head, m} + } else { // somewhere in the middle + prev.Next = &decoratedDeserializeHandler{found, m} + } + } else { + if found.Next == nil { // at the end + tail := &decoratedDeserializeHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } else { // somewhere in the middle + found.Next = &decoratedDeserializeHandler{found.Next, m} + } + } + + return nil } // Swap removes the middleware by id, replacing it with the new middleware. // Returns the middleware removed, or error if the middleware to be removed // doesn't exist. func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err + found, _ := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", m.ID()) } - return removed.(DeserializeMiddleware), nil + swapped := found.With + found.With = m + return swapped, nil } // Remove removes the middleware by id. 
Returns error if the middleware // doesn't exist. func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err + found, prev := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", id) + } + + if s.head == s.tail { // it's the only one + s.head = nil + s.tail = nil + } else if found == s.head { // at the front + s.head = s.head.Next.(*decoratedDeserializeHandler) + } else if found == s.tail { // at the end + prev.Next = nil + s.tail = prev + } else { + prev.Next = found.Next // somewhere in the middle } - return removed.(DeserializeMiddleware), nil + return found.With, nil } // List returns a list of the middleware in the step. func (s *DeserializeStep) List() []string { - return s.ids.List() + var ids []string + for h := s.head; h != nil; { + ids = append(ids, h.With.ID()) + if h.Next == nil { + break + } + + // once executed, tail.Next of the list will be set to an + // *deserializeWrapHandler, make sure to check for that + if hnext, ok := h.Next.(*decoratedDeserializeHandler); ok { + h = hnext + } else { + break + } + } + return ids } // Clear removes all middleware in the step. func (s *DeserializeStep) Clear() { - s.ids.Clear() + s.head = nil + s.tail = nil +} + +func (s *DeserializeStep) get(id string) (found, prev *decoratedDeserializeHandler) { + for h := s.head; h != nil; { + if h.With.ID() == id { + found = h + return + } + prev = h + if h.Next == nil { + return + } + + // once executed, tail.Next of the list will be set to an + // *deserializeWrapHandler + h, _ = h.Next.(*decoratedDeserializeHandler) + } + return } type deserializeWrapHandler struct { @@ -187,9 +269,10 @@ var _ DeserializeHandler = (*deserializeWrapHandler)(nil) func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( out DeserializeOutput, metadata Metadata, err error, ) { - resp, metadata, err := w.Next.Handle(ctx, in.Request) + res, metadata, err := w.Next.Handle(ctx, in.Request) return DeserializeOutput{ - RawResponse: resp, + RawResponse: res, + Result: nil, }, metadata, err } @@ -206,12 +289,12 @@ func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in D return h.With.HandleDeserialize(ctx, in, h.Next) } -// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler. +// DeserializeHandlerFunc provides a wrapper around a function to be used as deserializeMiddleware. type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error) -// HandleDeserialize invokes the wrapped function with the given arguments. -func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) { - return d(ctx, in) +// HandleDeserialize calls the wrapped function with the provided arguments. +func (f DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) { + return f(ctx, in) } var _ DeserializeHandler = DeserializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go index 065e3885..1a0ad9fb 100644 --- a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go +++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go @@ -1,6 +1,10 @@ +// Code generated by smithy-go/middleware/generate.go DO NOT EDIT. 
package middleware -import "context" +import ( + "context" + "fmt" +) // FinalizeInput provides the input parameters for the FinalizeMiddleware to // consume. FinalizeMiddleware may modify the Request value before forwarding @@ -23,7 +27,7 @@ type FinalizeHandler interface { } // FinalizeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next FinalizeHandler for further +// finalize step. Delegates to the next FinalizeHandler for further // processing. type FinalizeMiddleware interface { // ID returns a unique ID for the middleware in the FinalizeStep. The step does not @@ -38,8 +42,8 @@ type FinalizeMiddleware interface { ) } -// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID -// provided, and the func to be invoked. +// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID provided, +// and the func to be invoked. func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { return finalizeMiddlewareFunc{ id: id, @@ -72,20 +76,19 @@ var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) // FinalizeStep provides the ordered grouping of FinalizeMiddleware to be // invoked on a handler. type FinalizeStep struct { - ids *orderedIDs + head *decoratedFinalizeHandler + tail *decoratedFinalizeHandler } -// NewFinalizeStep returns a FinalizeStep ready to have middleware for -// initialization added to it. +// NewFinalizeStep returns an FinalizeStep ready to have middleware for +// finalize added to it. func NewFinalizeStep() *FinalizeStep { - return &FinalizeStep{ - ids: newOrderedIDs(), - } + return &FinalizeStep{} } var _ Middleware = (*FinalizeStep)(nil) -// ID returns the unique id of the step as a middleware. +// ID returns the unique ID of the step as a middleware. func (s *FinalizeStep) ID() string { return "Finalize stack step" } @@ -97,77 +100,161 @@ func (s *FinalizeStep) ID() string { func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( out interface{}, metadata Metadata, err error, ) { - order := s.ids.GetOrder() - - var h FinalizeHandler = finalizeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedFinalizeHandler{ - Next: h, - With: order[i].(FinalizeMiddleware), - } - } - sIn := FinalizeInput{ Request: in, } - res, metadata, err := h.HandleFinalize(ctx, sIn) + wh := &finalizeWrapHandler{next} + if s.head == nil { + res, metadata, err := wh.HandleFinalize(ctx, sIn) + return res.Result, metadata, err + } + + s.tail.Next = wh + res, metadata, err := s.head.HandleFinalize(ctx, sIn) return res.Result, metadata, err } // Get retrieves the middleware identified by id. If the middleware is not present, returns false. func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { + found, _ := s.get(id) + if found == nil { return nil, false } - return get.(FinalizeMiddleware), ok + + return found.With, true } // Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. +// +// Add never returns an error. It used to for duplicate phases but this +// behavior has since been removed as part of a performance optimization. The +// return value from Add can be ignored. 
func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) + if s.head == nil { + s.head = &decoratedFinalizeHandler{nil, m} + s.tail = s.head + return nil + } + + if pos == Before { + s.head = &decoratedFinalizeHandler{s.head, m} + } else { + tail := &decoratedFinalizeHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } + + return nil } // Insert injects the middleware relative to an existing middleware ID. // Returns error if the original middleware does not exist, or the middleware // being added already exists. func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) + found, prev := s.get(relativeTo) + if found == nil { + return fmt.Errorf("not found: %s", m.ID()) + } + + if pos == Before { + if prev == nil { // at the front + s.head = &decoratedFinalizeHandler{s.head, m} + } else { // somewhere in the middle + prev.Next = &decoratedFinalizeHandler{found, m} + } + } else { + if found.Next == nil { // at the end + tail := &decoratedFinalizeHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } else { // somewhere in the middle + found.Next = &decoratedFinalizeHandler{found.Next, m} + } + } + + return nil } // Swap removes the middleware by id, replacing it with the new middleware. // Returns the middleware removed, or error if the middleware to be removed // doesn't exist. func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err + found, _ := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", m.ID()) } - return removed.(FinalizeMiddleware), nil + swapped := found.With + found.With = m + return swapped, nil } // Remove removes the middleware by id. Returns error if the middleware // doesn't exist. func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err + found, prev := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", id) + } + + if s.head == s.tail { // it's the only one + s.head = nil + s.tail = nil + } else if found == s.head { // at the front + s.head = s.head.Next.(*decoratedFinalizeHandler) + } else if found == s.tail { // at the end + prev.Next = nil + s.tail = prev + } else { + prev.Next = found.Next // somewhere in the middle } - return removed.(FinalizeMiddleware), nil + return found.With, nil } // List returns a list of the middleware in the step. func (s *FinalizeStep) List() []string { - return s.ids.List() + var ids []string + for h := s.head; h != nil; { + ids = append(ids, h.With.ID()) + if h.Next == nil { + break + } + + // once executed, tail.Next of the list will be set to an + // *finalizeWrapHandler, make sure to check for that + if hnext, ok := h.Next.(*decoratedFinalizeHandler); ok { + h = hnext + } else { + break + } + } + return ids } // Clear removes all middleware in the step. 
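The ordered-ID container used previously is replaced across these steps by an intrusive singly linked list, which is why Add no longer reports duplicates while Insert, Swap, and Remove still fail on unknown IDs. A minimal sketch of that ordering behavior, assuming only the FinalizeStep API shown in this hunk (the middleware IDs below are illustrative, not taken from this patch):

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// noopFinalize builds a pass-through finalize middleware with the given ID.
func noopFinalize(id string) middleware.FinalizeMiddleware {
	return middleware.FinalizeMiddlewareFunc(id, func(
		ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
	) (middleware.FinalizeOutput, middleware.Metadata, error) {
		return next.HandleFinalize(ctx, in)
	})
}

func main() {
	step := middleware.NewFinalizeStep()

	// Add prepends to the head (Before) or appends to the tail (After) of the
	// step's linked list; with this refactor it never returns an error.
	_ = step.Add(noopFinalize("Signing"), middleware.After)
	_ = step.Add(noopFinalize("Retry"), middleware.Before)

	// Insert splices relative to an existing ID and still errors when that ID
	// is not present in the list.
	if err := step.Insert(noopFinalize("Metrics"), "Signing", middleware.Before); err != nil {
		fmt.Println("insert failed:", err)
	}

	fmt.Println(step.List()) // [Retry Metrics Signing]
}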
func (s *FinalizeStep) Clear() { - s.ids.Clear() + s.head = nil + s.tail = nil +} + +func (s *FinalizeStep) get(id string) (found, prev *decoratedFinalizeHandler) { + for h := s.head; h != nil; { + if h.With.ID() == id { + found = h + return + } + prev = h + if h.Next == nil { + return + } + + // once executed, tail.Next of the list will be set to an + // *finalizeWrapHandler + h, _ = h.Next.(*decoratedFinalizeHandler) + } + return } type finalizeWrapHandler struct { @@ -200,10 +287,10 @@ func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in Finaliz return h.With.HandleFinalize(ctx, in, h.Next) } -// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. +// FinalizeHandlerFunc provides a wrapper around a function to be used as finalizeMiddleware. type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) -// HandleFinalize invokes the wrapped function with the given arguments. +// HandleFinalize calls the wrapped function with the provided arguments. func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { return f(ctx, in) } diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go index fe359144..446f3b7b 100644 --- a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go +++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go @@ -1,10 +1,15 @@ +// Code generated by smithy-go/middleware/generate.go DO NOT EDIT. package middleware -import "context" +import ( + "context" + "fmt" +) // InitializeInput wraps the input parameters for the InitializeMiddlewares to // consume. InitializeMiddleware may modify the parameter value before // forwarding it along to the next InitializeHandler. + type InitializeInput struct { Parameters interface{} } @@ -72,15 +77,14 @@ var _ InitializeMiddleware = (initializeMiddlewareFunc{}) // InitializeStep provides the ordered grouping of InitializeMiddleware to be // invoked on a handler. type InitializeStep struct { - ids *orderedIDs + head *decoratedInitializeHandler + tail *decoratedInitializeHandler } // NewInitializeStep returns an InitializeStep ready to have middleware for -// initialization added to it. +// initialize added to it. func NewInitializeStep() *InitializeStep { - return &InitializeStep{ - ids: newOrderedIDs(), - } + return &InitializeStep{} } var _ Middleware = (*InitializeStep)(nil) @@ -97,77 +101,161 @@ func (s *InitializeStep) ID() string { func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( out interface{}, metadata Metadata, err error, ) { - order := s.ids.GetOrder() - - var h InitializeHandler = initializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedInitializeHandler{ - Next: h, - With: order[i].(InitializeMiddleware), - } - } - sIn := InitializeInput{ Parameters: in, } - res, metadata, err := h.HandleInitialize(ctx, sIn) + wh := &initializeWrapHandler{next} + if s.head == nil { + res, metadata, err := wh.HandleInitialize(ctx, sIn) + return res.Result, metadata, err + } + + s.tail.Next = wh + res, metadata, err := s.head.HandleInitialize(ctx, sIn) return res.Result, metadata, err } // Get retrieves the middleware identified by id. If the middleware is not present, returns false. 
func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { + found, _ := s.get(id) + if found == nil { return nil, false } - return get.(InitializeMiddleware), ok + + return found.With, true } // Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. +// +// Add never returns an error. It used to for duplicate phases but this +// behavior has since been removed as part of a performance optimization. The +// return value from Add can be ignored. func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) + if s.head == nil { + s.head = &decoratedInitializeHandler{nil, m} + s.tail = s.head + return nil + } + + if pos == Before { + s.head = &decoratedInitializeHandler{s.head, m} + } else { + tail := &decoratedInitializeHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } + + return nil } // Insert injects the middleware relative to an existing middleware ID. // Returns error if the original middleware does not exist, or the middleware // being added already exists. func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) + found, prev := s.get(relativeTo) + if found == nil { + return fmt.Errorf("not found: %s", m.ID()) + } + + if pos == Before { + if prev == nil { // at the front + s.head = &decoratedInitializeHandler{s.head, m} + } else { // somewhere in the middle + prev.Next = &decoratedInitializeHandler{found, m} + } + } else { + if found.Next == nil { // at the end + tail := &decoratedInitializeHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } else { // somewhere in the middle + found.Next = &decoratedInitializeHandler{found.Next, m} + } + } + + return nil } // Swap removes the middleware by id, replacing it with the new middleware. // Returns the middleware removed, or error if the middleware to be removed // doesn't exist. func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err + found, _ := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", m.ID()) } - return removed.(InitializeMiddleware), nil + swapped := found.With + found.With = m + return swapped, nil } // Remove removes the middleware by id. Returns error if the middleware // doesn't exist. func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err + found, prev := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", id) } - return removed.(InitializeMiddleware), nil + if s.head == s.tail { // it's the only one + s.head = nil + s.tail = nil + } else if found == s.head { // at the front + s.head = s.head.Next.(*decoratedInitializeHandler) + } else if found == s.tail { // at the end + prev.Next = nil + s.tail = prev + } else { + prev.Next = found.Next // somewhere in the middle + } + + return found.With, nil } // List returns a list of the middleware in the step. 
func (s *InitializeStep) List() []string { - return s.ids.List() + var ids []string + for h := s.head; h != nil; { + ids = append(ids, h.With.ID()) + if h.Next == nil { + break + } + + // once executed, tail.Next of the list will be set to an + // *initializeWrapHandler, make sure to check for that + if hnext, ok := h.Next.(*decoratedInitializeHandler); ok { + h = hnext + } else { + break + } + } + return ids } // Clear removes all middleware in the step. func (s *InitializeStep) Clear() { - s.ids.Clear() + s.head = nil + s.tail = nil +} + +func (s *InitializeStep) get(id string) (found, prev *decoratedInitializeHandler) { + for h := s.head; h != nil; { + if h.With.ID() == id { + found = h + return + } + prev = h + if h.Next == nil { + return + } + + // once executed, tail.Next of the list will be set to an + // *initializeWrapHandler + h, _ = h.Next.(*decoratedInitializeHandler) + } + return } type initializeWrapHandler struct { @@ -200,12 +288,12 @@ func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in Ini return h.With.HandleInitialize(ctx, in, h.Next) } -// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler. +// InitializeHandlerFunc provides a wrapper around a function to be used as initializeMiddleware. type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error) // HandleInitialize calls the wrapped function with the provided arguments. -func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { - return i(ctx, in) +func (f InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { + return f(ctx, in) } var _ InitializeHandler = InitializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go index 114bafce..942ebb4f 100644 --- a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go +++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go @@ -1,6 +1,10 @@ +// Code generated by smithy-go/middleware/generate.go DO NOT EDIT. package middleware -import "context" +import ( + "context" + "fmt" +) // SerializeInput provides the input parameters for the SerializeMiddleware to // consume. SerializeMiddleware may modify the Request value before forwarding @@ -41,8 +45,8 @@ type SerializeMiddleware interface { ) } -// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID -// provided, and the func to be invoked. +// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID provided, +// and the func to be invoked. func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { return serializeMiddlewareFunc{ id: id, @@ -75,17 +79,15 @@ var _ SerializeMiddleware = (serializeMiddlewareFunc{}) // SerializeStep provides the ordered grouping of SerializeMiddleware to be // invoked on a handler. type SerializeStep struct { + head *decoratedSerializeHandler + tail *decoratedSerializeHandler newRequest func() interface{} - ids *orderedIDs } -// NewSerializeStep returns a SerializeStep ready to have middleware for -// initialization added to it. The newRequest func parameter is used to -// initialize the transport specific request for the stack SerializeStep to -// serialize the input parameters into. 
+// NewSerializeStep returns an SerializeStep ready to have middleware for +// serialize added to it. func NewSerializeStep(newRequest func() interface{}) *SerializeStep { return &SerializeStep{ - ids: newOrderedIDs(), newRequest: newRequest, } } @@ -104,78 +106,162 @@ func (s *SerializeStep) ID() string { func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( out interface{}, metadata Metadata, err error, ) { - order := s.ids.GetOrder() - - var h SerializeHandler = serializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedSerializeHandler{ - Next: h, - With: order[i].(SerializeMiddleware), - } - } - sIn := SerializeInput{ Parameters: in, Request: s.newRequest(), } - res, metadata, err := h.HandleSerialize(ctx, sIn) + wh := &serializeWrapHandler{next} + if s.head == nil { + res, metadata, err := wh.HandleSerialize(ctx, sIn) + return res.Result, metadata, err + } + + s.tail.Next = wh + res, metadata, err := s.head.HandleSerialize(ctx, sIn) return res.Result, metadata, err } // Get retrieves the middleware identified by id. If the middleware is not present, returns false. func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { + found, _ := s.get(id) + if found == nil { return nil, false } - return get.(SerializeMiddleware), ok + + return found.With, true } // Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. +// +// Add never returns an error. It used to for duplicate phases but this +// behavior has since been removed as part of a performance optimization. The +// return value from Add can be ignored. func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) + if s.head == nil { + s.head = &decoratedSerializeHandler{nil, m} + s.tail = s.head + return nil + } + + if pos == Before { + s.head = &decoratedSerializeHandler{s.head, m} + } else { + tail := &decoratedSerializeHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } + + return nil } // Insert injects the middleware relative to an existing middleware ID. // Returns error if the original middleware does not exist, or the middleware // being added already exists. func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) + found, prev := s.get(relativeTo) + if found == nil { + return fmt.Errorf("not found: %s", m.ID()) + } + + if pos == Before { + if prev == nil { // at the front + s.head = &decoratedSerializeHandler{s.head, m} + } else { // somewhere in the middle + prev.Next = &decoratedSerializeHandler{found, m} + } + } else { + if found.Next == nil { // at the end + tail := &decoratedSerializeHandler{nil, m} + s.tail.Next = tail + s.tail = tail + } else { // somewhere in the middle + found.Next = &decoratedSerializeHandler{found.Next, m} + } + } + + return nil } // Swap removes the middleware by id, replacing it with the new middleware. // Returns the middleware removed, or error if the middleware to be removed // doesn't exist. 
func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err + found, _ := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", m.ID()) } - return removed.(SerializeMiddleware), nil + swapped := found.With + found.With = m + return swapped, nil } // Remove removes the middleware by id. Returns error if the middleware // doesn't exist. func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err + found, prev := s.get(id) + if found == nil { + return nil, fmt.Errorf("not found: %s", id) + } + + if s.head == s.tail { // it's the only one + s.head = nil + s.tail = nil + } else if found == s.head { // at the front + s.head = s.head.Next.(*decoratedSerializeHandler) + } else if found == s.tail { // at the end + prev.Next = nil + s.tail = prev + } else { + prev.Next = found.Next // somewhere in the middle } - return removed.(SerializeMiddleware), nil + return found.With, nil } // List returns a list of the middleware in the step. func (s *SerializeStep) List() []string { - return s.ids.List() + var ids []string + for h := s.head; h != nil; { + ids = append(ids, h.With.ID()) + if h.Next == nil { + break + } + + // once executed, tail.Next of the list will be set to an + // *serializeWrapHandler, make sure to check for that + if hnext, ok := h.Next.(*decoratedSerializeHandler); ok { + h = hnext + } else { + break + } + } + return ids } // Clear removes all middleware in the step. func (s *SerializeStep) Clear() { - s.ids.Clear() + s.head = nil + s.tail = nil +} + +func (s *SerializeStep) get(id string) (found, prev *decoratedSerializeHandler) { + for h := s.head; h != nil; { + if h.With.ID() == id { + found = h + return + } + prev = h + if h.Next == nil { + return + } + + // once executed, tail.Next of the list will be set to an + // *serializeWrapHandler + h, _ = h.Next.(*decoratedSerializeHandler) + } + return } type serializeWrapHandler struct { @@ -184,7 +270,7 @@ type serializeWrapHandler struct { var _ SerializeHandler = (*serializeWrapHandler)(nil) -// Implements SerializeHandler, converts types and delegates to underlying +// HandleSerialize implements SerializeHandler, converts types and delegates to underlying // generic handler. func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( out SerializeOutput, metadata Metadata, err error, @@ -208,12 +294,12 @@ func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in Seria return h.With.HandleSerialize(ctx, in, h.Next) } -// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. +// SerializeHandlerFunc provides a wrapper around a function to be used as serializeMiddleware. type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) // HandleSerialize calls the wrapped function with the provided arguments. 
-func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { - return s(ctx, in) +func (f SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { + return f(ctx, in) } var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml index 9d94b7cb..aac582fa 100644 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -1,5 +1,4 @@ [dependencies] - "github.com/jmespath/go-jmespath" = "v0.4.0" [modules] diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go new file mode 100644 index 00000000..e21f2632 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go @@ -0,0 +1,321 @@ +package http + +import ( + "context" +) + +func icopy[T any](v []T) []T { + s := make([]T, len(v)) + copy(s, v) + return s +} + +// InterceptorContext is all the information available in different +// interceptors. +// +// Not all information is available in each interceptor, see each interface +// definition for more details. +type InterceptorContext struct { + Input any + Request *Request + + Output any + Response *Response +} + +// InterceptorRegistry holds a list of operation interceptors. +// +// Interceptors allow callers to insert custom behavior at well-defined points +// within a client's operation lifecycle. +// +// # Interceptor context +// +// All interceptors are invoked with a context object that contains input and +// output containers for the operation. The individual fields that are +// available will depend on what the interceptor is and, in certain +// interceptors, how far the operation was able to progress. See the +// documentation for each interface definition for more information about field +// availability. +// +// Implementations MUST NOT directly mutate the values of the fields in the +// interceptor context. They are free to mutate the existing values _pointed +// to_ by those fields, however. +// +// # Returning errors +// +// All interceptors can return errors. If an interceptor returns an error +// _before_ the client's retry loop, the operation will fail immediately. If +// one returns an error _within_ the retry loop, the error WILL be considered +// according to the client's retry policy. +// +// # Adding interceptors +// +// Idiomatically you will simply use one of the Add() receiver methods to +// register interceptors as desired. However, the list for each interface is +// exported on the registry struct and the caller is free to manipulate it +// directly, for example, to register a number of interceptors all at once, or +// to remove one that was previously registered. +// +// The base SDK client WILL NOT add any interceptors. SDK operations and +// customizations are implemented in terms of middleware. +// +// Modifications to the registry will not persist across operation calls when +// using per-operation functional options. This means you can register +// interceptors on a per-operation basis without affecting other operations. 
+type InterceptorRegistry struct { + BeforeExecution []BeforeExecutionInterceptor + BeforeSerialization []BeforeSerializationInterceptor + AfterSerialization []AfterSerializationInterceptor + BeforeRetryLoop []BeforeRetryLoopInterceptor + BeforeAttempt []BeforeAttemptInterceptor + BeforeSigning []BeforeSigningInterceptor + AfterSigning []AfterSigningInterceptor + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor + BeforeDeserialization []BeforeDeserializationInterceptor + AfterDeserialization []AfterDeserializationInterceptor + AfterAttempt []AfterAttemptInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// Copy returns a deep copy of the registry. This is used by SDK clients on +// each operation call in order to prevent per-op config mutation from +// persisting. +func (i *InterceptorRegistry) Copy() InterceptorRegistry { + return InterceptorRegistry{ + BeforeExecution: icopy(i.BeforeExecution), + BeforeSerialization: icopy(i.BeforeSerialization), + AfterSerialization: icopy(i.AfterSerialization), + BeforeRetryLoop: icopy(i.BeforeRetryLoop), + BeforeAttempt: icopy(i.BeforeAttempt), + BeforeSigning: icopy(i.BeforeSigning), + AfterSigning: icopy(i.AfterSigning), + BeforeTransmit: icopy(i.BeforeTransmit), + AfterTransmit: icopy(i.AfterTransmit), + BeforeDeserialization: icopy(i.BeforeDeserialization), + AfterDeserialization: icopy(i.AfterDeserialization), + AfterAttempt: icopy(i.AfterAttempt), + AfterExecution: icopy(i.AfterExecution), + } +} + +// AddBeforeExecution registers the provided BeforeExecutionInterceptor. +func (i *InterceptorRegistry) AddBeforeExecution(v BeforeExecutionInterceptor) { + i.BeforeExecution = append(i.BeforeExecution, v) +} + +// AddBeforeSerialization registers the provided BeforeSerializationInterceptor. +func (i *InterceptorRegistry) AddBeforeSerialization(v BeforeSerializationInterceptor) { + i.BeforeSerialization = append(i.BeforeSerialization, v) +} + +// AddAfterSerialization registers the provided AfterSerializationInterceptor. +func (i *InterceptorRegistry) AddAfterSerialization(v AfterSerializationInterceptor) { + i.AfterSerialization = append(i.AfterSerialization, v) +} + +// AddBeforeRetryLoop registers the provided BeforeRetryLoopInterceptor. +func (i *InterceptorRegistry) AddBeforeRetryLoop(v BeforeRetryLoopInterceptor) { + i.BeforeRetryLoop = append(i.BeforeRetryLoop, v) +} + +// AddBeforeAttempt registers the provided BeforeAttemptInterceptor. +func (i *InterceptorRegistry) AddBeforeAttempt(v BeforeAttemptInterceptor) { + i.BeforeAttempt = append(i.BeforeAttempt, v) +} + +// AddBeforeSigning registers the provided BeforeSigningInterceptor. +func (i *InterceptorRegistry) AddBeforeSigning(v BeforeSigningInterceptor) { + i.BeforeSigning = append(i.BeforeSigning, v) +} + +// AddAfterSigning registers the provided AfterSigningInterceptor. +func (i *InterceptorRegistry) AddAfterSigning(v AfterSigningInterceptor) { + i.AfterSigning = append(i.AfterSigning, v) +} + +// AddBeforeTransmit registers the provided BeforeTransmitInterceptor. +func (i *InterceptorRegistry) AddBeforeTransmit(v BeforeTransmitInterceptor) { + i.BeforeTransmit = append(i.BeforeTransmit, v) +} + +// AddAfterTransmit registers the provided AfterTransmitInterceptor. +func (i *InterceptorRegistry) AddAfterTransmit(v AfterTransmitInterceptor) { + i.AfterTransmit = append(i.AfterTransmit, v) +} + +// AddBeforeDeserialization registers the provided BeforeDeserializationInterceptor. 
+func (i *InterceptorRegistry) AddBeforeDeserialization(v BeforeDeserializationInterceptor) { + i.BeforeDeserialization = append(i.BeforeDeserialization, v) +} + +// AddAfterDeserialization registers the provided AfterDeserializationInterceptor. +func (i *InterceptorRegistry) AddAfterDeserialization(v AfterDeserializationInterceptor) { + i.AfterDeserialization = append(i.AfterDeserialization, v) +} + +// AddAfterAttempt registers the provided AfterAttemptInterceptor. +func (i *InterceptorRegistry) AddAfterAttempt(v AfterAttemptInterceptor) { + i.AfterAttempt = append(i.AfterAttempt, v) +} + +// AddAfterExecution registers the provided AfterExecutionInterceptor. +func (i *InterceptorRegistry) AddAfterExecution(v AfterExecutionInterceptor) { + i.AfterExecution = append(i.AfterExecution, v) +} + +// BeforeExecutionInterceptor runs before anything else in the operation +// lifecycle. +// +// Available InterceptorContext fields: +// - Input +type BeforeExecutionInterceptor interface { + BeforeExecution(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSerializationInterceptor runs before the operation input is serialized +// into its transport request. +// +// Serialization occurs before the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +type BeforeSerializationInterceptor interface { + BeforeSerialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterSerializationInterceptor runs after the operation input is serialized +// into its transport request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSerializationInterceptor interface { + AfterSerialization(ctx context.Context, in *InterceptorContext) error +} + +// BeforeRetryLoopInterceptor runs right before the operation enters the retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeRetryLoopInterceptor interface { + BeforeRetryLoop(ctx context.Context, in *InterceptorContext) error +} + +// BeforeAttemptInterceptor runs right before every attempt in the retry loop. +// +// If this interceptor returns an error, AfterAttempt interceptors WILL NOT be +// invoked. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeAttemptInterceptor interface { + BeforeAttempt(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSigningInterceptor runs right before the request is signed. +// +// Signing occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeSigningInterceptor interface { + BeforeSigning(ctx context.Context, in *InterceptorContext) error +} + +// AfterSigningInterceptor runs right after the request is signed. +// +// It is unsafe to modify the outgoing HTTP request at or past this hook, since +// doing so may invalidate the signature of the request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSigningInterceptor interface { + AfterSigning(ctx context.Context, in *InterceptorContext) error +} + +// BeforeTransmitInterceptor runs right before the HTTP request is sent. +// +// HTTP transmit occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeTransmitInterceptor interface { + BeforeTransmit(ctx context.Context, in *InterceptorContext) error +} + +// AfterTransmitInterceptor runs right after the HTTP response is received. 
+// +// It will always be invoked when a response is received, regardless of its +// status code. Conversely, it WILL NOT be invoked if the HTTP round-trip was +// not successful, e.g. because of a DNS resolution error +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type AfterTransmitInterceptor interface { + AfterTransmit(ctx context.Context, in *InterceptorContext) error +} + +// BeforeDeserializationInterceptor runs right before the incoming HTTP response +// is deserialized. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Deserialization occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type BeforeDeserializationInterceptor interface { + BeforeDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterDeserializationInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request +// - Response +type AfterDeserializationInterceptor interface { + AfterDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterAttemptInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error, or if another interceptor within the retry loop +// returned an error. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterAttemptInterceptor interface { + AfterAttempt(ctx context.Context, in *InterceptorContext) error +} + +// AfterExecutionInterceptor runs after everything else. It runs regardless of +// how far the operation progressed in its lifecycle, and regardless of whether +// the operation succeeded or failed. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterExecutionInterceptor interface { + AfterExecution(ctx context.Context, in *InterceptorContext) error +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go new file mode 100644 index 00000000..2cc4b57f --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go @@ -0,0 +1,325 @@ +package http + +import ( + "context" + "errors" + + "github.com/aws/smithy-go/middleware" +) + +type ictxKey struct{} + +func withIctx(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, ictxKey{}, &InterceptorContext{}) +} + +func getIctx(ctx context.Context) *InterceptorContext { + return middleware.GetStackValue(ctx, ictxKey{}).(*InterceptorContext) +} + +// InterceptExecution runs Before/AfterExecutionInterceptors. 
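These hook interfaces are deliberately narrow, so a single value can satisfy several of them at once; the field-availability notes in each comment are the only contract. A hedged sketch, with a hypothetical type name, of an interceptor that times every attempt in the retry loop:

package main

import (
	"context"
	"log"
	"time"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// timeAttempts implements both BeforeAttempt and AfterAttempt.
type timeAttempts struct {
	start time.Time
}

func (t *timeAttempts) BeforeAttempt(ctx context.Context, in *smithyhttp.InterceptorContext) error {
	t.start = time.Now()
	return nil
}

func (t *timeAttempts) AfterAttempt(ctx context.Context, in *smithyhttp.InterceptorContext) error {
	// Response is only populated if the HTTP request was actually transmitted.
	log.Printf("attempt finished in %s (got response: %v)", time.Since(t.start), in.Response != nil)
	return nil
}

func main() {
	var reg smithyhttp.InterceptorRegistry
	t := &timeAttempts{}
	reg.AddBeforeAttempt(t)
	reg.AddAfterAttempt(t) // the exported slices may also be manipulated directly
}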
+type InterceptExecution struct { + BeforeExecution []BeforeExecutionInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// ID identifies the middleware. +func (m *InterceptExecution) ID() string { + return "InterceptExecution" +} + +// HandleInitialize runs the interceptors. +func (m *InterceptExecution) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, md middleware.Metadata, err error, +) { + ctx = withIctx(ctx) + getIctx(ctx).Input = in.Parameters + + for _, i := range m.BeforeExecution { + if err := i.BeforeExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleInitialize(ctx, in) + + for _, i := range m.AfterExecution { + if err := i.AfterExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeSerialization runs BeforeSerializationInterceptors. +type InterceptBeforeSerialization struct { + Interceptors []BeforeSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSerialization) ID() string { + return "InterceptBeforeSerialization" +} + +// HandleSerialize runs the interceptors. +func (m *InterceptBeforeSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptAfterSerialization runs AfterSerializationInterceptors. +type InterceptAfterSerialization struct { + Interceptors []AfterSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSerialization) ID() string { + return "InterceptAfterSerialization" +} + +// HandleSerialize runs the interceptors. +func (m *InterceptAfterSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + getIctx(ctx).Request = in.Request.(*Request) + + for _, i := range m.Interceptors { + if err := i.AfterSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptBeforeRetryLoop runs BeforeRetryLoopInterceptors. +type InterceptBeforeRetryLoop struct { + Interceptors []BeforeRetryLoopInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeRetryLoop) ID() string { + return "InterceptBeforeRetryLoop" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptBeforeRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeRetryLoop(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptBeforeSigning runs BeforeSigningInterceptors. +type InterceptBeforeSigning struct { + Interceptors []BeforeSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSigning) ID() string { + return "InterceptBeforeSigning" +} + +// HandleFinalize runs the interceptors. 
+func (m *InterceptBeforeSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptAfterSigning runs AfterSigningInterceptors. +type InterceptAfterSigning struct { + Interceptors []AfterSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSigning) ID() string { + return "InterceptAfterSigning" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptAfterSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.AfterSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptTransmit runs BeforeTransmitInterceptors and AfterTransmitInterceptors. +type InterceptTransmit struct { + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor +} + +// ID identifies the middleware. +func (m *InterceptTransmit) ID() string { + return "InterceptTransmit" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptTransmit) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeTransmit { + if err := i.BeforeTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, md, err + } + + // the root of the decorated middleware guarantees this will be here + // (client.go: ClientHandler.Handle) + getIctx(ctx).Response = out.RawResponse.(*Response) + + for _, i := range m.AfterTransmit { + if err := i.AfterTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeDeserialization runs BeforeDeserializationInterceptors. +type InterceptBeforeDeserialization struct { + Interceptors []BeforeDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeDeserialization) ID() string { + return "InterceptBeforeDeserialization" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptBeforeDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + for _, i := range m.Interceptors { + if err := i.BeforeDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAfterDeserialization runs AfterDeserializationInterceptors. +type InterceptAfterDeserialization struct { + Interceptors []AfterDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterDeserialization) ID() string { + return "InterceptAfterDeserialization" +} + +// HandleDeserialize runs the interceptors. 
+func (m *InterceptAfterDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + getIctx(ctx).Output = out.Result + + for _, i := range m.Interceptors { + if err := i.AfterDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAttempt runs AfterAttemptInterceptors. +type InterceptAttempt struct { + BeforeAttempt []BeforeAttemptInterceptor + AfterAttempt []AfterAttemptInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAttempt) ID() string { + return "InterceptAttempt" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptAttempt) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeAttempt { + if err := i.BeforeAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleFinalize(ctx, in) + + for _, i := range m.AfterAttempt { + if err := i.AfterAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go index d1beaa59..b4cd4a47 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/metrics.go +++ b/vendor/github.com/aws/smithy-go/transport/http/metrics.go @@ -17,6 +17,12 @@ var now = time.Now func withMetrics(parent context.Context, client ClientDo, meter metrics.Meter) ( context.Context, ClientDo, error, ) { + // WithClientTrace is an expensive operation - avoid calling it if we're + // not actually using a metrics sink. + if _, ok := meter.(metrics.NopMeter); ok { + return parent, client, nil + } + hm, err := newHTTPMetrics(meter) if err != nil { return nil, nil, err diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go deleted file mode 100644 index 48482330..00000000 --- a/vendor/github.com/cenkalti/backoff/v4/context.go +++ /dev/null @@ -1,62 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. 
-type BackOffContext interface { // nolint: golint - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func getContext(b BackOff) context.Context { - if cb, ok := b.(BackOffContext); ok { - return cb.Context() - } - if tb, ok := b.(*backOffTries); ok { - return getContext(tb.delegate) - } - return context.Background() -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - return b.BackOff.NextBackOff() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go deleted file mode 100644 index aac99f19..00000000 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ /dev/null @@ -1,216 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff returns Stop. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Stop time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. 
-type ExponentialBackOffOpts func(*ExponentialBackOff) - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Stop: Stop, - Clock: SystemClock, - } - for _, fn := range opts { - fn(b) - } - b.Reset() - return b -} - -// WithInitialInterval sets the initial interval between retries. -func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.InitialInterval = duration - } -} - -// WithRandomizationFactor sets the randomization factor to add jitter to intervals. -func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.RandomizationFactor = randomizationFactor - } -} - -// WithMultiplier sets the multiplier for increasing the interval after each retry. -func WithMultiplier(multiplier float64) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Multiplier = multiplier - } -} - -// WithMaxInterval sets the maximum interval between retries. -func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.MaxInterval = duration - } -} - -// WithMaxElapsedTime sets the maximum total time for retries. -func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.MaxElapsedTime = duration - } -} - -// WithRetryStopDuration sets the duration after which retries should stop. -func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Stop = duration - } -} - -// WithClockProvider sets the clock used to measure time. -func WithClockProvider(clock Clock) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Clock = clock - } -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -// Reset must be called before using b. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval * (1 ± RandomizationFactor) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - elapsed := b.GetElapsedTime() - next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) - b.incrementCurrentInterval() - if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { - return b.Stop - } - return next -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). 
It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - if randomizationFactor == 0 { - return currentInterval // make sure no randomness is used when randomizationFactor is 0. - } - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go deleted file mode 100644 index b9c0c51c..00000000 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ /dev/null @@ -1,146 +0,0 @@ -package backoff - -import ( - "errors" - "time" -) - -// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). -// The operation will be retried using a backoff policy if it returns an error. -type OperationWithData[T any] func() (T, error) - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -func (o Operation) withEmptyData() OperationWithData[struct{}] { - return func() (struct{}, error) { - return struct{}{}, o() - } -} - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { - return RetryNotify(o, b, nil) -} - -// RetryWithData is like Retry but returns data in the response too. -func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { - return RetryNotifyWithData(o, b, nil) -} - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. 
-func RetryNotify(operation Operation, b BackOff, notify Notify) error { - return RetryNotifyWithTimer(operation, b, notify, nil) -} - -// RetryNotifyWithData is like RetryNotify but returns data in the response too. -func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { - return doRetryNotify(operation, b, notify, nil) -} - -// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer -// for each failed attempt before sleep. -// A default timer that uses system timer is used when nil is passed. -func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) - return err -} - -// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. -func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - return doRetryNotify(operation, b, notify, t) -} - -func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - var ( - err error - next time.Duration - res T - ) - if t == nil { - t = &defaultTimer{} - } - - defer func() { - t.Stop() - }() - - ctx := getContext(b) - - b.Reset() - for { - res, err = operation() - if err == nil { - return res, nil - } - - var permanent *PermanentError - if errors.As(err, &permanent) { - return res, permanent.Err - } - - if next = b.NextBackOff(); next == Stop { - if cerr := ctx.Err(); cerr != nil { - return res, cerr - } - - return res, err - } - - if notify != nil { - notify(err, next) - } - - t.Start(next) - - select { - case <-ctx.Done(): - return res, ctx.Err() - case <-t.C(): - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -func (e *PermanentError) Unwrap() error { - return e.Err -} - -func (e *PermanentError) Is(target error) bool { - _, ok := target.(*PermanentError) - return ok -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) error { - if err == nil { - return nil - } - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go deleted file mode 100644 index 28d58ca3..00000000 --- a/vendor/github.com/cenkalti/backoff/v4/tries.go +++ /dev/null @@ -1,38 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. 
-*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries == 0 { - return Stop - } - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore similarity index 100% rename from vendor/github.com/cenkalti/backoff/v4/.gitignore rename to vendor/github.com/cenkalti/backoff/v5/.gitignore diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md new file mode 100644 index 00000000..658c3743 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [5.0.0] - 2024-12-19 + +### Added + +- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry. + +### Changed + +- Retry function now accepts additional options for specifying max number of tries and max elapsed time. +- Retry function now accepts a context.Context. +- Operation function signature changed to return result (any type) and error. + +### Removed + +- RetryNotify* and RetryWithData functions. Only single Retry function remains. +- Optional arguments from ExponentialBackoff constructor. +- Clock and Timer interfaces. + +### Fixed + +- The original error is returned from Retry if there's a PermanentError. (#144) +- The Retry function respects the wrapped PermanentError. (#140) diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE similarity index 100% rename from vendor/github.com/cenkalti/backoff/v4/LICENSE rename to vendor/github.com/cenkalti/backoff/v5/LICENSE diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md similarity index 64% rename from vendor/github.com/cenkalti/backoff/v4/README.md rename to vendor/github.com/cenkalti/backoff/v5/README.md index 9433004a..4611b1d1 100644 --- a/vendor/github.com/cenkalti/backoff/v4/README.md +++ b/vendor/github.com/cenkalti/backoff/v5/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold ## Usage -Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. +Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end. -Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. +For most cases, use `Retry` function. See [example_test.go][example] for an example. + +If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed. 
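The v5 README above points callers at the single Retry function. A hedged sketch of what a migration from the removed v4 helpers might look like, assuming the v5 shape described in the changelog (Retry takes a context and an operation that returns a typed result and an error); the URL and status handling are placeholders only:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	ctx := context.Background()

	// Permanent and RetryAfter (both defined in error.go above) steer the
	// retry loop from inside the operation itself.
	fetch := func() (*http.Response, error) {
		resp, err := http.Get("https://example.com") // placeholder URL
		if err != nil {
			return nil, err // transient: retried with the default exponential policy
		}
		if resp.StatusCode == http.StatusTooManyRequests {
			resp.Body.Close()
			return nil, backoff.RetryAfter(2) // wait 2s before the next try
		}
		if resp.StatusCode >= 400 {
			resp.Body.Close()
			return nil, backoff.Permanent(errors.New(resp.Status)) // do not retry
		}
		return resp, nil
	}

	resp, err := backoff.Retry(ctx, fetch)
	if err != nil {
		fmt.Println("gave up:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}

Per the v5 changelog above, limits such as a maximum number of tries or a maximum elapsed time are supplied as optional arguments to Retry rather than by wrapping the BackOff as v4 did.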
## Contributing @@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. * Please don't send a PR without opening an issue and discussing it first. * If proposed change is not a common use case, I will probably not accept it. -[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff -[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples +[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go +[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go similarity index 87% rename from vendor/github.com/cenkalti/backoff/v4/backoff.go rename to vendor/github.com/cenkalti/backoff/v5/backoff.go index 3676ee40..dd2b24ca 100644 --- a/vendor/github.com/cenkalti/backoff/v4/backoff.go +++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go @@ -15,16 +15,16 @@ import "time" // BackOff is a backoff policy for retrying an operation. type BackOff interface { // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. + // backoff.Stop to indicate that no more retries should be made. // // Example usage: // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } // NextBackOff() time.Duration diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 00000000..beb2b38a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. 
+func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go new file mode 100644 index 00000000..79d425e8 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -0,0 +1,118 @@ +package backoff + +import ( + "math/rand/v2" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +Example: Given the following default arguments, for 9 tries the sequence will be: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + + currentInterval time.Duration +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + return &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + } +} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +// NextBackOff calculates the next backoff interval using the formula: +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval + } + + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. 
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 00000000..32a7f988 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. +func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of all attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. +func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. 
+ args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Unwrap() + } + + // Stop retrying if context is cancelled. + if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go similarity index 80% rename from vendor/github.com/cenkalti/backoff/v4/ticker.go rename to vendor/github.com/cenkalti/backoff/v5/ticker.go index df9d68bc..f0d4b2ae 100644 --- a/vendor/github.com/cenkalti/backoff/v4/ticker.go +++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -1,7 +1,6 @@ package backoff import ( - "context" "sync" "time" ) @@ -14,8 +13,7 @@ type Ticker struct { C <-chan time.Time c chan time.Time b BackOff - ctx context.Context - timer Timer + timer timer stop chan struct{} stopOnce sync.Once } @@ -27,22 +25,12 @@ type Ticker struct { // provided backoff policy (notably calling NextBackOff or Reset) // while the ticker is running. func NewTicker(b BackOff) *Ticker { - return NewTickerWithTimer(b, &defaultTimer{}) -} - -// NewTickerWithTimer returns a new Ticker with a custom timer. -// A default timer that uses system timer is used when nil is passed. -func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { - if timer == nil { - timer = &defaultTimer{} - } c := make(chan time.Time) t := &Ticker{ C: c, c: c, b: b, - ctx: getContext(b), - timer: timer, + timer: &defaultTimer{}, stop: make(chan struct{}), } t.b.Reset() @@ -73,8 +61,6 @@ func (t *Ticker) run() { case <-t.stop: t.c = nil // Prevent future ticks from being sent to the channel. 
return - case <-t.ctx.Done(): - return } } } diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go similarity index 96% rename from vendor/github.com/cenkalti/backoff/v4/timer.go rename to vendor/github.com/cenkalti/backoff/v5/timer.go index 8120d021..a8953097 100644 --- a/vendor/github.com/cenkalti/backoff/v4/timer.go +++ b/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -2,7 +2,7 @@ package backoff import "time" -type Timer interface { +type timer interface { Start(duration time.Duration) Stop() C() <-chan time.Time diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 00000000..33c88305 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,74 @@ +# xxhash + +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. 
+You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 00000000..94b9c443 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 00000000..78bddf1c --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,243 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array for the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest with a zero seed. +func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { + var d Digest + d.ResetWithSeed(seed) + return &d +} + +// Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. +func (d *Digest) Reset() { + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. 
+func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 00000000..3e8b1325 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,209 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 00000000..7e3145a2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go new file mode 100644 index 00000000..78f95f25 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -0,0 +1,15 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 00000000..118e49e8 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. 
+func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 00000000..05f5e7df --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,16 @@ +//go:build appengine +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 00000000..cf9d42ae --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,58 @@ +//go:build !appengine +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "unsafe" +) + +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. + +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: +// +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) +// +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. 
There is also a test (TestInlining) which verifies that these are +// inlined. +// +// See https://github.com/golang/go/issues/42739 for discussion. + +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. +func (d *Digest) WriteString(s string) (n int, err error) { + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. +type sliceHeader struct { + s string + cap int +} diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go index 76d95c61..02f3090a 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go @@ -106,6 +106,8 @@ type Task struct { // *Task_ActionStatus // *Task_CreateSyncDiff // *Task_CompactSyncs_ + // *Task_ListEventFeeds + // *Task_ListEvents TaskType isTask_TaskType `protobuf_oneof:"task_type"` Debug bool `protobuf:"varint,3,opt,name=debug,proto3" json:"debug,omitempty"` unknownFields protoimpl.UnknownFields @@ -347,6 +349,24 @@ func (x *Task) GetCompactSyncs() *Task_CompactSyncs { return nil } +func (x *Task) GetListEventFeeds() *Task_ListEventFeedsTask { + if x != nil { + if x, ok := x.TaskType.(*Task_ListEventFeeds); ok { + return x.ListEventFeeds + } + } + return nil +} + +func (x *Task) GetListEvents() *Task_ListEventsTask { + if x != nil { + if x, ok := x.TaskType.(*Task_ListEvents); ok { + return x.ListEvents + } + } + return nil +} + func (x *Task) GetDebug() bool { if x != nil { return x.Debug @@ -530,6 +550,22 @@ func (x *Task) SetCompactSyncs(v *Task_CompactSyncs) { x.TaskType = &Task_CompactSyncs_{v} } +func (x *Task) SetListEventFeeds(v *Task_ListEventFeedsTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_ListEventFeeds{v} +} + +func (x *Task) SetListEvents(v *Task_ListEventsTask) { + if v == nil { + x.TaskType = nil + return + } + x.TaskType = &Task_ListEvents{v} +} + func (x *Task) SetDebug(v bool) { x.Debug = v } @@ -709,6 +745,22 @@ func (x *Task) HasCompactSyncs() bool { return ok } +func (x *Task) HasListEventFeeds() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_ListEventFeeds) + return ok +} + +func (x *Task) HasListEvents() bool { + if x == nil { + return false + } + _, ok := x.TaskType.(*Task_ListEvents) + return ok +} + func (x *Task) ClearTaskType() { x.TaskType = nil } @@ -839,6 +891,18 @@ func (x *Task) ClearCompactSyncs() { } } +func (x *Task) ClearListEventFeeds() { + if _, ok := x.TaskType.(*Task_ListEventFeeds); ok { + x.TaskType = nil + } +} + +func (x *Task) ClearListEvents() { + if _, ok := x.TaskType.(*Task_ListEvents); ok { + x.TaskType = nil + } +} + const Task_TaskType_not_set_case case_Task_TaskType = 0 const Task_None_case case_Task_TaskType = 100 const Task_Hello_case 
case_Task_TaskType = 101 @@ -861,6 +925,8 @@ const Task_ActionInvoke_case case_Task_TaskType = 117 const Task_ActionStatus_case case_Task_TaskType = 118 const Task_CreateSyncDiff_case case_Task_TaskType = 119 const Task_CompactSyncs_case case_Task_TaskType = 120 +const Task_ListEventFeeds_case case_Task_TaskType = 121 +const Task_ListEvents_case case_Task_TaskType = 122 func (x *Task) WhichTaskType() case_Task_TaskType { if x == nil { @@ -909,6 +975,10 @@ func (x *Task) WhichTaskType() case_Task_TaskType { return Task_CreateSyncDiff_case case *Task_CompactSyncs_: return Task_CompactSyncs_case + case *Task_ListEventFeeds: + return Task_ListEventFeeds_case + case *Task_ListEvents: + return Task_ListEvents_case default: return Task_TaskType_not_set_case } @@ -941,6 +1011,8 @@ type Task_builder struct { ActionStatus *Task_ActionStatusTask CreateSyncDiff *Task_CreateSyncDiffTask CompactSyncs *Task_CompactSyncs + ListEventFeeds *Task_ListEventFeedsTask + ListEvents *Task_ListEventsTask // -- end of TaskType Debug bool } @@ -1014,6 +1086,12 @@ func (b0 Task_builder) Build() *Task { if b.CompactSyncs != nil { x.TaskType = &Task_CompactSyncs_{b.CompactSyncs} } + if b.ListEventFeeds != nil { + x.TaskType = &Task_ListEventFeeds{b.ListEventFeeds} + } + if b.ListEvents != nil { + x.TaskType = &Task_ListEvents{b.ListEvents} + } x.Debug = b.Debug return m0 } @@ -1116,6 +1194,14 @@ type Task_CompactSyncs_ struct { CompactSyncs *Task_CompactSyncs `protobuf:"bytes,120,opt,name=compact_syncs,json=compactSyncs,proto3,oneof"` } +type Task_ListEventFeeds struct { + ListEventFeeds *Task_ListEventFeedsTask `protobuf:"bytes,121,opt,name=list_event_feeds,json=listEventFeeds,proto3,oneof"` +} + +type Task_ListEvents struct { + ListEvents *Task_ListEventsTask `protobuf:"bytes,122,opt,name=list_events,json=listEvents,proto3,oneof"` +} + func (*Task_None) isTask_TaskType() {} func (*Task_Hello) isTask_TaskType() {} @@ -1158,6 +1244,10 @@ func (*Task_CreateSyncDiff) isTask_TaskType() {} func (*Task_CompactSyncs_) isTask_TaskType() {} +func (*Task_ListEventFeeds) isTask_TaskType() {} + +func (*Task_ListEvents) isTask_TaskType() {} + type BatonServiceHelloRequest struct { state protoimpl.MessageState `protogen:"hybrid.v1"` HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` @@ -2717,6 +2807,187 @@ func (b0 Task_EventFeedTask_builder) Build() *Task_EventFeedTask { return m0 } +type Task_ListEventsTask struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` + Cursor string `protobuf:"bytes,2,opt,name=cursor,proto3" json:"cursor,omitempty"` + StartAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_at,json=startAt,proto3" json:"start_at,omitempty"` + EventFeedId string `protobuf:"bytes,4,opt,name=event_feed_id,json=eventFeedId,proto3" json:"event_feed_id,omitempty"` + PageSize uint32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ListEventsTask) Reset() { + *x = Task_ListEventsTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ListEventsTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ListEventsTask) ProtoMessage() {} + +func (x *Task_ListEventsTask) ProtoReflect() 
protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ListEventsTask) GetAnnotations() []*anypb.Any { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *Task_ListEventsTask) GetCursor() string { + if x != nil { + return x.Cursor + } + return "" +} + +func (x *Task_ListEventsTask) GetStartAt() *timestamppb.Timestamp { + if x != nil { + return x.StartAt + } + return nil +} + +func (x *Task_ListEventsTask) GetEventFeedId() string { + if x != nil { + return x.EventFeedId + } + return "" +} + +func (x *Task_ListEventsTask) GetPageSize() uint32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *Task_ListEventsTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +func (x *Task_ListEventsTask) SetCursor(v string) { + x.Cursor = v +} + +func (x *Task_ListEventsTask) SetStartAt(v *timestamppb.Timestamp) { + x.StartAt = v +} + +func (x *Task_ListEventsTask) SetEventFeedId(v string) { + x.EventFeedId = v +} + +func (x *Task_ListEventsTask) SetPageSize(v uint32) { + x.PageSize = v +} + +func (x *Task_ListEventsTask) HasStartAt() bool { + if x == nil { + return false + } + return x.StartAt != nil +} + +func (x *Task_ListEventsTask) ClearStartAt() { + x.StartAt = nil +} + +type Task_ListEventsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + Cursor string + StartAt *timestamppb.Timestamp + EventFeedId string + PageSize uint32 +} + +func (b0 Task_ListEventsTask_builder) Build() *Task_ListEventsTask { + m0 := &Task_ListEventsTask{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + x.Cursor = b.Cursor + x.StartAt = b.StartAt + x.EventFeedId = b.EventFeedId + x.PageSize = b.PageSize + return m0 +} + +type Task_ListEventFeedsTask struct { + state protoimpl.MessageState `protogen:"hybrid.v1"` + Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ListEventFeedsTask) Reset() { + *x = Task_ListEventFeedsTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ListEventFeedsTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ListEventFeedsTask) ProtoMessage() {} + +func (x *Task_ListEventFeedsTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ListEventFeedsTask) GetAnnotations() []*anypb.Any { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *Task_ListEventFeedsTask) SetAnnotations(v []*anypb.Any) { + x.Annotations = v +} + +type Task_ListEventFeedsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 Task_ListEventFeedsTask_builder) Build() *Task_ListEventFeedsTask { + m0 := &Task_ListEventFeedsTask{} + b, x := &b0, m0 + _, _ = b, x + x.Annotations = b.Annotations + return m0 +} + type Task_GrantTask struct { state protoimpl.MessageState `protogen:"hybrid.v1"` Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3" json:"entitlement,omitempty"` @@ -2729,7 +3000,7 @@ type Task_GrantTask struct { func (x *Task_GrantTask) Reset() { *x = Task_GrantTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2741,7 +3012,7 @@ func (x *Task_GrantTask) String() string { func (*Task_GrantTask) ProtoMessage() {} func (x *Task_GrantTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2859,7 +3130,7 @@ type Task_RevokeTask struct { func (x *Task_RevokeTask) Reset() { *x = Task_RevokeTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2871,7 +3142,7 @@ func (x *Task_RevokeTask) String() string { func (*Task_RevokeTask) ProtoMessage() {} func (x *Task_RevokeTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2943,7 +3214,7 @@ type Task_CreateAccountTask struct { func (x *Task_CreateAccountTask) Reset() { *x = Task_CreateAccountTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2955,7 +3226,7 @@ func (x *Task_CreateAccountTask) String() string { func (*Task_CreateAccountTask) ProtoMessage() {} func (x *Task_CreateAccountTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3061,7 +3332,7 @@ type Task_CreateResourceTask struct { func (x *Task_CreateResourceTask) Reset() { *x = Task_CreateResourceTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3073,7 +3344,7 @@ func (x *Task_CreateResourceTask) String() string { func (*Task_CreateResourceTask) ProtoMessage() {} func (x *Task_CreateResourceTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3130,7 +3401,7 @@ type Task_DeleteResourceTask struct { func (x *Task_DeleteResourceTask) Reset() { *x = Task_DeleteResourceTask{} - mi := 
&file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3142,7 +3413,7 @@ func (x *Task_DeleteResourceTask) String() string { func (*Task_DeleteResourceTask) ProtoMessage() {} func (x *Task_DeleteResourceTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3224,7 +3495,7 @@ type Task_RotateCredentialsTask struct { func (x *Task_RotateCredentialsTask) Reset() { *x = Task_RotateCredentialsTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3236,7 +3507,7 @@ func (x *Task_RotateCredentialsTask) String() string { func (*Task_RotateCredentialsTask) ProtoMessage() {} func (x *Task_RotateCredentialsTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3331,7 +3602,7 @@ type Task_CreateTicketTask struct { func (x *Task_CreateTicketTask) Reset() { *x = Task_CreateTicketTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3343,7 +3614,7 @@ func (x *Task_CreateTicketTask) String() string { func (*Task_CreateTicketTask) ProtoMessage() {} func (x *Task_CreateTicketTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3436,7 +3707,7 @@ type Task_BulkCreateTicketsTask struct { func (x *Task_BulkCreateTicketsTask) Reset() { *x = Task_BulkCreateTicketsTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3448,7 +3719,7 @@ func (x *Task_BulkCreateTicketsTask) String() string { func (*Task_BulkCreateTicketsTask) ProtoMessage() {} func (x *Task_BulkCreateTicketsTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3493,7 +3764,7 @@ type Task_BulkGetTicketsTask struct { func (x *Task_BulkGetTicketsTask) Reset() { *x = Task_BulkGetTicketsTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3505,7 +3776,7 @@ func (x *Task_BulkGetTicketsTask) String() string { func (*Task_BulkGetTicketsTask) ProtoMessage() {} func (x *Task_BulkGetTicketsTask) ProtoReflect() protoreflect.Message { - mi := 
&file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3550,7 +3821,7 @@ type Task_ListTicketSchemasTask struct { func (x *Task_ListTicketSchemasTask) Reset() { *x = Task_ListTicketSchemasTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3562,7 +3833,7 @@ func (x *Task_ListTicketSchemasTask) String() string { func (*Task_ListTicketSchemasTask) ProtoMessage() {} func (x *Task_ListTicketSchemasTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3608,7 +3879,7 @@ type Task_GetTicketTask struct { func (x *Task_GetTicketTask) Reset() { *x = Task_GetTicketTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3620,7 +3891,7 @@ func (x *Task_GetTicketTask) String() string { func (*Task_GetTicketTask) ProtoMessage() {} func (x *Task_GetTicketTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3680,7 +3951,7 @@ type Task_ActionListSchemasTask struct { func (x *Task_ActionListSchemasTask) Reset() { *x = Task_ActionListSchemasTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3692,7 +3963,7 @@ func (x *Task_ActionListSchemasTask) String() string { func (*Task_ActionListSchemasTask) ProtoMessage() {} func (x *Task_ActionListSchemasTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3752,7 +4023,7 @@ type Task_ActionGetSchemaTask struct { func (x *Task_ActionGetSchemaTask) Reset() { *x = Task_ActionGetSchemaTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3764,7 +4035,7 @@ func (x *Task_ActionGetSchemaTask) String() string { func (*Task_ActionGetSchemaTask) ProtoMessage() {} func (x *Task_ActionGetSchemaTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3826,7 +4097,7 @@ type Task_ActionInvokeTask struct { func (x *Task_ActionInvokeTask) Reset() { *x = Task_ActionInvokeTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] + mi := 
&file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3838,7 +4109,7 @@ func (x *Task_ActionInvokeTask) String() string { func (*Task_ActionInvokeTask) ProtoMessage() {} func (x *Task_ActionInvokeTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3936,7 +4207,7 @@ type Task_ActionStatusTask struct { func (x *Task_ActionStatusTask) Reset() { *x = Task_ActionStatusTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3948,7 +4219,7 @@ func (x *Task_ActionStatusTask) String() string { func (*Task_ActionStatusTask) ProtoMessage() {} func (x *Task_ActionStatusTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4022,7 +4293,7 @@ type Task_CreateSyncDiffTask struct { func (x *Task_CreateSyncDiffTask) Reset() { *x = Task_CreateSyncDiffTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4034,7 +4305,7 @@ func (x *Task_CreateSyncDiffTask) String() string { func (*Task_CreateSyncDiffTask) ProtoMessage() {} func (x *Task_CreateSyncDiffTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4107,7 +4378,7 @@ type Task_CompactSyncs struct { func (x *Task_CompactSyncs) Reset() { *x = Task_CompactSyncs{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4119,7 +4390,7 @@ func (x *Task_CompactSyncs) String() string { func (*Task_CompactSyncs) ProtoMessage() {} func (x *Task_CompactSyncs) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4178,7 +4449,7 @@ type Task_CompactSyncs_CompactableSync struct { func (x *Task_CompactSyncs_CompactableSync) Reset() { *x = Task_CompactSyncs_CompactableSync{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4190,7 +4461,7 @@ func (x *Task_CompactSyncs_CompactableSync) String() string { func (*Task_CompactSyncs_CompactableSync) ProtoMessage() {} func (x *Task_CompactSyncs_CompactableSync) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] if x != 
nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4250,7 +4521,7 @@ type BatonServiceHelloRequest_BuildInfo struct { func (x *BatonServiceHelloRequest_BuildInfo) Reset() { *x = BatonServiceHelloRequest_BuildInfo{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4262,7 +4533,7 @@ func (x *BatonServiceHelloRequest_BuildInfo) String() string { func (*BatonServiceHelloRequest_BuildInfo) ProtoMessage() {} func (x *BatonServiceHelloRequest_BuildInfo) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4340,7 +4611,7 @@ type BatonServiceHelloRequest_OSInfo struct { func (x *BatonServiceHelloRequest_OSInfo) Reset() { *x = BatonServiceHelloRequest_OSInfo{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4352,7 +4623,7 @@ func (x *BatonServiceHelloRequest_OSInfo) String() string { func (*BatonServiceHelloRequest_OSInfo) ProtoMessage() {} func (x *BatonServiceHelloRequest_OSInfo) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4490,7 +4761,7 @@ type BatonServiceUploadAssetRequest_UploadMetadata struct { func (x *BatonServiceUploadAssetRequest_UploadMetadata) Reset() { *x = BatonServiceUploadAssetRequest_UploadMetadata{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4502,7 +4773,7 @@ func (x *BatonServiceUploadAssetRequest_UploadMetadata) String() string { func (*BatonServiceUploadAssetRequest_UploadMetadata) ProtoMessage() {} func (x *BatonServiceUploadAssetRequest_UploadMetadata) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4574,7 +4845,7 @@ type BatonServiceUploadAssetRequest_UploadData struct { func (x *BatonServiceUploadAssetRequest_UploadData) Reset() { *x = BatonServiceUploadAssetRequest_UploadData{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4586,7 +4857,7 @@ func (x *BatonServiceUploadAssetRequest_UploadData) String() string { func (*BatonServiceUploadAssetRequest_UploadData) ProtoMessage() {} func (x *BatonServiceUploadAssetRequest_UploadData) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4636,7 +4907,7 @@ type 
BatonServiceUploadAssetRequest_UploadEOF struct { func (x *BatonServiceUploadAssetRequest_UploadEOF) Reset() { *x = BatonServiceUploadAssetRequest_UploadEOF{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4648,7 +4919,7 @@ func (x *BatonServiceUploadAssetRequest_UploadEOF) String() string { func (*BatonServiceUploadAssetRequest_UploadEOF) ProtoMessage() {} func (x *BatonServiceUploadAssetRequest_UploadEOF) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4712,7 +4983,7 @@ type BatonServiceFinishTaskRequest_Error struct { func (x *BatonServiceFinishTaskRequest_Error) Reset() { *x = BatonServiceFinishTaskRequest_Error{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4724,7 +4995,7 @@ func (x *BatonServiceFinishTaskRequest_Error) String() string { func (*BatonServiceFinishTaskRequest_Error) ProtoMessage() {} func (x *BatonServiceFinishTaskRequest_Error) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4809,7 +5080,7 @@ type BatonServiceFinishTaskRequest_Success struct { func (x *BatonServiceFinishTaskRequest_Success) Reset() { *x = BatonServiceFinishTaskRequest_Success{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4821,7 +5092,7 @@ func (x *BatonServiceFinishTaskRequest_Success) String() string { func (*BatonServiceFinishTaskRequest_Success) ProtoMessage() {} func (x *BatonServiceFinishTaskRequest_Success) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4886,7 +5157,7 @@ var File_c1_connectorapi_baton_v1_baton_proto protoreflect.FileDescriptor const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\n" + - "$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\xb9)\n" + + 
"$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\xa2-\n" + "\x04Task\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12=\n" + "\x06status\x18\x02 \x01(\x0e2%.c1.connectorapi.baton.v1.Task.StatusR\x06status\x12=\n" + @@ -4912,7 +5183,10 @@ const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\raction_invoke\x18u \x01(\v2/.c1.connectorapi.baton.v1.Task.ActionInvokeTaskH\x00R\factionInvoke\x12V\n" + "\raction_status\x18v \x01(\v2/.c1.connectorapi.baton.v1.Task.ActionStatusTaskH\x00R\factionStatus\x12]\n" + "\x10create_sync_diff\x18w \x01(\v21.c1.connectorapi.baton.v1.Task.CreateSyncDiffTaskH\x00R\x0ecreateSyncDiff\x12R\n" + - "\rcompact_syncs\x18x \x01(\v2+.c1.connectorapi.baton.v1.Task.CompactSyncsH\x00R\fcompactSyncs\x12\x14\n" + + "\rcompact_syncs\x18x \x01(\v2+.c1.connectorapi.baton.v1.Task.CompactSyncsH\x00R\fcompactSyncs\x12]\n" + + "\x10list_event_feeds\x18y \x01(\v21.c1.connectorapi.baton.v1.Task.ListEventFeedsTaskH\x00R\x0elistEventFeeds\x12P\n" + + "\vlist_events\x18z \x01(\v2-.c1.connectorapi.baton.v1.Task.ListEventsTaskH\x00R\n" + + "listEvents\x12\x14\n" + "\x05debug\x18\x03 \x01(\bR\x05debug\x1aB\n" + "\bNoneTask\x126\n" + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1aC\n" + @@ -4925,7 +5199,16 @@ const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\x17targeted_sync_resources\x18\x04 \x03(\v2\x19.c1.connector.v2.ResourceR\x15targetedSyncResources\x1a~\n" + "\rEventFeedTask\x126\n" + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x125\n" + - "\bstart_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x1a\xf3\x01\n" + + "\bstart_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x1a\xe7\x01\n" + + "\x0eListEventsTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12%\n" + + "\x06cursor\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\x06cursor\x125\n" + + "\bstart_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x12\"\n" + + "\revent_feed_id\x18\x04 \x01(\tR\veventFeedId\x12\x1b\n" + + "\tpage_size\x18\x05 \x01(\rR\bpageSize\x1aL\n" + + "\x12ListEventFeedsTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xf3\x01\n" + "\tGrantTask\x12>\n" + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementR\ventitlement\x127\n" + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceR\tprincipal\x126\n" + @@ -5100,7 +5383,7 @@ const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\x0eStartDebugging\x12/.c1.connectorapi.baton.v1.StartDebuggingRequest\x1a0.c1.connectorapi.baton.v1.StartDebuggingResponse\"\x00B7Z5gitlab.com/ductone/c1/pkg/pb/c1/connectorapi/baton/v1b\x06proto3" var file_c1_connectorapi_baton_v1_baton_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_c1_connectorapi_baton_v1_baton_proto_msgTypes = make([]protoimpl.MessageInfo, 42) +var file_c1_connectorapi_baton_v1_baton_proto_msgTypes = make([]protoimpl.MessageInfo, 44) var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{ (Task_Status)(0), // 0: c1.connectorapi.baton.v1.Task.Status (*Task)(nil), 
// 1: c1.connectorapi.baton.v1.Task @@ -5120,149 +5403,156 @@ var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{ (*Task_HelloTask)(nil), // 15: c1.connectorapi.baton.v1.Task.HelloTask (*Task_SyncFullTask)(nil), // 16: c1.connectorapi.baton.v1.Task.SyncFullTask (*Task_EventFeedTask)(nil), // 17: c1.connectorapi.baton.v1.Task.EventFeedTask - (*Task_GrantTask)(nil), // 18: c1.connectorapi.baton.v1.Task.GrantTask - (*Task_RevokeTask)(nil), // 19: c1.connectorapi.baton.v1.Task.RevokeTask - (*Task_CreateAccountTask)(nil), // 20: c1.connectorapi.baton.v1.Task.CreateAccountTask - (*Task_CreateResourceTask)(nil), // 21: c1.connectorapi.baton.v1.Task.CreateResourceTask - (*Task_DeleteResourceTask)(nil), // 22: c1.connectorapi.baton.v1.Task.DeleteResourceTask - (*Task_RotateCredentialsTask)(nil), // 23: c1.connectorapi.baton.v1.Task.RotateCredentialsTask - (*Task_CreateTicketTask)(nil), // 24: c1.connectorapi.baton.v1.Task.CreateTicketTask - (*Task_BulkCreateTicketsTask)(nil), // 25: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask - (*Task_BulkGetTicketsTask)(nil), // 26: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask - (*Task_ListTicketSchemasTask)(nil), // 27: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask - (*Task_GetTicketTask)(nil), // 28: c1.connectorapi.baton.v1.Task.GetTicketTask - (*Task_ActionListSchemasTask)(nil), // 29: c1.connectorapi.baton.v1.Task.ActionListSchemasTask - (*Task_ActionGetSchemaTask)(nil), // 30: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask - (*Task_ActionInvokeTask)(nil), // 31: c1.connectorapi.baton.v1.Task.ActionInvokeTask - (*Task_ActionStatusTask)(nil), // 32: c1.connectorapi.baton.v1.Task.ActionStatusTask - (*Task_CreateSyncDiffTask)(nil), // 33: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask - (*Task_CompactSyncs)(nil), // 34: c1.connectorapi.baton.v1.Task.CompactSyncs - (*Task_CompactSyncs_CompactableSync)(nil), // 35: c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync - (*BatonServiceHelloRequest_BuildInfo)(nil), // 36: c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo - (*BatonServiceHelloRequest_OSInfo)(nil), // 37: c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo - (*BatonServiceUploadAssetRequest_UploadMetadata)(nil), // 38: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata - (*BatonServiceUploadAssetRequest_UploadData)(nil), // 39: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData - (*BatonServiceUploadAssetRequest_UploadEOF)(nil), // 40: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF - (*BatonServiceFinishTaskRequest_Error)(nil), // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error - (*BatonServiceFinishTaskRequest_Success)(nil), // 42: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success - (*v2.ConnectorMetadata)(nil), // 43: c1.connector.v2.ConnectorMetadata - (*anypb.Any)(nil), // 44: google.protobuf.Any - (*durationpb.Duration)(nil), // 45: google.protobuf.Duration - (*status.Status)(nil), // 46: google.rpc.Status - (*v2.Resource)(nil), // 47: c1.connector.v2.Resource - (*timestamppb.Timestamp)(nil), // 48: google.protobuf.Timestamp - (*v2.Entitlement)(nil), // 49: c1.connector.v2.Entitlement - (*v2.Grant)(nil), // 50: c1.connector.v2.Grant - (*v2.AccountInfo)(nil), // 51: c1.connector.v2.AccountInfo - (*v2.CredentialOptions)(nil), // 52: c1.connector.v2.CredentialOptions - (*v2.EncryptionConfig)(nil), // 53: c1.connector.v2.EncryptionConfig - (*v2.ResourceId)(nil), // 54: c1.connector.v2.ResourceId - 
(*v2.TicketRequest)(nil), // 55: c1.connector.v2.TicketRequest - (*v2.TicketSchema)(nil), // 56: c1.connector.v2.TicketSchema - (*structpb.Struct)(nil), // 57: google.protobuf.Struct + (*Task_ListEventsTask)(nil), // 18: c1.connectorapi.baton.v1.Task.ListEventsTask + (*Task_ListEventFeedsTask)(nil), // 19: c1.connectorapi.baton.v1.Task.ListEventFeedsTask + (*Task_GrantTask)(nil), // 20: c1.connectorapi.baton.v1.Task.GrantTask + (*Task_RevokeTask)(nil), // 21: c1.connectorapi.baton.v1.Task.RevokeTask + (*Task_CreateAccountTask)(nil), // 22: c1.connectorapi.baton.v1.Task.CreateAccountTask + (*Task_CreateResourceTask)(nil), // 23: c1.connectorapi.baton.v1.Task.CreateResourceTask + (*Task_DeleteResourceTask)(nil), // 24: c1.connectorapi.baton.v1.Task.DeleteResourceTask + (*Task_RotateCredentialsTask)(nil), // 25: c1.connectorapi.baton.v1.Task.RotateCredentialsTask + (*Task_CreateTicketTask)(nil), // 26: c1.connectorapi.baton.v1.Task.CreateTicketTask + (*Task_BulkCreateTicketsTask)(nil), // 27: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask + (*Task_BulkGetTicketsTask)(nil), // 28: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask + (*Task_ListTicketSchemasTask)(nil), // 29: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask + (*Task_GetTicketTask)(nil), // 30: c1.connectorapi.baton.v1.Task.GetTicketTask + (*Task_ActionListSchemasTask)(nil), // 31: c1.connectorapi.baton.v1.Task.ActionListSchemasTask + (*Task_ActionGetSchemaTask)(nil), // 32: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask + (*Task_ActionInvokeTask)(nil), // 33: c1.connectorapi.baton.v1.Task.ActionInvokeTask + (*Task_ActionStatusTask)(nil), // 34: c1.connectorapi.baton.v1.Task.ActionStatusTask + (*Task_CreateSyncDiffTask)(nil), // 35: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask + (*Task_CompactSyncs)(nil), // 36: c1.connectorapi.baton.v1.Task.CompactSyncs + (*Task_CompactSyncs_CompactableSync)(nil), // 37: c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync + (*BatonServiceHelloRequest_BuildInfo)(nil), // 38: c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo + (*BatonServiceHelloRequest_OSInfo)(nil), // 39: c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo + (*BatonServiceUploadAssetRequest_UploadMetadata)(nil), // 40: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata + (*BatonServiceUploadAssetRequest_UploadData)(nil), // 41: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData + (*BatonServiceUploadAssetRequest_UploadEOF)(nil), // 42: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF + (*BatonServiceFinishTaskRequest_Error)(nil), // 43: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error + (*BatonServiceFinishTaskRequest_Success)(nil), // 44: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success + (*v2.ConnectorMetadata)(nil), // 45: c1.connector.v2.ConnectorMetadata + (*anypb.Any)(nil), // 46: google.protobuf.Any + (*durationpb.Duration)(nil), // 47: google.protobuf.Duration + (*status.Status)(nil), // 48: google.rpc.Status + (*v2.Resource)(nil), // 49: c1.connector.v2.Resource + (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp + (*v2.Entitlement)(nil), // 51: c1.connector.v2.Entitlement + (*v2.Grant)(nil), // 52: c1.connector.v2.Grant + (*v2.AccountInfo)(nil), // 53: c1.connector.v2.AccountInfo + (*v2.CredentialOptions)(nil), // 54: c1.connector.v2.CredentialOptions + (*v2.EncryptionConfig)(nil), // 55: c1.connector.v2.EncryptionConfig + (*v2.ResourceId)(nil), // 56: c1.connector.v2.ResourceId + 
(*v2.TicketRequest)(nil), // 57: c1.connector.v2.TicketRequest + (*v2.TicketSchema)(nil), // 58: c1.connector.v2.TicketSchema + (*structpb.Struct)(nil), // 59: google.protobuf.Struct } var file_c1_connectorapi_baton_v1_baton_proto_depIdxs = []int32{ 0, // 0: c1.connectorapi.baton.v1.Task.status:type_name -> c1.connectorapi.baton.v1.Task.Status 14, // 1: c1.connectorapi.baton.v1.Task.none:type_name -> c1.connectorapi.baton.v1.Task.NoneTask 15, // 2: c1.connectorapi.baton.v1.Task.hello:type_name -> c1.connectorapi.baton.v1.Task.HelloTask 16, // 3: c1.connectorapi.baton.v1.Task.sync_full:type_name -> c1.connectorapi.baton.v1.Task.SyncFullTask - 18, // 4: c1.connectorapi.baton.v1.Task.grant:type_name -> c1.connectorapi.baton.v1.Task.GrantTask - 19, // 5: c1.connectorapi.baton.v1.Task.revoke:type_name -> c1.connectorapi.baton.v1.Task.RevokeTask - 20, // 6: c1.connectorapi.baton.v1.Task.create_account:type_name -> c1.connectorapi.baton.v1.Task.CreateAccountTask - 21, // 7: c1.connectorapi.baton.v1.Task.create_resource:type_name -> c1.connectorapi.baton.v1.Task.CreateResourceTask - 22, // 8: c1.connectorapi.baton.v1.Task.delete_resource:type_name -> c1.connectorapi.baton.v1.Task.DeleteResourceTask - 23, // 9: c1.connectorapi.baton.v1.Task.rotate_credentials:type_name -> c1.connectorapi.baton.v1.Task.RotateCredentialsTask + 20, // 4: c1.connectorapi.baton.v1.Task.grant:type_name -> c1.connectorapi.baton.v1.Task.GrantTask + 21, // 5: c1.connectorapi.baton.v1.Task.revoke:type_name -> c1.connectorapi.baton.v1.Task.RevokeTask + 22, // 6: c1.connectorapi.baton.v1.Task.create_account:type_name -> c1.connectorapi.baton.v1.Task.CreateAccountTask + 23, // 7: c1.connectorapi.baton.v1.Task.create_resource:type_name -> c1.connectorapi.baton.v1.Task.CreateResourceTask + 24, // 8: c1.connectorapi.baton.v1.Task.delete_resource:type_name -> c1.connectorapi.baton.v1.Task.DeleteResourceTask + 25, // 9: c1.connectorapi.baton.v1.Task.rotate_credentials:type_name -> c1.connectorapi.baton.v1.Task.RotateCredentialsTask 17, // 10: c1.connectorapi.baton.v1.Task.event_feed:type_name -> c1.connectorapi.baton.v1.Task.EventFeedTask - 24, // 11: c1.connectorapi.baton.v1.Task.create_ticket_task:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask - 27, // 12: c1.connectorapi.baton.v1.Task.list_ticket_schemas:type_name -> c1.connectorapi.baton.v1.Task.ListTicketSchemasTask - 28, // 13: c1.connectorapi.baton.v1.Task.get_ticket:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask - 25, // 14: c1.connectorapi.baton.v1.Task.bulk_create_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask - 26, // 15: c1.connectorapi.baton.v1.Task.bulk_get_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkGetTicketsTask - 29, // 16: c1.connectorapi.baton.v1.Task.action_list_schemas:type_name -> c1.connectorapi.baton.v1.Task.ActionListSchemasTask - 30, // 17: c1.connectorapi.baton.v1.Task.action_get_schema:type_name -> c1.connectorapi.baton.v1.Task.ActionGetSchemaTask - 31, // 18: c1.connectorapi.baton.v1.Task.action_invoke:type_name -> c1.connectorapi.baton.v1.Task.ActionInvokeTask - 32, // 19: c1.connectorapi.baton.v1.Task.action_status:type_name -> c1.connectorapi.baton.v1.Task.ActionStatusTask - 33, // 20: c1.connectorapi.baton.v1.Task.create_sync_diff:type_name -> c1.connectorapi.baton.v1.Task.CreateSyncDiffTask - 34, // 21: c1.connectorapi.baton.v1.Task.compact_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs - 36, // 22: c1.connectorapi.baton.v1.BatonServiceHelloRequest.build_info:type_name -> 
c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo - 37, // 23: c1.connectorapi.baton.v1.BatonServiceHelloRequest.os_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo - 43, // 24: c1.connectorapi.baton.v1.BatonServiceHelloRequest.connector_metadata:type_name -> c1.connector.v2.ConnectorMetadata - 44, // 25: c1.connectorapi.baton.v1.BatonServiceHelloRequest.annotations:type_name -> google.protobuf.Any - 44, // 26: c1.connectorapi.baton.v1.BatonServiceHelloResponse.annotations:type_name -> google.protobuf.Any - 1, // 27: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.task:type_name -> c1.connectorapi.baton.v1.Task - 45, // 28: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_poll:type_name -> google.protobuf.Duration - 45, // 29: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_heartbeat:type_name -> google.protobuf.Duration - 44, // 30: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.annotations:type_name -> google.protobuf.Any - 44, // 31: c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest.annotations:type_name -> google.protobuf.Any - 45, // 32: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.next_heartbeat:type_name -> google.protobuf.Duration - 44, // 33: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.annotations:type_name -> google.protobuf.Any - 38, // 34: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.metadata:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata - 39, // 35: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.data:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData - 40, // 36: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.eof:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF - 44, // 37: c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse.annotations:type_name -> google.protobuf.Any - 46, // 38: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.status:type_name -> google.rpc.Status - 41, // 39: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.error:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error - 42, // 40: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.success:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success - 44, // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse.annotations:type_name -> google.protobuf.Any - 44, // 42: c1.connectorapi.baton.v1.Task.NoneTask.annotations:type_name -> google.protobuf.Any - 44, // 43: c1.connectorapi.baton.v1.Task.HelloTask.annotations:type_name -> google.protobuf.Any - 44, // 44: c1.connectorapi.baton.v1.Task.SyncFullTask.annotations:type_name -> google.protobuf.Any - 47, // 45: c1.connectorapi.baton.v1.Task.SyncFullTask.targeted_sync_resources:type_name -> c1.connector.v2.Resource - 44, // 46: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any - 48, // 47: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp - 49, // 48: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> c1.connector.v2.Entitlement - 47, // 49: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource - 44, // 50: c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any - 45, // 51: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration - 50, // 52: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> 
c1.connector.v2.Grant - 44, // 53: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any - 51, // 54: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo - 52, // 55: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions - 53, // 56: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig - 47, // 57: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource - 54, // 58: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId - 54, // 59: c1.connectorapi.baton.v1.Task.DeleteResourceTask.parent_resource_id:type_name -> c1.connector.v2.ResourceId - 54, // 60: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId - 52, // 61: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions - 53, // 62: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig - 55, // 63: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest - 56, // 64: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema - 44, // 65: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any - 24, // 66: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask - 28, // 67: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask - 44, // 68: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any - 44, // 69: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any - 44, // 70: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any - 44, // 71: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any - 57, // 72: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct - 44, // 73: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any - 44, // 74: c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any - 44, // 75: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask.annotations:type_name -> google.protobuf.Any - 35, // 76: c1.connectorapi.baton.v1.Task.CompactSyncs.compactable_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync - 44, // 77: c1.connectorapi.baton.v1.Task.CompactSyncs.annotations:type_name -> google.protobuf.Any - 44, // 78: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any - 44, // 79: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> google.protobuf.Any - 44, // 80: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any - 44, // 81: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any - 44, // 82: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any - 44, // 83: 
c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any - 2, // 84: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest - 4, // 85: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest - 6, // 86: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest - 10, // 87: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest - 8, // 88: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest - 12, // 89: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest - 3, // 90: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse - 5, // 91: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse - 7, // 92: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse - 11, // 93: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse - 9, // 94: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse - 13, // 95: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse - 90, // [90:96] is the sub-list for method output_type - 84, // [84:90] is the sub-list for method input_type - 84, // [84:84] is the sub-list for extension type_name - 84, // [84:84] is the sub-list for extension extendee - 0, // [0:84] is the sub-list for field type_name + 26, // 11: c1.connectorapi.baton.v1.Task.create_ticket_task:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask + 29, // 12: c1.connectorapi.baton.v1.Task.list_ticket_schemas:type_name -> c1.connectorapi.baton.v1.Task.ListTicketSchemasTask + 30, // 13: c1.connectorapi.baton.v1.Task.get_ticket:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask + 27, // 14: c1.connectorapi.baton.v1.Task.bulk_create_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask + 28, // 15: c1.connectorapi.baton.v1.Task.bulk_get_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkGetTicketsTask + 31, // 16: c1.connectorapi.baton.v1.Task.action_list_schemas:type_name -> c1.connectorapi.baton.v1.Task.ActionListSchemasTask + 32, // 17: c1.connectorapi.baton.v1.Task.action_get_schema:type_name -> c1.connectorapi.baton.v1.Task.ActionGetSchemaTask + 33, // 18: c1.connectorapi.baton.v1.Task.action_invoke:type_name -> c1.connectorapi.baton.v1.Task.ActionInvokeTask + 34, // 19: c1.connectorapi.baton.v1.Task.action_status:type_name -> c1.connectorapi.baton.v1.Task.ActionStatusTask + 35, // 20: c1.connectorapi.baton.v1.Task.create_sync_diff:type_name -> c1.connectorapi.baton.v1.Task.CreateSyncDiffTask + 36, // 21: c1.connectorapi.baton.v1.Task.compact_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs + 19, // 22: c1.connectorapi.baton.v1.Task.list_event_feeds:type_name -> c1.connectorapi.baton.v1.Task.ListEventFeedsTask + 18, // 23: c1.connectorapi.baton.v1.Task.list_events:type_name -> c1.connectorapi.baton.v1.Task.ListEventsTask + 38, // 24: 
c1.connectorapi.baton.v1.BatonServiceHelloRequest.build_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo + 39, // 25: c1.connectorapi.baton.v1.BatonServiceHelloRequest.os_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo + 45, // 26: c1.connectorapi.baton.v1.BatonServiceHelloRequest.connector_metadata:type_name -> c1.connector.v2.ConnectorMetadata + 46, // 27: c1.connectorapi.baton.v1.BatonServiceHelloRequest.annotations:type_name -> google.protobuf.Any + 46, // 28: c1.connectorapi.baton.v1.BatonServiceHelloResponse.annotations:type_name -> google.protobuf.Any + 1, // 29: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.task:type_name -> c1.connectorapi.baton.v1.Task + 47, // 30: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_poll:type_name -> google.protobuf.Duration + 47, // 31: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_heartbeat:type_name -> google.protobuf.Duration + 46, // 32: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.annotations:type_name -> google.protobuf.Any + 46, // 33: c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest.annotations:type_name -> google.protobuf.Any + 47, // 34: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.next_heartbeat:type_name -> google.protobuf.Duration + 46, // 35: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.annotations:type_name -> google.protobuf.Any + 40, // 36: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.metadata:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata + 41, // 37: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.data:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData + 42, // 38: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.eof:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF + 46, // 39: c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse.annotations:type_name -> google.protobuf.Any + 48, // 40: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.status:type_name -> google.rpc.Status + 43, // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.error:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error + 44, // 42: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.success:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success + 46, // 43: c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse.annotations:type_name -> google.protobuf.Any + 46, // 44: c1.connectorapi.baton.v1.Task.NoneTask.annotations:type_name -> google.protobuf.Any + 46, // 45: c1.connectorapi.baton.v1.Task.HelloTask.annotations:type_name -> google.protobuf.Any + 46, // 46: c1.connectorapi.baton.v1.Task.SyncFullTask.annotations:type_name -> google.protobuf.Any + 49, // 47: c1.connectorapi.baton.v1.Task.SyncFullTask.targeted_sync_resources:type_name -> c1.connector.v2.Resource + 46, // 48: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any + 50, // 49: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp + 46, // 50: c1.connectorapi.baton.v1.Task.ListEventsTask.annotations:type_name -> google.protobuf.Any + 50, // 51: c1.connectorapi.baton.v1.Task.ListEventsTask.start_at:type_name -> google.protobuf.Timestamp + 46, // 52: c1.connectorapi.baton.v1.Task.ListEventFeedsTask.annotations:type_name -> google.protobuf.Any + 51, // 53: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> 
c1.connector.v2.Entitlement + 49, // 54: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource + 46, // 55: c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any + 47, // 56: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration + 52, // 57: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> c1.connector.v2.Grant + 46, // 58: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any + 53, // 59: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo + 54, // 60: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions + 55, // 61: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 49, // 62: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource + 56, // 63: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId + 56, // 64: c1.connectorapi.baton.v1.Task.DeleteResourceTask.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 56, // 65: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId + 54, // 66: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions + 55, // 67: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 57, // 68: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest + 58, // 69: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema + 46, // 70: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any + 26, // 71: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask + 30, // 72: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask + 46, // 73: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any + 46, // 74: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any + 46, // 75: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any + 46, // 76: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any + 59, // 77: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct + 46, // 78: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any + 46, // 79: c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any + 46, // 80: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask.annotations:type_name -> google.protobuf.Any + 37, // 81: c1.connectorapi.baton.v1.Task.CompactSyncs.compactable_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync + 46, // 82: c1.connectorapi.baton.v1.Task.CompactSyncs.annotations:type_name -> google.protobuf.Any + 46, // 83: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any + 46, // 84: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> 
google.protobuf.Any + 46, // 85: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any + 46, // 86: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any + 46, // 87: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any + 46, // 88: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any + 2, // 89: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest + 4, // 90: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest + 6, // 91: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest + 10, // 92: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest + 8, // 93: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest + 12, // 94: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest + 3, // 95: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse + 5, // 96: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse + 7, // 97: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse + 11, // 98: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse + 9, // 99: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse + 13, // 100: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse + 95, // [95:101] is the sub-list for method output_type + 89, // [89:95] is the sub-list for method input_type + 89, // [89:89] is the sub-list for extension type_name + 89, // [89:89] is the sub-list for extension extendee + 0, // [0:89] is the sub-list for field type_name } func init() { file_c1_connectorapi_baton_v1_baton_proto_init() } @@ -5292,6 +5582,8 @@ func file_c1_connectorapi_baton_v1_baton_proto_init() { (*Task_ActionStatus)(nil), (*Task_CreateSyncDiff)(nil), (*Task_CompactSyncs_)(nil), + (*Task_ListEventFeeds)(nil), + (*Task_ListEvents)(nil), } file_c1_connectorapi_baton_v1_baton_proto_msgTypes[7].OneofWrappers = []any{ (*BatonServiceUploadAssetRequest_Metadata)(nil), @@ -5308,7 +5600,7 @@ func file_c1_connectorapi_baton_v1_baton_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_baton_proto_rawDesc), len(file_c1_connectorapi_baton_v1_baton_proto_rawDesc)), NumEnums: 1, - NumMessages: 42, + NumMessages: 44, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go index a7b9b06d..3219a398 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go @@ -924,6 +924,88 @@ func (m *Task) 
validate(all bool) error { } } + case *Task_ListEventFeeds: + if v == nil { + err := TaskValidationError{ + field: "TaskType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetListEventFeeds()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskValidationError{ + field: "ListEventFeeds", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskValidationError{ + field: "ListEventFeeds", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListEventFeeds()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskValidationError{ + field: "ListEventFeeds", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *Task_ListEvents: + if v == nil { + err := TaskValidationError{ + field: "TaskType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetListEvents()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskValidationError{ + field: "ListEvents", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskValidationError{ + field: "ListEvents", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetListEvents()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskValidationError{ + field: "ListEvents", + reason: "embedded message failed validation", + cause: err, + } + } + } + default: _ = v // ensures v is used } @@ -3697,6 +3779,326 @@ var _ interface { ErrorName() string } = Task_EventFeedTaskValidationError{} +// Validate checks the field values on Task_ListEventsTask with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Task_ListEventsTask) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Task_ListEventsTask with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Task_ListEventsTaskMultiError, or nil if none found. 
+func (m *Task_ListEventsTask) ValidateAll() error { + return m.validate(true) +} + +func (m *Task_ListEventsTask) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetAnnotations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Task_ListEventsTaskValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Task_ListEventsTaskValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Task_ListEventsTaskValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if m.GetCursor() != "" { + + if l := len(m.GetCursor()); l < 1 || l > 4096 { + err := Task_ListEventsTaskValidationError{ + field: "Cursor", + reason: "value length must be between 1 and 4096 bytes, inclusive", + } + if !all { + return err + } + errors = append(errors, err) + } + + } + + if all { + switch v := interface{}(m.GetStartAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Task_ListEventsTaskValidationError{ + field: "StartAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Task_ListEventsTaskValidationError{ + field: "StartAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetStartAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Task_ListEventsTaskValidationError{ + field: "StartAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EventFeedId + + // no validation rules for PageSize + + if len(errors) > 0 { + return Task_ListEventsTaskMultiError(errors) + } + + return nil +} + +// Task_ListEventsTaskMultiError is an error wrapping multiple validation +// errors returned by Task_ListEventsTask.ValidateAll() if the designated +// constraints aren't met. +type Task_ListEventsTaskMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Task_ListEventsTaskMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m Task_ListEventsTaskMultiError) AllErrors() []error { return m } + +// Task_ListEventsTaskValidationError is the validation error returned by +// Task_ListEventsTask.Validate if the designated constraints aren't met. +type Task_ListEventsTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Task_ListEventsTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e Task_ListEventsTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Task_ListEventsTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Task_ListEventsTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Task_ListEventsTaskValidationError) ErrorName() string { + return "Task_ListEventsTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e Task_ListEventsTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTask_ListEventsTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Task_ListEventsTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Task_ListEventsTaskValidationError{} + +// Validate checks the field values on Task_ListEventFeedsTask with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *Task_ListEventFeedsTask) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Task_ListEventFeedsTask with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// Task_ListEventFeedsTaskMultiError, or nil if none found. +func (m *Task_ListEventFeedsTask) ValidateAll() error { + return m.validate(true) +} + +func (m *Task_ListEventFeedsTask) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetAnnotations() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, Task_ListEventFeedsTaskValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, Task_ListEventFeedsTaskValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return Task_ListEventFeedsTaskValidationError{ + field: fmt.Sprintf("Annotations[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return Task_ListEventFeedsTaskMultiError(errors) + } + + return nil +} + +// Task_ListEventFeedsTaskMultiError is an error wrapping multiple validation +// errors returned by Task_ListEventFeedsTask.ValidateAll() if the designated +// constraints aren't met. +type Task_ListEventFeedsTaskMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m Task_ListEventFeedsTaskMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m Task_ListEventFeedsTaskMultiError) AllErrors() []error { return m } + +// Task_ListEventFeedsTaskValidationError is the validation error returned by +// Task_ListEventFeedsTask.Validate if the designated constraints aren't met. +type Task_ListEventFeedsTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e Task_ListEventFeedsTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e Task_ListEventFeedsTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e Task_ListEventFeedsTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e Task_ListEventFeedsTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e Task_ListEventFeedsTaskValidationError) ErrorName() string { + return "Task_ListEventFeedsTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e Task_ListEventFeedsTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTask_ListEventFeedsTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = Task_ListEventFeedsTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = Task_ListEventFeedsTaskValidationError{} + // Validate checks the field values on Task_GrantTask with the rules defined in // the proto definition for this message. If any rules are violated, the first // error encountered is returned, or nil if there are no violations. 
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go index 65db2f42..df8c1b94 100644 --- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go +++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton_protoopaque.pb.go @@ -317,6 +317,24 @@ func (x *Task) GetCompactSyncs() *Task_CompactSyncs { return nil } +func (x *Task) GetListEventFeeds() *Task_ListEventFeedsTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_ListEventFeeds); ok { + return x.ListEventFeeds + } + } + return nil +} + +func (x *Task) GetListEvents() *Task_ListEventsTask { + if x != nil { + if x, ok := x.xxx_hidden_TaskType.(*task_ListEvents); ok { + return x.ListEvents + } + } + return nil +} + func (x *Task) GetDebug() bool { if x != nil { return x.xxx_hidden_Debug @@ -500,6 +518,22 @@ func (x *Task) SetCompactSyncs(v *Task_CompactSyncs) { x.xxx_hidden_TaskType = &task_CompactSyncs_{v} } +func (x *Task) SetListEventFeeds(v *Task_ListEventFeedsTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_ListEventFeeds{v} +} + +func (x *Task) SetListEvents(v *Task_ListEventsTask) { + if v == nil { + x.xxx_hidden_TaskType = nil + return + } + x.xxx_hidden_TaskType = &task_ListEvents{v} +} + func (x *Task) SetDebug(v bool) { x.xxx_hidden_Debug = v } @@ -679,6 +713,22 @@ func (x *Task) HasCompactSyncs() bool { return ok } +func (x *Task) HasListEventFeeds() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_ListEventFeeds) + return ok +} + +func (x *Task) HasListEvents() bool { + if x == nil { + return false + } + _, ok := x.xxx_hidden_TaskType.(*task_ListEvents) + return ok +} + func (x *Task) ClearTaskType() { x.xxx_hidden_TaskType = nil } @@ -809,6 +859,18 @@ func (x *Task) ClearCompactSyncs() { } } +func (x *Task) ClearListEventFeeds() { + if _, ok := x.xxx_hidden_TaskType.(*task_ListEventFeeds); ok { + x.xxx_hidden_TaskType = nil + } +} + +func (x *Task) ClearListEvents() { + if _, ok := x.xxx_hidden_TaskType.(*task_ListEvents); ok { + x.xxx_hidden_TaskType = nil + } +} + const Task_TaskType_not_set_case case_Task_TaskType = 0 const Task_None_case case_Task_TaskType = 100 const Task_Hello_case case_Task_TaskType = 101 @@ -831,6 +893,8 @@ const Task_ActionInvoke_case case_Task_TaskType = 117 const Task_ActionStatus_case case_Task_TaskType = 118 const Task_CreateSyncDiff_case case_Task_TaskType = 119 const Task_CompactSyncs_case case_Task_TaskType = 120 +const Task_ListEventFeeds_case case_Task_TaskType = 121 +const Task_ListEvents_case case_Task_TaskType = 122 func (x *Task) WhichTaskType() case_Task_TaskType { if x == nil { @@ -879,6 +943,10 @@ func (x *Task) WhichTaskType() case_Task_TaskType { return Task_CreateSyncDiff_case case *task_CompactSyncs_: return Task_CompactSyncs_case + case *task_ListEventFeeds: + return Task_ListEventFeeds_case + case *task_ListEvents: + return Task_ListEvents_case default: return Task_TaskType_not_set_case } @@ -911,6 +979,8 @@ type Task_builder struct { ActionStatus *Task_ActionStatusTask CreateSyncDiff *Task_CreateSyncDiffTask CompactSyncs *Task_CompactSyncs + ListEventFeeds *Task_ListEventFeedsTask + ListEvents *Task_ListEventsTask // -- end of xxx_hidden_TaskType Debug bool } @@ -984,6 +1054,12 @@ func (b0 Task_builder) Build() *Task { if b.CompactSyncs != nil { x.xxx_hidden_TaskType = 
&task_CompactSyncs_{b.CompactSyncs} } + if b.ListEventFeeds != nil { + x.xxx_hidden_TaskType = &task_ListEventFeeds{b.ListEventFeeds} + } + if b.ListEvents != nil { + x.xxx_hidden_TaskType = &task_ListEvents{b.ListEvents} + } x.xxx_hidden_Debug = b.Debug return m0 } @@ -1086,6 +1162,14 @@ type task_CompactSyncs_ struct { CompactSyncs *Task_CompactSyncs `protobuf:"bytes,120,opt,name=compact_syncs,json=compactSyncs,proto3,oneof"` } +type task_ListEventFeeds struct { + ListEventFeeds *Task_ListEventFeedsTask `protobuf:"bytes,121,opt,name=list_event_feeds,json=listEventFeeds,proto3,oneof"` +} + +type task_ListEvents struct { + ListEvents *Task_ListEventsTask `protobuf:"bytes,122,opt,name=list_events,json=listEvents,proto3,oneof"` +} + func (*task_None) isTask_TaskType() {} func (*task_Hello) isTask_TaskType() {} @@ -1128,6 +1212,10 @@ func (*task_CreateSyncDiff) isTask_TaskType() {} func (*task_CompactSyncs_) isTask_TaskType() {} +func (*task_ListEventFeeds) isTask_TaskType() {} + +func (*task_ListEvents) isTask_TaskType() {} + type BatonServiceHelloRequest struct { state protoimpl.MessageState `protogen:"opaque.v1"` xxx_hidden_HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3"` @@ -2688,6 +2776,191 @@ func (b0 Task_EventFeedTask_builder) Build() *Task_EventFeedTask { return m0 } +type Task_ListEventsTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + xxx_hidden_Cursor string `protobuf:"bytes,2,opt,name=cursor,proto3"` + xxx_hidden_StartAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_at,json=startAt,proto3"` + xxx_hidden_EventFeedId string `protobuf:"bytes,4,opt,name=event_feed_id,json=eventFeedId,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ListEventsTask) Reset() { + *x = Task_ListEventsTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ListEventsTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ListEventsTask) ProtoMessage() {} + +func (x *Task_ListEventsTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ListEventsTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_ListEventsTask) GetCursor() string { + if x != nil { + return x.xxx_hidden_Cursor + } + return "" +} + +func (x *Task_ListEventsTask) GetStartAt() *timestamppb.Timestamp { + if x != nil { + return x.xxx_hidden_StartAt + } + return nil +} + +func (x *Task_ListEventsTask) GetEventFeedId() string { + if x != nil { + return x.xxx_hidden_EventFeedId + } + return "" +} + +func (x *Task_ListEventsTask) GetPageSize() uint32 { + if x != nil { + return x.xxx_hidden_PageSize + } + return 0 +} + +func (x *Task_ListEventsTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +func (x *Task_ListEventsTask) SetCursor(v string) { + x.xxx_hidden_Cursor = v +} + +func (x *Task_ListEventsTask) SetStartAt(v *timestamppb.Timestamp) { 
+ x.xxx_hidden_StartAt = v +} + +func (x *Task_ListEventsTask) SetEventFeedId(v string) { + x.xxx_hidden_EventFeedId = v +} + +func (x *Task_ListEventsTask) SetPageSize(v uint32) { + x.xxx_hidden_PageSize = v +} + +func (x *Task_ListEventsTask) HasStartAt() bool { + if x == nil { + return false + } + return x.xxx_hidden_StartAt != nil +} + +func (x *Task_ListEventsTask) ClearStartAt() { + x.xxx_hidden_StartAt = nil +} + +type Task_ListEventsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + + Annotations []*anypb.Any + Cursor string + StartAt *timestamppb.Timestamp + EventFeedId string + PageSize uint32 +} + +func (b0 Task_ListEventsTask_builder) Build() *Task_ListEventsTask { + m0 := &Task_ListEventsTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + x.xxx_hidden_Cursor = b.Cursor + x.xxx_hidden_StartAt = b.StartAt + x.xxx_hidden_EventFeedId = b.EventFeedId + x.xxx_hidden_PageSize = b.PageSize + return m0 +} + +type Task_ListEventFeedsTask struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Task_ListEventFeedsTask) Reset() { + *x = Task_ListEventFeedsTask{} + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Task_ListEventFeedsTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task_ListEventFeedsTask) ProtoMessage() {} + +func (x *Task_ListEventFeedsTask) ProtoReflect() protoreflect.Message { + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *Task_ListEventFeedsTask) GetAnnotations() []*anypb.Any { + if x != nil { + if x.xxx_hidden_Annotations != nil { + return *x.xxx_hidden_Annotations + } + } + return nil +} + +func (x *Task_ListEventFeedsTask) SetAnnotations(v []*anypb.Any) { + x.xxx_hidden_Annotations = &v +} + +type Task_ListEventFeedsTask_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Annotations []*anypb.Any +} + +func (b0 Task_ListEventFeedsTask_builder) Build() *Task_ListEventFeedsTask { + m0 := &Task_ListEventFeedsTask{} + b, x := &b0, m0 + _, _ = b, x + x.xxx_hidden_Annotations = &b.Annotations + return m0 +} + type Task_GrantTask struct { state protoimpl.MessageState `protogen:"opaque.v1"` xxx_hidden_Entitlement *v2.Entitlement `protobuf:"bytes,1,opt,name=entitlement,proto3"` @@ -2700,7 +2973,7 @@ type Task_GrantTask struct { func (x *Task_GrantTask) Reset() { *x = Task_GrantTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2712,7 +2985,7 @@ func (x *Task_GrantTask) String() string { func (*Task_GrantTask) ProtoMessage() {} func (x *Task_GrantTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[17] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2832,7 +3105,7 @@ type Task_RevokeTask struct { func (x *Task_RevokeTask) Reset() { *x = Task_RevokeTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2844,7 +3117,7 @@ func (x *Task_RevokeTask) String() string { func (*Task_RevokeTask) ProtoMessage() {} func (x *Task_RevokeTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[18] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2918,7 +3191,7 @@ type Task_CreateAccountTask struct { func (x *Task_CreateAccountTask) Reset() { *x = Task_CreateAccountTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2930,7 +3203,7 @@ func (x *Task_CreateAccountTask) String() string { func (*Task_CreateAccountTask) ProtoMessage() {} func (x *Task_CreateAccountTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[19] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3038,7 +3311,7 @@ type Task_CreateResourceTask struct { func (x *Task_CreateResourceTask) Reset() { *x = Task_CreateResourceTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3050,7 +3323,7 @@ func (x *Task_CreateResourceTask) String() string { func (*Task_CreateResourceTask) ProtoMessage() {} func (x *Task_CreateResourceTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[20] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3107,7 +3380,7 @@ type Task_DeleteResourceTask struct { func (x *Task_DeleteResourceTask) Reset() { *x = Task_DeleteResourceTask{} - mi := 
&file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3119,7 +3392,7 @@ func (x *Task_DeleteResourceTask) String() string { func (*Task_DeleteResourceTask) ProtoMessage() {} func (x *Task_DeleteResourceTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[21] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3201,7 +3474,7 @@ type Task_RotateCredentialsTask struct { func (x *Task_RotateCredentialsTask) Reset() { *x = Task_RotateCredentialsTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3213,7 +3486,7 @@ func (x *Task_RotateCredentialsTask) String() string { func (*Task_RotateCredentialsTask) ProtoMessage() {} func (x *Task_RotateCredentialsTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[22] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3310,7 +3583,7 @@ type Task_CreateTicketTask struct { func (x *Task_CreateTicketTask) Reset() { *x = Task_CreateTicketTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3322,7 +3595,7 @@ func (x *Task_CreateTicketTask) String() string { func (*Task_CreateTicketTask) ProtoMessage() {} func (x *Task_CreateTicketTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[23] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3417,7 +3690,7 @@ type Task_BulkCreateTicketsTask struct { func (x *Task_BulkCreateTicketsTask) Reset() { *x = Task_BulkCreateTicketsTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3429,7 +3702,7 @@ func (x *Task_BulkCreateTicketsTask) String() string { func (*Task_BulkCreateTicketsTask) ProtoMessage() {} func (x *Task_BulkCreateTicketsTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[24] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3476,7 +3749,7 @@ type Task_BulkGetTicketsTask struct { func (x *Task_BulkGetTicketsTask) Reset() { *x = Task_BulkGetTicketsTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +3761,7 @@ func (x *Task_BulkGetTicketsTask) String() string { func (*Task_BulkGetTicketsTask) ProtoMessage() {} func (x *Task_BulkGetTicketsTask) ProtoReflect() protoreflect.Message { - mi := 
&file_c1_connectorapi_baton_v1_baton_proto_msgTypes[25] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3535,7 +3808,7 @@ type Task_ListTicketSchemasTask struct { func (x *Task_ListTicketSchemasTask) Reset() { *x = Task_ListTicketSchemasTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3547,7 +3820,7 @@ func (x *Task_ListTicketSchemasTask) String() string { func (*Task_ListTicketSchemasTask) ProtoMessage() {} func (x *Task_ListTicketSchemasTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[26] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3595,7 +3868,7 @@ type Task_GetTicketTask struct { func (x *Task_GetTicketTask) Reset() { *x = Task_GetTicketTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3607,7 +3880,7 @@ func (x *Task_GetTicketTask) String() string { func (*Task_GetTicketTask) ProtoMessage() {} func (x *Task_GetTicketTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[27] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3668,7 +3941,7 @@ type Task_ActionListSchemasTask struct { func (x *Task_ActionListSchemasTask) Reset() { *x = Task_ActionListSchemasTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3680,7 +3953,7 @@ func (x *Task_ActionListSchemasTask) String() string { func (*Task_ActionListSchemasTask) ProtoMessage() {} func (x *Task_ActionListSchemasTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[28] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3742,7 +4015,7 @@ type Task_ActionGetSchemaTask struct { func (x *Task_ActionGetSchemaTask) Reset() { *x = Task_ActionGetSchemaTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3754,7 +4027,7 @@ func (x *Task_ActionGetSchemaTask) String() string { func (*Task_ActionGetSchemaTask) ProtoMessage() {} func (x *Task_ActionGetSchemaTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[29] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3817,7 +4090,7 @@ type Task_ActionInvokeTask struct { func (x *Task_ActionInvokeTask) Reset() { *x = Task_ActionInvokeTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] + mi := 
&file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3829,7 +4102,7 @@ func (x *Task_ActionInvokeTask) String() string { func (*Task_ActionInvokeTask) ProtoMessage() {} func (x *Task_ActionInvokeTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[30] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3929,7 +4202,7 @@ type Task_ActionStatusTask struct { func (x *Task_ActionStatusTask) Reset() { *x = Task_ActionStatusTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3941,7 +4214,7 @@ func (x *Task_ActionStatusTask) String() string { func (*Task_ActionStatusTask) ProtoMessage() {} func (x *Task_ActionStatusTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[31] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4016,7 +4289,7 @@ type Task_CreateSyncDiffTask struct { func (x *Task_CreateSyncDiffTask) Reset() { *x = Task_CreateSyncDiffTask{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4028,7 +4301,7 @@ func (x *Task_CreateSyncDiffTask) String() string { func (*Task_CreateSyncDiffTask) ProtoMessage() {} func (x *Task_CreateSyncDiffTask) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4103,7 +4376,7 @@ type Task_CompactSyncs struct { func (x *Task_CompactSyncs) Reset() { *x = Task_CompactSyncs{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4115,7 +4388,7 @@ func (x *Task_CompactSyncs) String() string { func (*Task_CompactSyncs) ProtoMessage() {} func (x *Task_CompactSyncs) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4178,7 +4451,7 @@ type Task_CompactSyncs_CompactableSync struct { func (x *Task_CompactSyncs_CompactableSync) Reset() { *x = Task_CompactSyncs_CompactableSync{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4190,7 +4463,7 @@ func (x *Task_CompactSyncs_CompactableSync) String() string { func (*Task_CompactSyncs_CompactableSync) ProtoMessage() {} func (x *Task_CompactSyncs_CompactableSync) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] if x != 
nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4250,7 +4523,7 @@ type BatonServiceHelloRequest_BuildInfo struct { func (x *BatonServiceHelloRequest_BuildInfo) Reset() { *x = BatonServiceHelloRequest_BuildInfo{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4262,7 +4535,7 @@ func (x *BatonServiceHelloRequest_BuildInfo) String() string { func (*BatonServiceHelloRequest_BuildInfo) ProtoMessage() {} func (x *BatonServiceHelloRequest_BuildInfo) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4340,7 +4613,7 @@ type BatonServiceHelloRequest_OSInfo struct { func (x *BatonServiceHelloRequest_OSInfo) Reset() { *x = BatonServiceHelloRequest_OSInfo{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4352,7 +4625,7 @@ func (x *BatonServiceHelloRequest_OSInfo) String() string { func (*BatonServiceHelloRequest_OSInfo) ProtoMessage() {} func (x *BatonServiceHelloRequest_OSInfo) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4490,7 +4763,7 @@ type BatonServiceUploadAssetRequest_UploadMetadata struct { func (x *BatonServiceUploadAssetRequest_UploadMetadata) Reset() { *x = BatonServiceUploadAssetRequest_UploadMetadata{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4502,7 +4775,7 @@ func (x *BatonServiceUploadAssetRequest_UploadMetadata) String() string { func (*BatonServiceUploadAssetRequest_UploadMetadata) ProtoMessage() {} func (x *BatonServiceUploadAssetRequest_UploadMetadata) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4575,7 +4848,7 @@ type BatonServiceUploadAssetRequest_UploadData struct { func (x *BatonServiceUploadAssetRequest_UploadData) Reset() { *x = BatonServiceUploadAssetRequest_UploadData{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4587,7 +4860,7 @@ func (x *BatonServiceUploadAssetRequest_UploadData) String() string { func (*BatonServiceUploadAssetRequest_UploadData) ProtoMessage() {} func (x *BatonServiceUploadAssetRequest_UploadData) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4637,7 +4910,7 @@ type 
BatonServiceUploadAssetRequest_UploadEOF struct { func (x *BatonServiceUploadAssetRequest_UploadEOF) Reset() { *x = BatonServiceUploadAssetRequest_UploadEOF{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4649,7 +4922,7 @@ func (x *BatonServiceUploadAssetRequest_UploadEOF) String() string { func (*BatonServiceUploadAssetRequest_UploadEOF) ProtoMessage() {} func (x *BatonServiceUploadAssetRequest_UploadEOF) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4714,7 +4987,7 @@ type BatonServiceFinishTaskRequest_Error struct { func (x *BatonServiceFinishTaskRequest_Error) Reset() { *x = BatonServiceFinishTaskRequest_Error{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4726,7 +4999,7 @@ func (x *BatonServiceFinishTaskRequest_Error) String() string { func (*BatonServiceFinishTaskRequest_Error) ProtoMessage() {} func (x *BatonServiceFinishTaskRequest_Error) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4812,7 +5085,7 @@ type BatonServiceFinishTaskRequest_Success struct { func (x *BatonServiceFinishTaskRequest_Success) Reset() { *x = BatonServiceFinishTaskRequest_Success{} - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4824,7 +5097,7 @@ func (x *BatonServiceFinishTaskRequest_Success) String() string { func (*BatonServiceFinishTaskRequest_Success) ProtoMessage() {} func (x *BatonServiceFinishTaskRequest_Success) ProtoReflect() protoreflect.Message { - mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41] + mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4891,7 +5164,7 @@ var File_c1_connectorapi_baton_v1_baton_proto protoreflect.FileDescriptor const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\n" + - "$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\xb9)\n" + + 
"$c1/connectorapi/baton/v1/baton.proto\x12\x18c1.connectorapi.baton.v1\x1a\x1fc1/connector/v2/connector.proto\x1a!c1/connector/v2/entitlement.proto\x1a\x1bc1/connector/v2/grant.proto\x1a\x1ec1/connector/v2/resource.proto\x1a\x1cc1/connector/v2/ticket.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17validate/validate.proto\"\xa2-\n" + "\x04Task\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12=\n" + "\x06status\x18\x02 \x01(\x0e2%.c1.connectorapi.baton.v1.Task.StatusR\x06status\x12=\n" + @@ -4917,7 +5190,10 @@ const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\raction_invoke\x18u \x01(\v2/.c1.connectorapi.baton.v1.Task.ActionInvokeTaskH\x00R\factionInvoke\x12V\n" + "\raction_status\x18v \x01(\v2/.c1.connectorapi.baton.v1.Task.ActionStatusTaskH\x00R\factionStatus\x12]\n" + "\x10create_sync_diff\x18w \x01(\v21.c1.connectorapi.baton.v1.Task.CreateSyncDiffTaskH\x00R\x0ecreateSyncDiff\x12R\n" + - "\rcompact_syncs\x18x \x01(\v2+.c1.connectorapi.baton.v1.Task.CompactSyncsH\x00R\fcompactSyncs\x12\x14\n" + + "\rcompact_syncs\x18x \x01(\v2+.c1.connectorapi.baton.v1.Task.CompactSyncsH\x00R\fcompactSyncs\x12]\n" + + "\x10list_event_feeds\x18y \x01(\v21.c1.connectorapi.baton.v1.Task.ListEventFeedsTaskH\x00R\x0elistEventFeeds\x12P\n" + + "\vlist_events\x18z \x01(\v2-.c1.connectorapi.baton.v1.Task.ListEventsTaskH\x00R\n" + + "listEvents\x12\x14\n" + "\x05debug\x18\x03 \x01(\bR\x05debug\x1aB\n" + "\bNoneTask\x126\n" + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1aC\n" + @@ -4930,7 +5206,16 @@ const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\x17targeted_sync_resources\x18\x04 \x03(\v2\x19.c1.connector.v2.ResourceR\x15targetedSyncResources\x1a~\n" + "\rEventFeedTask\x126\n" + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x125\n" + - "\bstart_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x1a\xf3\x01\n" + + "\bstart_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x1a\xe7\x01\n" + + "\x0eListEventsTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x12%\n" + + "\x06cursor\x18\x02 \x01(\tB\r\xfaB\n" + + "r\b \x01(\x80 \xd0\x01\x01R\x06cursor\x125\n" + + "\bstart_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\astartAt\x12\"\n" + + "\revent_feed_id\x18\x04 \x01(\tR\veventFeedId\x12\x1b\n" + + "\tpage_size\x18\x05 \x01(\rR\bpageSize\x1aL\n" + + "\x12ListEventFeedsTask\x126\n" + + "\vannotations\x18\x01 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x1a\xf3\x01\n" + "\tGrantTask\x12>\n" + "\ventitlement\x18\x01 \x01(\v2\x1c.c1.connector.v2.EntitlementR\ventitlement\x127\n" + "\tprincipal\x18\x02 \x01(\v2\x19.c1.connector.v2.ResourceR\tprincipal\x126\n" + @@ -5105,7 +5390,7 @@ const file_c1_connectorapi_baton_v1_baton_proto_rawDesc = "" + "\x0eStartDebugging\x12/.c1.connectorapi.baton.v1.StartDebuggingRequest\x1a0.c1.connectorapi.baton.v1.StartDebuggingResponse\"\x00B7Z5gitlab.com/ductone/c1/pkg/pb/c1/connectorapi/baton/v1b\x06proto3" var file_c1_connectorapi_baton_v1_baton_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_c1_connectorapi_baton_v1_baton_proto_msgTypes = make([]protoimpl.MessageInfo, 42) +var file_c1_connectorapi_baton_v1_baton_proto_msgTypes = make([]protoimpl.MessageInfo, 44) var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{ (Task_Status)(0), // 0: c1.connectorapi.baton.v1.Task.Status (*Task)(nil), 
// 1: c1.connectorapi.baton.v1.Task @@ -5125,149 +5410,156 @@ var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{ (*Task_HelloTask)(nil), // 15: c1.connectorapi.baton.v1.Task.HelloTask (*Task_SyncFullTask)(nil), // 16: c1.connectorapi.baton.v1.Task.SyncFullTask (*Task_EventFeedTask)(nil), // 17: c1.connectorapi.baton.v1.Task.EventFeedTask - (*Task_GrantTask)(nil), // 18: c1.connectorapi.baton.v1.Task.GrantTask - (*Task_RevokeTask)(nil), // 19: c1.connectorapi.baton.v1.Task.RevokeTask - (*Task_CreateAccountTask)(nil), // 20: c1.connectorapi.baton.v1.Task.CreateAccountTask - (*Task_CreateResourceTask)(nil), // 21: c1.connectorapi.baton.v1.Task.CreateResourceTask - (*Task_DeleteResourceTask)(nil), // 22: c1.connectorapi.baton.v1.Task.DeleteResourceTask - (*Task_RotateCredentialsTask)(nil), // 23: c1.connectorapi.baton.v1.Task.RotateCredentialsTask - (*Task_CreateTicketTask)(nil), // 24: c1.connectorapi.baton.v1.Task.CreateTicketTask - (*Task_BulkCreateTicketsTask)(nil), // 25: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask - (*Task_BulkGetTicketsTask)(nil), // 26: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask - (*Task_ListTicketSchemasTask)(nil), // 27: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask - (*Task_GetTicketTask)(nil), // 28: c1.connectorapi.baton.v1.Task.GetTicketTask - (*Task_ActionListSchemasTask)(nil), // 29: c1.connectorapi.baton.v1.Task.ActionListSchemasTask - (*Task_ActionGetSchemaTask)(nil), // 30: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask - (*Task_ActionInvokeTask)(nil), // 31: c1.connectorapi.baton.v1.Task.ActionInvokeTask - (*Task_ActionStatusTask)(nil), // 32: c1.connectorapi.baton.v1.Task.ActionStatusTask - (*Task_CreateSyncDiffTask)(nil), // 33: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask - (*Task_CompactSyncs)(nil), // 34: c1.connectorapi.baton.v1.Task.CompactSyncs - (*Task_CompactSyncs_CompactableSync)(nil), // 35: c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync - (*BatonServiceHelloRequest_BuildInfo)(nil), // 36: c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo - (*BatonServiceHelloRequest_OSInfo)(nil), // 37: c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo - (*BatonServiceUploadAssetRequest_UploadMetadata)(nil), // 38: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata - (*BatonServiceUploadAssetRequest_UploadData)(nil), // 39: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData - (*BatonServiceUploadAssetRequest_UploadEOF)(nil), // 40: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF - (*BatonServiceFinishTaskRequest_Error)(nil), // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error - (*BatonServiceFinishTaskRequest_Success)(nil), // 42: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success - (*v2.ConnectorMetadata)(nil), // 43: c1.connector.v2.ConnectorMetadata - (*anypb.Any)(nil), // 44: google.protobuf.Any - (*durationpb.Duration)(nil), // 45: google.protobuf.Duration - (*status.Status)(nil), // 46: google.rpc.Status - (*v2.Resource)(nil), // 47: c1.connector.v2.Resource - (*timestamppb.Timestamp)(nil), // 48: google.protobuf.Timestamp - (*v2.Entitlement)(nil), // 49: c1.connector.v2.Entitlement - (*v2.Grant)(nil), // 50: c1.connector.v2.Grant - (*v2.AccountInfo)(nil), // 51: c1.connector.v2.AccountInfo - (*v2.CredentialOptions)(nil), // 52: c1.connector.v2.CredentialOptions - (*v2.EncryptionConfig)(nil), // 53: c1.connector.v2.EncryptionConfig - (*v2.ResourceId)(nil), // 54: c1.connector.v2.ResourceId - 
(*v2.TicketRequest)(nil), // 55: c1.connector.v2.TicketRequest - (*v2.TicketSchema)(nil), // 56: c1.connector.v2.TicketSchema - (*structpb.Struct)(nil), // 57: google.protobuf.Struct + (*Task_ListEventsTask)(nil), // 18: c1.connectorapi.baton.v1.Task.ListEventsTask + (*Task_ListEventFeedsTask)(nil), // 19: c1.connectorapi.baton.v1.Task.ListEventFeedsTask + (*Task_GrantTask)(nil), // 20: c1.connectorapi.baton.v1.Task.GrantTask + (*Task_RevokeTask)(nil), // 21: c1.connectorapi.baton.v1.Task.RevokeTask + (*Task_CreateAccountTask)(nil), // 22: c1.connectorapi.baton.v1.Task.CreateAccountTask + (*Task_CreateResourceTask)(nil), // 23: c1.connectorapi.baton.v1.Task.CreateResourceTask + (*Task_DeleteResourceTask)(nil), // 24: c1.connectorapi.baton.v1.Task.DeleteResourceTask + (*Task_RotateCredentialsTask)(nil), // 25: c1.connectorapi.baton.v1.Task.RotateCredentialsTask + (*Task_CreateTicketTask)(nil), // 26: c1.connectorapi.baton.v1.Task.CreateTicketTask + (*Task_BulkCreateTicketsTask)(nil), // 27: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask + (*Task_BulkGetTicketsTask)(nil), // 28: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask + (*Task_ListTicketSchemasTask)(nil), // 29: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask + (*Task_GetTicketTask)(nil), // 30: c1.connectorapi.baton.v1.Task.GetTicketTask + (*Task_ActionListSchemasTask)(nil), // 31: c1.connectorapi.baton.v1.Task.ActionListSchemasTask + (*Task_ActionGetSchemaTask)(nil), // 32: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask + (*Task_ActionInvokeTask)(nil), // 33: c1.connectorapi.baton.v1.Task.ActionInvokeTask + (*Task_ActionStatusTask)(nil), // 34: c1.connectorapi.baton.v1.Task.ActionStatusTask + (*Task_CreateSyncDiffTask)(nil), // 35: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask + (*Task_CompactSyncs)(nil), // 36: c1.connectorapi.baton.v1.Task.CompactSyncs + (*Task_CompactSyncs_CompactableSync)(nil), // 37: c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync + (*BatonServiceHelloRequest_BuildInfo)(nil), // 38: c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo + (*BatonServiceHelloRequest_OSInfo)(nil), // 39: c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo + (*BatonServiceUploadAssetRequest_UploadMetadata)(nil), // 40: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata + (*BatonServiceUploadAssetRequest_UploadData)(nil), // 41: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData + (*BatonServiceUploadAssetRequest_UploadEOF)(nil), // 42: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF + (*BatonServiceFinishTaskRequest_Error)(nil), // 43: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error + (*BatonServiceFinishTaskRequest_Success)(nil), // 44: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success + (*v2.ConnectorMetadata)(nil), // 45: c1.connector.v2.ConnectorMetadata + (*anypb.Any)(nil), // 46: google.protobuf.Any + (*durationpb.Duration)(nil), // 47: google.protobuf.Duration + (*status.Status)(nil), // 48: google.rpc.Status + (*v2.Resource)(nil), // 49: c1.connector.v2.Resource + (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp + (*v2.Entitlement)(nil), // 51: c1.connector.v2.Entitlement + (*v2.Grant)(nil), // 52: c1.connector.v2.Grant + (*v2.AccountInfo)(nil), // 53: c1.connector.v2.AccountInfo + (*v2.CredentialOptions)(nil), // 54: c1.connector.v2.CredentialOptions + (*v2.EncryptionConfig)(nil), // 55: c1.connector.v2.EncryptionConfig + (*v2.ResourceId)(nil), // 56: c1.connector.v2.ResourceId + 
(*v2.TicketRequest)(nil), // 57: c1.connector.v2.TicketRequest + (*v2.TicketSchema)(nil), // 58: c1.connector.v2.TicketSchema + (*structpb.Struct)(nil), // 59: google.protobuf.Struct } var file_c1_connectorapi_baton_v1_baton_proto_depIdxs = []int32{ 0, // 0: c1.connectorapi.baton.v1.Task.status:type_name -> c1.connectorapi.baton.v1.Task.Status 14, // 1: c1.connectorapi.baton.v1.Task.none:type_name -> c1.connectorapi.baton.v1.Task.NoneTask 15, // 2: c1.connectorapi.baton.v1.Task.hello:type_name -> c1.connectorapi.baton.v1.Task.HelloTask 16, // 3: c1.connectorapi.baton.v1.Task.sync_full:type_name -> c1.connectorapi.baton.v1.Task.SyncFullTask - 18, // 4: c1.connectorapi.baton.v1.Task.grant:type_name -> c1.connectorapi.baton.v1.Task.GrantTask - 19, // 5: c1.connectorapi.baton.v1.Task.revoke:type_name -> c1.connectorapi.baton.v1.Task.RevokeTask - 20, // 6: c1.connectorapi.baton.v1.Task.create_account:type_name -> c1.connectorapi.baton.v1.Task.CreateAccountTask - 21, // 7: c1.connectorapi.baton.v1.Task.create_resource:type_name -> c1.connectorapi.baton.v1.Task.CreateResourceTask - 22, // 8: c1.connectorapi.baton.v1.Task.delete_resource:type_name -> c1.connectorapi.baton.v1.Task.DeleteResourceTask - 23, // 9: c1.connectorapi.baton.v1.Task.rotate_credentials:type_name -> c1.connectorapi.baton.v1.Task.RotateCredentialsTask + 20, // 4: c1.connectorapi.baton.v1.Task.grant:type_name -> c1.connectorapi.baton.v1.Task.GrantTask + 21, // 5: c1.connectorapi.baton.v1.Task.revoke:type_name -> c1.connectorapi.baton.v1.Task.RevokeTask + 22, // 6: c1.connectorapi.baton.v1.Task.create_account:type_name -> c1.connectorapi.baton.v1.Task.CreateAccountTask + 23, // 7: c1.connectorapi.baton.v1.Task.create_resource:type_name -> c1.connectorapi.baton.v1.Task.CreateResourceTask + 24, // 8: c1.connectorapi.baton.v1.Task.delete_resource:type_name -> c1.connectorapi.baton.v1.Task.DeleteResourceTask + 25, // 9: c1.connectorapi.baton.v1.Task.rotate_credentials:type_name -> c1.connectorapi.baton.v1.Task.RotateCredentialsTask 17, // 10: c1.connectorapi.baton.v1.Task.event_feed:type_name -> c1.connectorapi.baton.v1.Task.EventFeedTask - 24, // 11: c1.connectorapi.baton.v1.Task.create_ticket_task:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask - 27, // 12: c1.connectorapi.baton.v1.Task.list_ticket_schemas:type_name -> c1.connectorapi.baton.v1.Task.ListTicketSchemasTask - 28, // 13: c1.connectorapi.baton.v1.Task.get_ticket:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask - 25, // 14: c1.connectorapi.baton.v1.Task.bulk_create_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask - 26, // 15: c1.connectorapi.baton.v1.Task.bulk_get_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkGetTicketsTask - 29, // 16: c1.connectorapi.baton.v1.Task.action_list_schemas:type_name -> c1.connectorapi.baton.v1.Task.ActionListSchemasTask - 30, // 17: c1.connectorapi.baton.v1.Task.action_get_schema:type_name -> c1.connectorapi.baton.v1.Task.ActionGetSchemaTask - 31, // 18: c1.connectorapi.baton.v1.Task.action_invoke:type_name -> c1.connectorapi.baton.v1.Task.ActionInvokeTask - 32, // 19: c1.connectorapi.baton.v1.Task.action_status:type_name -> c1.connectorapi.baton.v1.Task.ActionStatusTask - 33, // 20: c1.connectorapi.baton.v1.Task.create_sync_diff:type_name -> c1.connectorapi.baton.v1.Task.CreateSyncDiffTask - 34, // 21: c1.connectorapi.baton.v1.Task.compact_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs - 36, // 22: c1.connectorapi.baton.v1.BatonServiceHelloRequest.build_info:type_name -> 
c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo - 37, // 23: c1.connectorapi.baton.v1.BatonServiceHelloRequest.os_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo - 43, // 24: c1.connectorapi.baton.v1.BatonServiceHelloRequest.connector_metadata:type_name -> c1.connector.v2.ConnectorMetadata - 44, // 25: c1.connectorapi.baton.v1.BatonServiceHelloRequest.annotations:type_name -> google.protobuf.Any - 44, // 26: c1.connectorapi.baton.v1.BatonServiceHelloResponse.annotations:type_name -> google.protobuf.Any - 1, // 27: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.task:type_name -> c1.connectorapi.baton.v1.Task - 45, // 28: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_poll:type_name -> google.protobuf.Duration - 45, // 29: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_heartbeat:type_name -> google.protobuf.Duration - 44, // 30: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.annotations:type_name -> google.protobuf.Any - 44, // 31: c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest.annotations:type_name -> google.protobuf.Any - 45, // 32: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.next_heartbeat:type_name -> google.protobuf.Duration - 44, // 33: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.annotations:type_name -> google.protobuf.Any - 38, // 34: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.metadata:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata - 39, // 35: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.data:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData - 40, // 36: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.eof:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF - 44, // 37: c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse.annotations:type_name -> google.protobuf.Any - 46, // 38: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.status:type_name -> google.rpc.Status - 41, // 39: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.error:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error - 42, // 40: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.success:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success - 44, // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse.annotations:type_name -> google.protobuf.Any - 44, // 42: c1.connectorapi.baton.v1.Task.NoneTask.annotations:type_name -> google.protobuf.Any - 44, // 43: c1.connectorapi.baton.v1.Task.HelloTask.annotations:type_name -> google.protobuf.Any - 44, // 44: c1.connectorapi.baton.v1.Task.SyncFullTask.annotations:type_name -> google.protobuf.Any - 47, // 45: c1.connectorapi.baton.v1.Task.SyncFullTask.targeted_sync_resources:type_name -> c1.connector.v2.Resource - 44, // 46: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any - 48, // 47: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp - 49, // 48: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> c1.connector.v2.Entitlement - 47, // 49: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource - 44, // 50: c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any - 45, // 51: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration - 50, // 52: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> 
c1.connector.v2.Grant - 44, // 53: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any - 51, // 54: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo - 52, // 55: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions - 53, // 56: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig - 47, // 57: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource - 54, // 58: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId - 54, // 59: c1.connectorapi.baton.v1.Task.DeleteResourceTask.parent_resource_id:type_name -> c1.connector.v2.ResourceId - 54, // 60: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId - 52, // 61: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions - 53, // 62: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig - 55, // 63: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest - 56, // 64: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema - 44, // 65: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any - 24, // 66: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask - 28, // 67: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask - 44, // 68: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any - 44, // 69: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any - 44, // 70: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any - 44, // 71: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any - 57, // 72: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct - 44, // 73: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any - 44, // 74: c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any - 44, // 75: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask.annotations:type_name -> google.protobuf.Any - 35, // 76: c1.connectorapi.baton.v1.Task.CompactSyncs.compactable_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync - 44, // 77: c1.connectorapi.baton.v1.Task.CompactSyncs.annotations:type_name -> google.protobuf.Any - 44, // 78: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any - 44, // 79: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> google.protobuf.Any - 44, // 80: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any - 44, // 81: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any - 44, // 82: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any - 44, // 83: 
c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any - 2, // 84: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest - 4, // 85: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest - 6, // 86: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest - 10, // 87: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest - 8, // 88: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest - 12, // 89: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest - 3, // 90: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse - 5, // 91: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse - 7, // 92: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse - 11, // 93: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse - 9, // 94: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse - 13, // 95: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse - 90, // [90:96] is the sub-list for method output_type - 84, // [84:90] is the sub-list for method input_type - 84, // [84:84] is the sub-list for extension type_name - 84, // [84:84] is the sub-list for extension extendee - 0, // [0:84] is the sub-list for field type_name + 26, // 11: c1.connectorapi.baton.v1.Task.create_ticket_task:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask + 29, // 12: c1.connectorapi.baton.v1.Task.list_ticket_schemas:type_name -> c1.connectorapi.baton.v1.Task.ListTicketSchemasTask + 30, // 13: c1.connectorapi.baton.v1.Task.get_ticket:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask + 27, // 14: c1.connectorapi.baton.v1.Task.bulk_create_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask + 28, // 15: c1.connectorapi.baton.v1.Task.bulk_get_tickets:type_name -> c1.connectorapi.baton.v1.Task.BulkGetTicketsTask + 31, // 16: c1.connectorapi.baton.v1.Task.action_list_schemas:type_name -> c1.connectorapi.baton.v1.Task.ActionListSchemasTask + 32, // 17: c1.connectorapi.baton.v1.Task.action_get_schema:type_name -> c1.connectorapi.baton.v1.Task.ActionGetSchemaTask + 33, // 18: c1.connectorapi.baton.v1.Task.action_invoke:type_name -> c1.connectorapi.baton.v1.Task.ActionInvokeTask + 34, // 19: c1.connectorapi.baton.v1.Task.action_status:type_name -> c1.connectorapi.baton.v1.Task.ActionStatusTask + 35, // 20: c1.connectorapi.baton.v1.Task.create_sync_diff:type_name -> c1.connectorapi.baton.v1.Task.CreateSyncDiffTask + 36, // 21: c1.connectorapi.baton.v1.Task.compact_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs + 19, // 22: c1.connectorapi.baton.v1.Task.list_event_feeds:type_name -> c1.connectorapi.baton.v1.Task.ListEventFeedsTask + 18, // 23: c1.connectorapi.baton.v1.Task.list_events:type_name -> c1.connectorapi.baton.v1.Task.ListEventsTask + 38, // 24: 
c1.connectorapi.baton.v1.BatonServiceHelloRequest.build_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo + 39, // 25: c1.connectorapi.baton.v1.BatonServiceHelloRequest.os_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo + 45, // 26: c1.connectorapi.baton.v1.BatonServiceHelloRequest.connector_metadata:type_name -> c1.connector.v2.ConnectorMetadata + 46, // 27: c1.connectorapi.baton.v1.BatonServiceHelloRequest.annotations:type_name -> google.protobuf.Any + 46, // 28: c1.connectorapi.baton.v1.BatonServiceHelloResponse.annotations:type_name -> google.protobuf.Any + 1, // 29: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.task:type_name -> c1.connectorapi.baton.v1.Task + 47, // 30: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_poll:type_name -> google.protobuf.Duration + 47, // 31: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_heartbeat:type_name -> google.protobuf.Duration + 46, // 32: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.annotations:type_name -> google.protobuf.Any + 46, // 33: c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest.annotations:type_name -> google.protobuf.Any + 47, // 34: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.next_heartbeat:type_name -> google.protobuf.Duration + 46, // 35: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.annotations:type_name -> google.protobuf.Any + 40, // 36: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.metadata:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata + 41, // 37: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.data:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData + 42, // 38: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.eof:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF + 46, // 39: c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse.annotations:type_name -> google.protobuf.Any + 48, // 40: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.status:type_name -> google.rpc.Status + 43, // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.error:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error + 44, // 42: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.success:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success + 46, // 43: c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse.annotations:type_name -> google.protobuf.Any + 46, // 44: c1.connectorapi.baton.v1.Task.NoneTask.annotations:type_name -> google.protobuf.Any + 46, // 45: c1.connectorapi.baton.v1.Task.HelloTask.annotations:type_name -> google.protobuf.Any + 46, // 46: c1.connectorapi.baton.v1.Task.SyncFullTask.annotations:type_name -> google.protobuf.Any + 49, // 47: c1.connectorapi.baton.v1.Task.SyncFullTask.targeted_sync_resources:type_name -> c1.connector.v2.Resource + 46, // 48: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any + 50, // 49: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp + 46, // 50: c1.connectorapi.baton.v1.Task.ListEventsTask.annotations:type_name -> google.protobuf.Any + 50, // 51: c1.connectorapi.baton.v1.Task.ListEventsTask.start_at:type_name -> google.protobuf.Timestamp + 46, // 52: c1.connectorapi.baton.v1.Task.ListEventFeedsTask.annotations:type_name -> google.protobuf.Any + 51, // 53: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> 
c1.connector.v2.Entitlement + 49, // 54: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource + 46, // 55: c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any + 47, // 56: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration + 52, // 57: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> c1.connector.v2.Grant + 46, // 58: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any + 53, // 59: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo + 54, // 60: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions + 55, // 61: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 49, // 62: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource + 56, // 63: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId + 56, // 64: c1.connectorapi.baton.v1.Task.DeleteResourceTask.parent_resource_id:type_name -> c1.connector.v2.ResourceId + 56, // 65: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId + 54, // 66: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions + 55, // 67: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig + 57, // 68: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest + 58, // 69: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema + 46, // 70: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any + 26, // 71: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask + 30, // 72: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask + 46, // 73: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any + 46, // 74: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any + 46, // 75: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any + 46, // 76: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any + 59, // 77: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct + 46, // 78: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any + 46, // 79: c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any + 46, // 80: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask.annotations:type_name -> google.protobuf.Any + 37, // 81: c1.connectorapi.baton.v1.Task.CompactSyncs.compactable_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync + 46, // 82: c1.connectorapi.baton.v1.Task.CompactSyncs.annotations:type_name -> google.protobuf.Any + 46, // 83: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any + 46, // 84: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> 
google.protobuf.Any + 46, // 85: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any + 46, // 86: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any + 46, // 87: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any + 46, // 88: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any + 2, // 89: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest + 4, // 90: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest + 6, // 91: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest + 10, // 92: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest + 8, // 93: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest + 12, // 94: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest + 3, // 95: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse + 5, // 96: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse + 7, // 97: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse + 11, // 98: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse + 9, // 99: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse + 13, // 100: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse + 95, // [95:101] is the sub-list for method output_type + 89, // [89:95] is the sub-list for method input_type + 89, // [89:89] is the sub-list for extension type_name + 89, // [89:89] is the sub-list for extension extendee + 0, // [0:89] is the sub-list for field type_name } func init() { file_c1_connectorapi_baton_v1_baton_proto_init() } @@ -5297,6 +5589,8 @@ func file_c1_connectorapi_baton_v1_baton_proto_init() { (*task_ActionStatus)(nil), (*task_CreateSyncDiff)(nil), (*task_CompactSyncs_)(nil), + (*task_ListEventFeeds)(nil), + (*task_ListEvents)(nil), } file_c1_connectorapi_baton_v1_baton_proto_msgTypes[7].OneofWrappers = []any{ (*batonServiceUploadAssetRequest_Metadata)(nil), @@ -5313,7 +5607,7 @@ func file_c1_connectorapi_baton_v1_baton_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_baton_proto_rawDesc), len(file_c1_connectorapi_baton_v1_baton_proto_rawDesc)), NumEnums: 1, - NumMessages: 42, + NumMessages: 44, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go index e9ebbb7f..2f28d591 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/resource_manager.go @@ -37,7 +37,7 @@ type ResourceManagerV2Limited interface { // // This is the 
recommended interface for implementing resource creation operations in new connectors. type ResourceManagerV2 interface { - ResourceSyncer + ResourceSyncerV2 ResourceManagerV2Limited } @@ -62,7 +62,7 @@ type ResourceDeleterLimited interface { // This is the recommended interface for implementing resource deletion operations in new connectors. // It differs from ResourceDeleter by having the resource, not just the id. type ResourceDeleterV2 interface { - ResourceSyncer + ResourceSyncerV2 ResourceDeleterV2Limited } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go index 296da43f..8528537b 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go @@ -83,6 +83,11 @@ type hasPrincipalResourceTypeIDsListRequest interface { GetPrincipalResourceTypeIds() []string } +type hasParentResourceIdListRequest interface { + listRequest + GetParentResourceId() *v2.ResourceId +} + type protoHasID interface { proto.Message GetId() string @@ -189,6 +194,14 @@ func listConnectorObjects[T proto.Message](ctx context.Context, c *C1File, table } } + if parentResourceIdReq, ok := req.(hasParentResourceIdListRequest); ok { + p := parentResourceIdReq.GetParentResourceId() + if p != nil && p.GetResource() != "" { + q = q.Where(goqu.C("parent_resource_id").Eq(p.GetResource())) + q = q.Where(goqu.C("parent_resource_type_id").Eq(p.GetResourceType())) + } + } + // If a sync is running, be sure we only select from the current values switch { case reqSyncID != "": diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go index b3cfeeac..b75ca3bf 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go @@ -1,3 +1,3 @@ package sdk -const Version = "v0.7.9" +const Version = "v0.7.10" diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_event_feeds.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_event_feeds.go new file mode 100644 index 00000000..e603a66f --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_event_feeds.go @@ -0,0 +1,56 @@ +package c1api + +import ( + "context" + "errors" + + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" + "google.golang.org/protobuf/proto" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/types" +) + +type listEventFeedsHelpers interface { + ConnectorClient() types.ConnectorClient + FinishTask(ctx context.Context, resp proto.Message, annos annotations.Annotations, err error) error +} + +type listEventFeedsHandler struct { + task *v1.Task + helpers listEventFeedsHelpers +} + +func (c *listEventFeedsHandler) HandleTask(ctx context.Context) error { + ctx, span := tracer.Start(ctx, "listEventFeedsHandler.HandleTask") + defer span.End() + + l := ctxzap.Extract(ctx) + cc := c.helpers.ConnectorClient() + + t := c.task.GetListEventFeeds() + if t == nil { + l.Error("get list event feeds task was nil", zap.Any("get_list_event_feeds_task", t)) + return c.helpers.FinishTask(ctx, nil, nil, errors.Join(errors.New("malformed get list event feeds task"), ErrTaskNonRetryable)) + } + + 
feeds, err := cc.ListEventFeeds(ctx, &v2.ListEventFeedsRequest{}) + if err != nil { + return err + } + + resp := v2.ListEventFeedsResponse_builder{ + List: feeds.GetList(), + }.Build() + return c.helpers.FinishTask(ctx, resp, resp.GetAnnotations(), nil) +} + +func NewListEventFeedsHandler(task *v1.Task, helpers listEventFeedsHelpers) *listEventFeedsHandler { + return &listEventFeedsHandler{ + task: task, + helpers: helpers, + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_events.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_events.go new file mode 100644 index 00000000..152259f4 --- /dev/null +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/list_events.go @@ -0,0 +1,63 @@ +package c1api + +import ( + "context" + "errors" + + "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" + "go.uber.org/zap" + "google.golang.org/protobuf/proto" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/types" +) + +type listEventsHelpers interface { + ConnectorClient() types.ConnectorClient + FinishTask(ctx context.Context, resp proto.Message, annos annotations.Annotations, err error) error +} + +type listEventsHandler struct { + task *v1.Task + helpers listEventsHelpers +} + +func (c *listEventsHandler) HandleTask(ctx context.Context) error { + ctx, span := tracer.Start(ctx, "listEventHandler.HandleTask") + defer span.End() + + l := ctxzap.Extract(ctx) + cc := c.helpers.ConnectorClient() + + t := c.task.GetListEvents() + if t == nil { + l.Error("get list event task was nil", zap.Any("get_list_event_task", t)) + return c.helpers.FinishTask(ctx, nil, nil, errors.Join(errors.New("malformed get list event task"), ErrTaskNonRetryable)) + } + + feeds, err := cc.ListEvents(ctx, v2.ListEventsRequest_builder{ + EventFeedId: t.GetEventFeedId(), + StartAt: t.GetStartAt(), + Cursor: t.GetCursor(), + PageSize: t.GetPageSize(), + }.Build()) + if err != nil { + return err + } + + resp := v2.ListEventsResponse_builder{ + Events: feeds.GetEvents(), + Cursor: feeds.GetCursor(), + HasMore: feeds.GetHasMore(), + }.Build() + return c.helpers.FinishTask(ctx, resp, resp.GetAnnotations(), nil) +} + +func NewListEventsHandler(task *v1.Task, helpers listEventsHelpers) *listEventsHandler { + return &listEventsHandler{ + task: task, + helpers: helpers, + } +} diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go index 7b6aec9d..9a3ca905 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go @@ -286,6 +286,10 @@ func (c *c1ApiTaskManager) Process(ctx context.Context, task *v1.Task, cc types. 
handler = newActionInvokeTaskHandler(task, tHelpers) case taskTypes.ActionStatusType: handler = newActionStatusTaskHandler(task, tHelpers) + case taskTypes.ListEventFeedsType: + handler = NewListEventFeedsHandler(task, tHelpers) + case taskTypes.ListEventsType: + handler = NewListEventsHandler(task, tHelpers) default: return c.finishTask(ctx, task, nil, nil, errors.New("unsupported task type")) } diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go index db0e64d4..b5590414 100644 --- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go +++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go @@ -68,6 +68,10 @@ func Is(task *v1.Task, target taskTypes.TaskType) bool { return actualType == v1.Task_ActionStatus_case case taskTypes.CreateSyncDiff: return actualType == v1.Task_CreateSyncDiff_case + case taskTypes.ListEventFeedsType: + return actualType == v1.Task_ListEventFeeds_case + case taskTypes.ListEventsType: + return actualType == v1.Task_ListEvents_case default: return false } @@ -119,6 +123,10 @@ func GetType(task *v1.Task) taskTypes.TaskType { return taskTypes.ActionStatusType case v1.Task_CreateSyncDiff_case: return taskTypes.CreateSyncDiff + case v1.Task_ListEventFeeds_case: + return taskTypes.ListEventFeedsType + case v1.Task_ListEvents_case: + return taskTypes.ListEventsType default: return taskTypes.UnknownType } diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md deleted file mode 100644 index 6f717dbd..00000000 --- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -# v4.0.4 - -## Fixed - - - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a - breaking change. See #136 / #137. - -# v4.0.3 - -## Changed - - - Allow unmarshalling JSONWebKeySets with unsupported key types (#130) - - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129) - - Dependency updates - -# v4.0.2 - -## Changed - - - Improved documentation of Verify() to note that JSONWebKeySet is a supported - argument type (#104) - - Defined exported error values for missing x5c header and unsupported elliptic - curves error cases (#117) - -# v4.0.1 - -## Fixed - - - An attacker could send a JWE containing compressed data that used large - amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. - Those functions now return an error if the decompressed data would exceed - 250kB or 10x the compressed size (whichever is larger). Thanks to - Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) - for reporting. - -# v4.0.0 - -This release makes some breaking changes in order to more thoroughly -address the vulnerabilities discussed in [Three New Attacks Against JSON Web -Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot -token". 
- -## Changed - - - Limit JWT encryption types (exclude password or public key types) (#78) - - Enforce minimum length for HMAC keys (#85) - - jwt: match any audience in a list, rather than requiring all audiences (#81) - - jwt: accept only Compact Serialization (#75) - - jws: Add expected algorithms for signatures (#74) - - Require specifying expected algorithms for ParseEncrypted, - ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, - jwt.ParseSignedAndEncrypted (#69, #74) - - Usually there is a small, known set of appropriate algorithms for a program - to use and it's a mistake to allow unexpected algorithms. For instance the - "billion hash attack" relies in part on programs accepting the PBES2 - encryption algorithm and doing the necessary work even if they weren't - specifically configured to allow PBES2. - - Revert "Strip padding off base64 strings" (#82) - - The specs require base64url encoding without padding. - - Minimum supported Go version is now 1.21 - -## Added - - - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. - - These allow parsing a specific serialization, as opposed to ParseSigned and - ParseEncrypted, which try to automatically detect which serialization was - provided. It's common to require a specific serialization for a specific - protocol - for instance JWT requires Compact serialization. - -[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf - -# v3.0.2 - -## Fixed - - - DecryptMulti: handle decompression error (#19) - -## Changed - - - jwe/CompactSerialize: improve performance (#67) - - Increase the default number of PBKDF2 iterations to 600k (#48) - - Return the proper algorithm for ECDSA keys (#45) - -## Added - - - Add Thumbprint support for opaque signers (#38) - -# v3.0.1 - -## Fixed - - - Security issue: an attacker specifying a large "p2c" value can cause - JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large - amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the - disclosure and to Tom Tervoort for originally publishing the category of attack. - https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md index 02b57495..55c55091 100644 --- a/vendor/github.com/go-jose/go-jose/v4/README.md +++ b/vendor/github.com/go-jose/go-jose/v4/README.md @@ -3,7 +3,6 @@ [![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) [![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) [![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions) Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, @@ -29,17 +28,20 @@ libraries in other languages. 
### Versions -[Version 4](https://github.com/go-jose/go-jose) -([branch](https://github.com/go-jose/go-jose/tree/main), -[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: +The forthcoming Version 5 will be released with several breaking API changes, +and will require Golang's `encoding/json/v2`, which is currently requires +Go 1.25 built with GOEXPERIMENT=jsonv2. + +Version 4 is the current stable version: import "github.com/go-jose/go-jose/v4" -The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which -are still useable but not actively developed anymore. +It supports at least the current and previous Golang release. Currently it +requires Golang 1.24. + +Version 3 is only receiving critical security updates. Migration to Version 4 is recommended. -Version 3, in this repo, is still receiving security fixes but not functionality -updates. +Versions 1 and 2 are obsolete, but can be found in the old repository, [square/go-jose](https://github.com/square/go-jose). ### Supported algorithms @@ -47,36 +49,36 @@ See below for a table of supported algorithms. Algorithm identifiers match the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518) standard where possible. The Godoc reference has a list of constants. - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 +| Key encryption | Algorithm identifier(s) | +|:-----------------------|:-----------------------------------------------| +| RSA-PKCS#1v1.5 | RSA1_5 | +| RSA-OAEP | RSA-OAEP, RSA-OAEP-256 | +| AES key wrap | A128KW, A192KW, A256KW | +| AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW | +| ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW | +| ECDH-ES (direct) | ECDH-ES1 | +| Direct encryption | dir1 | 1. Not supported in multi-recipient mode - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - Ed25519 | EdDSA2 +| Signing / MAC | Algorithm identifier(s) | +|:------------------|:------------------------| +| RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 | +| RSASSA-PSS | PS256, PS384, PS512 | +| HMAC | HS256, HS384, HS512 | +| ECDSA | ES256, ES384, ES512 | +| Ed25519 | EdDSA2 | 2. 
Only available in version 2 of the package - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM +| Content encryption | Algorithm identifier(s) | +|:-------------------|:--------------------------------------------| +| AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 | +| AES-GCM | A128GCM, A192GCM, A256GCM | - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF +| Compression | Algorithm identifiers(s) | +|:-------------------|--------------------------| +| DEFLATE (RFC 1951) | DEF | ### Supported key types @@ -85,12 +87,12 @@ library, and can be passed to corresponding functions such as `NewEncrypter` or `NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which allows attaching a key id. - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) - AES, HMAC | []byte +| Algorithm(s) | Corresponding types | +|:------------------|--------------------------------------------------------------------------------------------------------------------------------------| +| RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) | +| ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) | +| EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) | +| AES, HMAC | []byte | 1. 
Only available in version 2 or later of the package diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go index d81b03b4..31290fc8 100644 --- a/vendor/github.com/go-jose/go-jose/v4/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v4/crypter.go @@ -286,6 +286,10 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey return newSymmetricRecipient(alg, encryptionKey) case string: return newSymmetricRecipient(alg, []byte(encryptionKey)) + case JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err case *JSONWebKey: recipient, err := makeJWERecipient(alg, encryptionKey.Key) recipient.keyID = encryptionKey.KeyID @@ -450,13 +454,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") } - critical, err := headers.getCritical() + err := headers.checkNoCritical() if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + return nil, err } key, err := tryJWKS(decryptionKey, obj.Header) @@ -523,13 +523,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { globalHeaders := obj.mergedHeaders(nil) - critical, err := globalHeaders.getCritical() + err := globalHeaders.checkNoCritical() if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") + return -1, Header{}, nil, err } key, err := tryJWKS(decryptionKey, obj.Header) diff --git a/vendor/github.com/go-jose/go-jose/v4/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go index 9f1322dc..6102f910 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go @@ -274,7 +274,7 @@ func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncr if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) { return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms) } - if alg != "" && !containsContentEncryption(contentEncryption, enc) { + if enc != "" && !containsContentEncryption(contentEncryption, enc) { return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption) } return nil @@ -288,11 +288,20 @@ func ParseEncryptedCompact( keyAlgorithms []KeyAlgorithm, contentEncryption []ContentEncryption, ) (*JSONWebEncryption, error) { - // Five parts is four separators - if strings.Count(input, ".") != 4 { - return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") + var parts [5]string + var ok bool + + for i := range 4 { + parts[i], input, ok = strings.Cut(input, ".") + if !ok { + return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts") + } + } + // Validate that the last part does not contain more dots + if strings.ContainsRune(input, '.') { + return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts") } - parts := strings.SplitN(input, ".", 5) + parts[4] = input rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) if err != nil { diff --git 
a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go index 9e57e93b..164d6a16 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -175,6 +175,8 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) { } // UnmarshalJSON reads a key from its JSON representation. +// +// Returns ErrUnsupportedKeyType for unrecognized or unsupported "kty" header values. func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { var raw rawJSONWebKey err = json.Unmarshal(data, &raw) @@ -228,7 +230,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { } key, err = raw.symmetricKey() case "OKP": - if raw.Crv == "Ed25519" && raw.X != nil { + if raw.Crv == "Ed25519" { if raw.D != nil { key, err = raw.edPrivateKey() if err == nil { @@ -238,17 +240,27 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { key, err = raw.edPublicKey() keyPub = key } - } else { - return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv) } - default: - return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) + case "": + // kty MUST be present + err = fmt.Errorf("go-jose/go-jose: missing json web key type") } if err != nil { return } + if key == nil { + // RFC 7517: + // 5. JWK Set Format + // ... + // Implementations SHOULD ignore JWKs within a JWK Set that use "kty" + // (key type) values that are not understood by them, that are missing + // required members, or for which values are out of the supported + // ranges. + return ErrUnsupportedKeyType + } + if certPub != nil && keyPub != nil { if !reflect.DeepEqual(certPub, keyPub) { return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match") @@ -581,10 +593,10 @@ func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) { func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) { var missing []string - switch { - case key.D == nil: + if key.D == nil { missing = append(missing, "D") - case key.X == nil: + } + if key.X == nil { missing = append(missing, "X") } @@ -611,19 +623,21 @@ func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) { func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) { var missing []string - switch { - case key.N == nil: + if key.N == nil { missing = append(missing, "N") - case key.E == nil: + } + if key.E == nil { missing = append(missing, "E") - case key.D == nil: + } + if key.D == nil { missing = append(missing, "D") - case key.P == nil: + } + if key.P == nil { missing = append(missing, "P") - case key.Q == nil: + } + if key.Q == nil { missing = append(missing, "Q") } - if len(missing) > 0 { return nil, fmt.Errorf("go-jose/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", ")) } @@ -698,8 +712,19 @@ func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) { return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv) } - if key.X == nil || key.Y == nil || key.D == nil { - return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing x/y/d values") + var missing []string + if key.X == nil { + missing = append(missing, "X") + } + if key.Y == nil { + missing = append(missing, "Y") + } + if key.D == nil { + missing = append(missing, "D") + } + + if len(missing) > 0 { + return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing %s value(s)", strings.Join(missing, ", ")) } // The length of this octet string MUST be the full size of a coordinate for diff 
--git a/vendor/github.com/go-jose/go-jose/v4/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go index d09d8ba5..c40bd3ec 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jws.go +++ b/vendor/github.com/go-jose/go-jose/v4/jws.go @@ -75,7 +75,14 @@ type Signature struct { original *rawSignatureInfo } -// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. +// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. Validation fails if +// the JWS is signed with an algorithm that isn't in the provided list of signature algorithms. +// Applications should decide for themselves which signature algorithms are acceptable. If you're +// not sure which signature algorithms your application might receive, consult the documentation of +// the program which provides them or the protocol that you are implementing. You can also try +// getting an example JWS and decoding it with a tool like https://jwt.io to see what its "alg" +// header parameter indicates. The signature on the JWS does not get validated during parsing. Call +// Verify() after parsing to validate the signature and obtain the payload. // // https://datatracker.ietf.org/doc/html/rfc7515#section-7 func ParseSigned( @@ -90,7 +97,14 @@ func ParseSigned( return parseSignedCompact(signature, nil, signatureAlgorithms) } -// ParseSignedCompact parses a message in JWS Compact Serialization. +// ParseSignedCompact parses a message in JWS Compact Serialization. Validation fails if the JWS is +// signed with an algorithm that isn't in the provided list of signature algorithms. Applications +// should decide for themselves which signature algorithms are acceptable.If you're not sure which +// signature algorithms your application might receive, consult the documentation of the program +// which provides them or the protocol that you are implementing. You can also try getting an +// example JWS and decoding it with a tool like https://jwt.io to see what its "alg" header +// parameter indicates. The signature on the JWS does not get validated during parsing. Call +// Verify() after parsing to validate the signature and obtain the payload. // // https://datatracker.ietf.org/doc/html/rfc7515#section-7.1 func ParseSignedCompact( @@ -101,6 +115,15 @@ func ParseSignedCompact( } // ParseDetached parses a signed message in compact serialization format with detached payload. +// Validation fails if the JWS is signed with an algorithm that isn't in the provided list of +// signature algorithms. Applications should decide for themselves which signature algorithms are +// acceptable. If you're not sure which signature algorithms your application might receive, consult +// the documentation of the program which provides them or the protocol that you are implementing. +// You can also try getting an example JWS and decoding it with a tool like https://jwt.io to see +// what its "alg" header parameter indicates. The signature on the JWS does not get validated during +// parsing. Call Verify() after parsing to validate the signature and obtain the payload. +// +// https://datatracker.ietf.org/doc/html/rfc7515#appendix-F func ParseDetached( signature string, payload []byte, @@ -181,6 +204,25 @@ func containsSignatureAlgorithm(haystack []SignatureAlgorithm, needle SignatureA return false } +// ErrUnexpectedSignatureAlgorithm is returned when the signature algorithm in +// the JWS header does not match one of the expected algorithms. 
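Together with the expanded ParseSigned / ParseSignedCompact / ParseDetached documentation, the ErrUnexpectedSignatureAlgorithm error type added in this hunk (its definition continues just below) lets callers restrict the accepted algorithms and branch on a mismatch. A minimal caller-side sketch; the token, key, and the RS256/ES256 allow-list are illustrative placeholders, not part of this patch:

```go
package main

import (
	"errors"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

// verifyToken parses a compact JWS while accepting only RS256/ES256, then
// verifies the signature. Parsing alone never validates the signature.
func verifyToken(token string, key any) ([]byte, error) {
	sig, err := jose.ParseSigned(token, []jose.SignatureAlgorithm{jose.RS256, jose.ES256})
	if err != nil {
		var algErr *jose.ErrUnexpectedSignatureAlgorithm
		if errors.As(err, &algErr) {
			return nil, fmt.Errorf("token signed with disallowed algorithm %q", algErr.Got)
		}
		return nil, err
	}
	return sig.Verify(key)
}
```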
+type ErrUnexpectedSignatureAlgorithm struct { + // Got is the signature algorithm found in the JWS header. + Got SignatureAlgorithm + expected []SignatureAlgorithm +} + +func (e *ErrUnexpectedSignatureAlgorithm) Error() string { + return fmt.Sprintf("unexpected signature algorithm %q; expected %q", e.Got, e.expected) +} + +func newErrUnexpectedSignatureAlgorithm(got SignatureAlgorithm, expected []SignatureAlgorithm) error { + return &ErrUnexpectedSignatureAlgorithm{ + Got: got, + expected: expected, + } +} + // sanitized produces a cleaned-up JWS object from the raw JSON. func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) { if len(signatureAlgorithms) == 0 { @@ -236,8 +278,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo alg := SignatureAlgorithm(signature.Header.Algorithm) if !containsSignatureAlgorithm(signatureAlgorithms, alg) { - return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", - alg, signatureAlgorithms) + return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms) } if signature.header != nil { @@ -285,8 +326,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm) if !containsSignatureAlgorithm(signatureAlgorithms, alg) { - return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q", - alg, signatureAlgorithms) + return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms) } if obj.Signatures[i].header != nil { @@ -321,35 +361,43 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo return obj, nil } +const tokenDelim = "." + // parseSignedCompact parses a message in compact format. 
func parseSignedCompact( input string, payload []byte, signatureAlgorithms []SignatureAlgorithm, ) (*JSONWebSignature, error) { - // Three parts is two separators - if strings.Count(input, ".") != 2 { + protected, s, ok := strings.Cut(input, tokenDelim) + if !ok { // no period found + return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") + } + claims, sig, ok := strings.Cut(s, tokenDelim) + if !ok { // only one period found + return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") + } + if strings.ContainsRune(sig, '.') { // too many periods found return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") } - parts := strings.SplitN(input, ".", 3) - if parts[1] != "" && payload != nil { + if claims != "" && payload != nil { return nil, fmt.Errorf("go-jose/go-jose: payload is not detached") } - rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) + rawProtected, err := base64.RawURLEncoding.DecodeString(protected) if err != nil { return nil, err } if payload == nil { - payload, err = base64.RawURLEncoding.DecodeString(parts[1]) + payload, err = base64.RawURLEncoding.DecodeString(claims) if err != nil { return nil, err } } - signature, err := base64.RawURLEncoding.DecodeString(parts[2]) + signature, err := base64.RawURLEncoding.DecodeString(sig) if err != nil { return nil, err } diff --git a/vendor/github.com/go-jose/go-jose/v4/shared.go b/vendor/github.com/go-jose/go-jose/v4/shared.go index 1ec33961..35130b3a 100644 --- a/vendor/github.com/go-jose/go-jose/v4/shared.go +++ b/vendor/github.com/go-jose/go-jose/v4/shared.go @@ -77,6 +77,9 @@ var ( // ErrUnsupportedEllipticCurve indicates unsupported or unknown elliptic curve has been found. ErrUnsupportedEllipticCurve = errors.New("go-jose/go-jose: unsupported/unknown elliptic curve") + + // ErrUnsupportedCriticalHeader is returned when a header is marked critical but not supported by go-jose. + ErrUnsupportedCriticalHeader = errors.New("go-jose/go-jose: unsupported critical header") ) // Key management algorithms @@ -167,8 +170,8 @@ const ( ) // supportedCritical is the set of supported extensions that are understood and processed. -var supportedCritical = map[string]bool{ - headerB64: true, +var supportedCritical = map[string]struct{}{ + headerB64: {}, } // rawHeader represents the JOSE header for JWE/JWS objects (used for parsing). @@ -346,6 +349,32 @@ func (parsed rawHeader) getCritical() ([]string, error) { return q, nil } +// checkNoCritical verifies there are no critical headers present. +func (parsed rawHeader) checkNoCritical() error { + if _, ok := parsed[headerCritical]; ok { + return ErrUnsupportedCriticalHeader + } + + return nil +} + +// checkSupportedCritical verifies there are no unsupported critical headers. +// Supported headers are passed in as a set: map of names to empty structs +func (parsed rawHeader) checkSupportedCritical(supported map[string]struct{}) error { + crit, err := parsed.getCritical() + if err != nil { + return err + } + + for _, name := range crit { + if _, ok := supported[name]; !ok { + return ErrUnsupportedCriticalHeader + } + } + + return nil +} + // getS2C extracts parsed "p2c" from the raw JSON. 
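The shared.go hunk above replaces the ad-hoc "invalid/unsupported crit header" errors with the exported sentinel ErrUnsupportedCriticalHeader, which the crypter.go changes earlier in this patch return from Decrypt and DecryptMulti, so callers can now detect that case with errors.Is. A rough sketch assuming a direct-encryption JWE; the token, key, and algorithm lists are placeholders:

```go
package main

import (
	"errors"
	"log"

	jose "github.com/go-jose/go-jose/v4"
)

// decryptJWE parses and decrypts a compact JWE, distinguishing the critical
// header rejection from other failures.
func decryptJWE(token string, key any) ([]byte, error) {
	obj, err := jose.ParseEncrypted(token,
		[]jose.KeyAlgorithm{jose.DIRECT},
		[]jose.ContentEncryption{jose.A256GCM})
	if err != nil {
		return nil, err
	}
	plaintext, err := obj.Decrypt(key)
	if errors.Is(err, jose.ErrUnsupportedCriticalHeader) {
		log.Println("JWE uses a crit header this service does not support")
	}
	return plaintext, err
}
```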
func (parsed rawHeader) getP2C() (int, error) { v := parsed[headerP2C] diff --git a/vendor/github.com/go-jose/go-jose/v4/signing.go b/vendor/github.com/go-jose/go-jose/v4/signing.go index 3dec0112..5dbd04c2 100644 --- a/vendor/github.com/go-jose/go-jose/v4/signing.go +++ b/vendor/github.com/go-jose/go-jose/v4/signing.go @@ -404,15 +404,23 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter } signature := obj.Signatures[0] - headers := signature.mergedHeaders() - critical, err := headers.getCritical() - if err != nil { - return err + + if signature.header != nil { + // Per https://www.rfc-editor.org/rfc/rfc7515.html#section-4.1.11, + // 4.1.11. "crit" (Critical) Header Parameter + // "When used, this Header Parameter MUST be integrity + // protected; therefore, it MUST occur only within the JWS + // Protected Header." + err = signature.header.checkNoCritical() + if err != nil { + return err + } } - for _, name := range critical { - if !supportedCritical[name] { - return ErrCryptoFailure + if signature.protected != nil { + err = signature.protected.checkSupportedCritical(supportedCritical) + if err != nil { + return err } } @@ -421,6 +429,7 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter return ErrCryptoFailure } + headers := signature.mergedHeaders() alg := headers.getSignatureAlgorithm() err = verifier.verifyPayload(input, signature.Signature, alg) if err == nil { @@ -469,14 +478,22 @@ func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey outer: for i, signature := range obj.Signatures { - headers := signature.mergedHeaders() - critical, err := headers.getCritical() - if err != nil { - continue + if signature.header != nil { + // Per https://www.rfc-editor.org/rfc/rfc7515.html#section-4.1.11, + // 4.1.11. "crit" (Critical) Header Parameter + // "When used, this Header Parameter MUST be integrity + // protected; therefore, it MUST occur only within the JWS + // Protected Header." 
+ err = signature.header.checkNoCritical() + if err != nil { + continue outer + } } - for _, name := range critical { - if !supportedCritical[name] { + if signature.protected != nil { + // Check for only supported critical headers + err = signature.protected.checkSupportedCritical(supportedCritical) + if err != nil { continue outer } } @@ -486,6 +503,7 @@ outer: continue } + headers := signature.mergedHeaders() alg := headers.getSignatureAlgorithm() err = verifier.verifyPayload(input, signature.Signature, alg) if err == nil { diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric.go b/vendor/github.com/go-jose/go-jose/v4/symmetric.go index a69103b0..09efefb2 100644 --- a/vendor/github.com/go-jose/go-jose/v4/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric.go @@ -21,6 +21,7 @@ import ( "crypto/aes" "crypto/cipher" "crypto/hmac" + "crypto/pbkdf2" "crypto/rand" "crypto/sha256" "crypto/sha512" @@ -30,8 +31,6 @@ import ( "hash" "io" - "golang.org/x/crypto/pbkdf2" - josecipher "github.com/go-jose/go-jose/v4/cipher" ) @@ -330,7 +329,10 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie // derive key keyLen, h := getPbkdf2Params(alg) - key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h) + key, err := pbkdf2.Key(h, string(ctx.key), salt, ctx.p2c, keyLen) + if err != nil { + return recipientInfo{}, nil + } // use AES cipher with derived key block, err := aes.NewCipher(key) @@ -432,7 +434,10 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien // derive key keyLen, h := getPbkdf2Params(alg) - key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h) + key, err := pbkdf2.Key(h, string(ctx.key), salt, p2c, keyLen) + if err != nil { + return nil, err + } // use AES cipher with derived key block, err := aes.NewCipher(key) diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 0cffafa7..0ed62c1a 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -1,26 +1,28 @@ +version: "2" + run: timeout: 1m tests: true linters: - disable-all: true - enable: + default: none + enable: # please keep this alphabetized + - asasalint - asciicheck + - copyloopvar + - dupl - errcheck - forcetypeassert + - goconst - gocritic - - gofmt - - goimports - - gosimple - govet - ineffassign - misspell + - musttag - revive - staticcheck - - typecheck - unused issues: - exclude-use-default: false max-issues-per-linter: 0 max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 30568e76..b22c57d7 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -77,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { write: fn, } // For skipping fnlogger.Info and fnlogger.Error. 
- l.Formatter.AddCallDepth(1) + l.AddCallDepth(1) // via Formatter return l } @@ -164,17 +164,17 @@ type fnlogger struct { } func (l fnlogger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) + l.AddName(name) // via Formatter return &l } func (l fnlogger) WithValues(kvList ...any) logr.LogSink { - l.Formatter.AddValues(kvList) + l.AddValues(kvList) // via Formatter return &l } func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) + l.AddCallDepth(depth) // via Formatter return &l } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index a65d88eb..04b4bebf 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -27,6 +27,7 @@ go_library( "//internal/httprule", "//utilities", "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//grpclog", "@org_golang_google_grpc//health/grpc_health_v1", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 2f2b3424..00b2228a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -201,13 +201,13 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM if timeout != 0 { ctx, _ = context.WithTimeout(ctx, timeout) } - if len(pairs) == 0 { - return ctx, nil, nil - } md := metadata.Pairs(pairs...) for _, mda := range mux.metadataAnnotators { md = metadata.Join(md, mda(ctx, req)) } + if len(md) == 0 { + return ctx, nil, nil + } return ctx, md, nil } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 41cd4f50..bbe7decf 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -148,22 +148,20 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Error("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - - // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 - // Unless the request includes a TE header field indicating "trailers" - // is acceptable, as described in Section 4.3, a server SHOULD NOT - // generate trailer fields that it believes are necessary for the user - // agent to receive. - doForwardTrailers := requestAcceptsTrailers(r) - - if doForwardTrailers { - handleForwardResponseTrailerHeader(w, mux, md) - w.Header().Set("Transfer-Encoding", "chunked") + if ok { + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. 
+ doForwardTrailers := requestAcceptsTrailers(r) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } } st := HTTPStatusFromCode(s.Code()) @@ -176,7 +174,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh grpclog.Errorf("Failed to write response: %v", err) } - if doForwardTrailers { + if ok && requestAcceptsTrailers(r) { handleForwardResponseTrailer(w, mux, md) } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index f0727cf7..2f0b9e9e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -153,12 +153,10 @@ type responseBody interface { // ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Error("Failed to extract ServerMetadata from context") + if ok { + handleForwardResponseServerMetadata(w, mux, md) } - handleForwardResponseServerMetadata(w, mux, md) - // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 // Unless the request includes a TE header field indicating "trailers" // is acceptable, as described in Section 4.3, a server SHOULD NOT @@ -166,7 +164,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha // agent to receive. doForwardTrailers := requestAcceptsTrailers(req) - if doForwardTrailers { + if ok && doForwardTrailers { handleForwardResponseTrailerHeader(w, mux, md) w.Header().Set("Transfer-Encoding", "chunked") } @@ -204,7 +202,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha grpclog.Errorf("Failed to write response: %v", err) } - if doForwardTrailers { + if ok && doForwardTrailers { handleForwardResponseTrailer(w, mux, md) } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 8376d1e0..3d070630 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -66,7 +66,7 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { var ( // protoMessageType is stored to prevent constant lookup of the same type at runtime. - protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() + protoMessageType = reflect.TypeFor[proto.Message]() ) // marshalNonProto marshals a non-message field of a protobuf message. @@ -325,9 +325,9 @@ type protoEnum interface { EnumDescriptor() ([]byte, []int) } -var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() +var typeProtoEnum = reflect.TypeFor[protoEnum]() -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() +var typeProtoMessage = reflect.TypeFor[proto.Message]() // Delimiter for newline encoded JSON streams. 
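The marshal_jsonpb.go hunk above swaps the reflect.TypeOf((*T)(nil)).Elem() idiom for reflect.TypeFor, available since Go 1.22. A standalone check of the equivalence (not part of the patch):

```go
package main

import (
	"fmt"
	"reflect"

	"google.golang.org/protobuf/proto"
)

func main() {
	oldStyle := reflect.TypeOf((*proto.Message)(nil)).Elem() // pre-1.22 spelling
	newStyle := reflect.TypeFor[proto.Message]()             // Go 1.22+ spelling
	fmt.Println(oldStyle == newStyle)                        // true
}
```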
func (j *JSONPb) Delimiter() []byte { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 19255ec4..3eb16167 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/health/grpc_health_v1" @@ -281,12 +282,19 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, ) { _, outboundMarshaler := MarshalerForRequest(s, r) + annotatedContext, err := AnnotateContext(r.Context(), s, r, grpc_health_v1.Health_Check_FullMethodName, WithHTTPPathPattern(endpointPath)) + if err != nil { + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } - resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ + var md ServerMetadata + resp, err := healthCheckClient.Check(annotatedContext, &grpc_health_v1.HealthCheckRequest{ Service: r.URL.Query().Get("service"), - }) + }, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD)) + annotatedContext = NewServerMetadataContext(annotatedContext, md) if err != nil { - s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) return } @@ -300,7 +308,7 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin err = status.Error(codes.NotFound, resp.String()) } - s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) return } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go index e854d7e8..2950fdb4 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go index 29e629d6..5bb3b16c 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error { // strings or integers. type protoUint64 uint64 -// Int64 returns the protoUint64 as a uint64. +// Uint64 returns the protoUint64 as a uint64. func (i *protoUint64) Uint64() uint64 { return uint64(*i) } // UnmarshalJSON decodes both strings and integers. 
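The protoUint64 comment fix above ("decodes both strings and integers") mirrors the protobuf JSON convention, under which 64-bit integers may arrive either as JSON numbers or as quoted strings. A self-contained sketch of that decoding pattern; flexUint64 is an illustrative name, not a type from the patch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexUint64 accepts both `123` and `"123"` on the wire.
type flexUint64 uint64

func (u *flexUint64) UnmarshalJSON(data []byte) error {
	if l := len(data); l >= 2 && data[0] == '"' && data[l-1] == '"' {
		data = data[1 : l-1] // strip surrounding quotes
	}
	v, err := strconv.ParseUint(string(data), 10, 64)
	if err != nil {
		return err
	}
	*u = flexUint64(v)
	return nil
}

func main() {
	var a, b flexUint64
	_ = json.Unmarshal([]byte(`123`), &a)
	_ = json.Unmarshal([]byte(`"456"`), &b)
	fmt.Println(a, b) // 123 456
}
```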
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go index a13a6b73..67f80b6a 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "math" "time" ) @@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) { }{ Alias: Alias(s), ParentSpanID: parentSpanId, - StartTime: uint64(startT), - EndTime: uint64(endT), + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. }) } @@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error { case "startTimeUnixNano", "start_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.StartTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.StartTime = time.Unix(0, v) case "endTimeUnixNano", "end_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.EndTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.EndTime = time.Unix(0, v) case "attributes": err = decoder.Decode(&s.Attrs) case "droppedAttributesCount", "dropped_attributes_count": @@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -263,26 +273,30 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. 
A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied +// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. type SpanEvent struct { // time_unix_nano is the time the event occurred. @@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) { Time uint64 `json:"timeUnixNano,omitempty"` }{ Alias: Alias(e), - Time: uint64(t), + Time: uint64(t), //nolint:gosec // >0 checked above }) } @@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { case "timeUnixNano", "time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - se.Time = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + se.Time = time.Unix(0, v) case "name": err = decoder.Decode(&se.Name) case "attributes": @@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go index 1217776e..a2802764 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry +// StatusCode is the status of a Span. +// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. 
StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go index 69a348f0..44197b80 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. // Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go index 0dd01b06..022768bb 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -1,8 +1,6 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -//go:generate stringer -type=ValueKind -trimprefix=ValueKind - package telemetry import ( @@ -23,7 +21,7 @@ import ( // A zero value is valid and represents an empty value. type Value struct { // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. + noCmp [0]func() //nolint:unused // This is indeed used. // num holds the value for Int64, Float64, and Bool. It holds the length // for String, Bytes, Slice, Map. @@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: ValueKindInt64} + return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv. } // Float64Value returns a [Value] for a float64. @@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 { // this will return garbage. func (v Value) asInt64() int64 { // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) // nolint: gosec + return int64(v.num) //nolint:gosec // Bounded. } // AsBool returns the value held by v as a bool. @@ -309,13 +307,13 @@ func (v Value) String() string { return v.asString() case ValueKindInt64: // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded. 
case ValueKindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: @@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) { case ValueKindInt64: return json.Marshal(struct { Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) + }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv. case ValueKindFloat64: return json.Marshal(struct { Value float64 `json:"doubleValue"` diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go index 6ebea12a..815d271f 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -6,6 +6,7 @@ package sdk import ( "encoding/json" "fmt" + "math" "reflect" "runtime" "strings" @@ -16,7 +17,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" @@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { limit := maxSpan.Attrs if limit == 0 { // No attributes allowed. - s.span.DroppedAttrs += uint32(len(attrs)) + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked. + min(n, math.MaxUint32), + ) + } return } @@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { // convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The // number of dropped attributes is also returned. func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) if limit == 0 { - return nil, uint32(len(attrs)) + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked. + } + return nil, out } if limit < 0 { @@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u return convAttrs(attrs), 0 } - limit = min(len(attrs), limit) - return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked. } func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go index cbcfabde..e09acf02 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -5,6 +5,7 @@ package sdk import ( "context" + "math" "time" "go.opentelemetry.io/otel/trace" @@ -21,15 +22,20 @@ type tracer struct { var _ trace.Tracer = tracer{} -func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - var psc trace.SpanContext +func (t tracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { + var psc, sc trace.SpanContext sampled := true span := new(span) // Ask eBPF for sampling decision and span context info. 
- t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = trace.ContextWithSpan(ctx, span) @@ -58,7 +64,13 @@ func (t *tracer) start( // start is used for testing. var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} -func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { +var intToUint32Bound = min(math.MaxInt, math.MaxUint32) + +func (t tracer) traces( + name string, + cfg trace.SpanConfig, + sc, psc trace.SpanContext, +) (*telemetry.Traces, *telemetry.Span) { span := &telemetry.Span{ TraceID: telemetry.TraceID(sc.TraceID()), SpanID: telemetry.SpanID(sc.SpanID()), @@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont links := cfg.Links() if limit := maxSpan.Links; limit == 0 { - span.DroppedLinks = uint32(len(links)) + n := len(links) + if n > 0 { + bounded := max(min(n, intToUint32Bound), 0) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. + } } else { if limit > 0 { n := max(len(links)-limit, 0) - span.DroppedLinks = uint32(n) + bounded := min(n, intToUint32Bound) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. links = links[n:] } span.Links = convLinks(links) diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go index 6f64c794..0606775c 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/logutil/convert.go.tmpl // Copyright The OpenTelemetry Authors @@ -13,6 +13,7 @@ import ( "strconv" "time" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/log" ) @@ -66,6 +67,10 @@ func convertValue(v any) log.Value { return log.BytesValue(val) case error: return log.StringValue(val.Error()) + case attribute.Value: + return log.ValueFromAttribute(val) + case log.Value: + return val } t := reflect.TypeOf(v) diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go index e3564247..0cae1efb 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go @@ -37,17 +37,18 @@ import ( "context" "slices" - "go.uber.org/zap/zapcore" - + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/log/global" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.uber.org/zap/zapcore" ) type config struct { - provider log.LoggerProvider - version string - schemaURL string + provider log.LoggerProvider + version string + schemaURL string + attributes []attribute.KeyValue } func newConfig(options []Option) config { @@ -92,6 +93,15 @@ func WithSchemaURL(schemaURL string) Option { }) } +// WithAttributes returns an [Option] that configures the instrumentation scope +// attributes of the [log.Logger] used by a [Core]. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return optFunc(func(c config) config { + c.attributes = attributes + return c + }) +} + // WithLoggerProvider returns an [Option] that configures [log.LoggerProvider] // used by a [Core] to create its [log.Logger]. // @@ -129,6 +139,9 @@ func NewCore(name string, opts ...Option) *Core { if cfg.schemaURL != "" { loggerOpts = append(loggerOpts, log.WithSchemaURL(cfg.schemaURL)) } + if cfg.attributes != nil { + loggerOpts = append(loggerOpts, log.WithInstrumentationAttributes(cfg.attributes...)) + } logger := cfg.provider.Logger(name, loggerOpts...) @@ -170,7 +183,7 @@ func (o *Core) clone() *Core { } // Sync flushes buffered logs (if any). -func (o *Core) Sync() error { +func (*Core) Sync() error { return nil } @@ -201,18 +214,19 @@ func (o *Core) Write(ent zapcore.Entry, fields []zapcore.Field) error { r.AddAttributes(o.attr...) if ent.Caller.Defined { r.AddAttributes( - log.String(string(semconv.CodeFilepathKey), ent.Caller.File), + log.String(string(semconv.CodeFilePathKey), ent.Caller.File), log.Int(string(semconv.CodeLineNumberKey), ent.Caller.Line), - log.String(string(semconv.CodeFunctionKey), ent.Caller.Function), + log.String(string(semconv.CodeFunctionNameKey), ent.Caller.Function), ) } if ent.Stack != "" { r.AddAttributes(log.String(string(semconv.CodeStacktraceKey), ent.Stack)) } + emitCtx := o.ctx if len(fields) > 0 { ctx, attrbuf := convertField(fields) if ctx != nil { - o.ctx = ctx + emitCtx = ctx } r.AddAttributes(attrbuf...) 
} @@ -221,7 +235,7 @@ func (o *Core) Write(ent zapcore.Entry, fields []zapcore.Field) error { if ent.LoggerName != "" { logger = o.provider.Logger(ent.LoggerName, o.opts...) } - logger.Emit(o.ctx, r) + logger.Emit(emitCtx, r) return nil } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go index 8147576a..8d868e08 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go @@ -6,9 +6,8 @@ package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" import ( "time" - "go.uber.org/zap/zapcore" - "go.opentelemetry.io/otel/log" + "go.uber.org/zap/zapcore" ) var ( @@ -103,7 +102,7 @@ func (m *objectEncoder) AddInt(k string, v int) { m.cur.attrs = append(m.cur.attrs, log.Int(k, v)) } -func (m *objectEncoder) AddString(k string, v string) { +func (m *objectEncoder) AddString(k, v string) { m.cur.attrs = append(m.cur.attrs, log.String(k, v)) } @@ -115,7 +114,7 @@ func (m *objectEncoder) AddUint64(k string, v uint64) { }) } -func (m *objectEncoder) AddReflected(k string, v interface{}) error { +func (m *objectEncoder) AddReflected(k string, v any) error { m.cur.attrs = append(m.cur.attrs, log.KeyValue{ Key: k, @@ -185,7 +184,7 @@ func assignUintValue(v uint64) log.Value { if v > maxInt64 { return log.Float64Value(float64(v)) } - return log.Int64Value(int64(v)) // nolint:gosec // Overflow checked above. + return log.Int64Value(int64(v)) } // arrayEncoder implements [zapcore.ArrayEncoder]. @@ -218,7 +217,7 @@ func (a *arrayEncoder) AppendObject(v zapcore.ObjectMarshaler) error { return err } -func (a *arrayEncoder) AppendReflected(v interface{}) error { +func (a *arrayEncoder) AppendReflected(v any) error { a.elems = append(a.elems, convertValue(v)) return nil } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 9e87fb4b..e65c4907 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -4,23 +4,18 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( - "google.golang.org/grpc/stats" + "context" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc/stats" ) -const ( - // ScopeName is the instrumentation scope name. - ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. - GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) +// ScopeName is the instrumentation scope name. +const ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // InterceptorFilter is a predicate used to determine whether a given request in // interceptor info should be instrumented. A InterceptorFilter must return true if @@ -45,17 +40,11 @@ type config struct { SpanAttributes []attribute.KeyValue MetricAttributes []attribute.KeyValue + PublicEndpoint bool + PublicEndpointFn func(ctx context.Context, info *stats.RPCTagInfo) bool + ReceivedEvent bool SentEvent bool - - tracer trace.Tracer - meter metric.Meter - - rpcDuration metric.Float64Histogram - rpcInBytes metric.Int64Histogram - rpcOutBytes metric.Int64Histogram - rpcInMessages metric.Int64Histogram - rpcOutMessages metric.Int64Histogram } // Option applies an option value for a config. @@ -64,7 +53,7 @@ type Option interface { } // newConfig returns a config configured with all the passed Options. 
-func newConfig(opts []Option, role string) *config { +func newConfig(opts []Option) *config { c := &config{ Propagators: otel.GetTextMapPropagator(), TracerProvider: otel.GetTracerProvider(), @@ -73,88 +62,39 @@ func newConfig(opts []Option, role string) *config { for _, o := range opts { o.apply(c) } + return c +} - c.tracer = c.TracerProvider.Tracer( - ScopeName, - trace.WithInstrumentationVersion(SemVersion()), - ) - - c.meter = c.MeterProvider.Meter( - ScopeName, - metric.WithInstrumentationVersion(Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - var err error - c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration", - metric.WithDescription("Measures the duration of inbound RPC."), - metric.WithUnit("ms")) - if err != nil { - otel.Handle(err) - if c.rpcDuration == nil { - c.rpcDuration = noop.Float64Histogram{} - } - } - - rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size", - metric.WithDescription("Measures size of RPC request messages (uncompressed)."), - metric.WithUnit("By")) - if err != nil { - otel.Handle(err) - if rpcRequestSize == nil { - rpcRequestSize = noop.Int64Histogram{} - } - } +type publicEndpointOption struct{ p bool } - rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size", - metric.WithDescription("Measures size of RPC response messages (uncompressed)."), - metric.WithUnit("By")) - if err != nil { - otel.Handle(err) - if rpcResponseSize == nil { - rpcResponseSize = noop.Int64Histogram{} - } - } +func (o publicEndpointOption) apply(c *config) { + c.PublicEndpoint = o.p +} - rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", - metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), - metric.WithUnit("{count}")) - if err != nil { - otel.Handle(err) - if rpcRequestsPerRPC == nil { - rpcRequestsPerRPC = noop.Int64Histogram{} - } - } +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. +func WithPublicEndpoint() Option { + return publicEndpointOption{p: true} +} - rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", - metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), - metric.WithUnit("{count}")) - if err != nil { - otel.Handle(err) - if rpcResponsesPerRPC == nil { - rpcResponsesPerRPC = noop.Int64Histogram{} - } - } +type publicEndpointFnOption struct { + fn func(context.Context, *stats.RPCTagInfo) bool +} - switch role { - case "client": - c.rpcInBytes = rpcResponseSize - c.rpcInMessages = rpcResponsesPerRPC - c.rpcOutBytes = rpcRequestSize - c.rpcOutMessages = rpcRequestsPerRPC - case "server": - c.rpcInBytes = rpcRequestSize - c.rpcInMessages = rpcRequestsPerRPC - c.rpcOutBytes = rpcResponseSize - c.rpcOutMessages = rpcResponsesPerRPC - default: - c.rpcInBytes = noop.Int64Histogram{} - c.rpcInMessages = noop.Int64Histogram{} - c.rpcOutBytes = noop.Int64Histogram{} - c.rpcOutMessages = noop.Int64Histogram{} +func (o publicEndpointFnOption) apply(c *config) { + if o.fn != nil { + c.PublicEndpointFn = o.fn } +} - return c +// WithPublicEndpointFn runs with every request, and allows conditionally +// configuring the Handler to link the span with an incoming span context. 
If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(context.Context, *stats.RPCTagInfo) bool) Option { + return publicEndpointFnOption{fn: fn} } type propagatorsOption struct{ p propagation.TextMapPropagator } @@ -274,6 +214,8 @@ func (o spanStartOption) apply(c *config) { // WithSpanOptions configures an additional set of // trace.SpanOptions, which are applied to each new span. +// +// Deprecated: It is only used by the deprecated interceptor, and is unused by [NewClientHandler] and [NewServerHandler]. func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 7d5ed058..99f88ec3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -4,506 +4,33 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // gRPC tracing middleware -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md +// https://opentelemetry.io/docs/specs/semconv/rpc/ import ( - "context" - "errors" - "io" "net" "strconv" - "time" - "google.golang.org/grpc" - grpc_codes "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" - "go.opentelemetry.io/otel/trace" -) - -type messageType attribute.KeyValue - -// Event adds an event of the messageType to the span associated with the -// passed context with a message id. -func (m messageType) Event(ctx context.Context, id int, _ interface{}) { - span := trace.SpanFromContext(ctx) - if !span.IsRecording() { - return - } - span.AddEvent("message", trace.WithAttributes( - attribute.KeyValue(m), - RPCMessageIDKey.Int(id), - )) -} - -var ( - messageSent = messageType(RPCMessageTypeSent) - messageReceived = messageType(RPCMessageTypeReceived) + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) -// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.NewClient call. -// -// Deprecated: Use [NewClientHandler] instead. -func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { - cfg := newConfig(opts, "client") - tracer := cfg.TracerProvider.Tracer( - ScopeName, - trace.WithInstrumentationVersion(Version()), - ) - - return func( - ctx context.Context, - method string, - req, reply interface{}, - cc *grpc.ClientConn, - invoker grpc.UnaryInvoker, - callOpts ...grpc.CallOption, - ) error { - i := &InterceptorInfo{ - Method: method, - Type: UnaryClient, - } - if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { - return invoker(ctx, method, req, reply, cc, callOpts...) 
- } - - name, attr, _ := telemetryAttributes(method, cc.Target()) - - startOpts := append([]trace.SpanStartOption{ - trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attr...), - }, - cfg.SpanStartOptions..., - ) - - ctx, span := tracer.Start( - ctx, - name, - startOpts..., - ) - defer span.End() - - ctx = inject(ctx, cfg.Propagators) - - if cfg.SentEvent { - messageSent.Event(ctx, 1, req) - } - - err := invoker(ctx, method, req, reply, cc, callOpts...) - - if cfg.ReceivedEvent { - messageReceived.Event(ctx, 1, reply) - } - - if err != nil { - s, _ := status.FromError(err) - span.SetStatus(codes.Error, s.Message()) - span.SetAttributes(statusCodeAttr(s.Code())) - } else { - span.SetAttributes(statusCodeAttr(grpc_codes.OK)) - } - - return err - } -} - -// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and -// SendMsg method call. -type clientStream struct { - grpc.ClientStream - desc *grpc.StreamDesc - - span trace.Span - - receivedEvent bool - sentEvent bool - - receivedMessageID int - sentMessageID int -} - -var _ = proto.Marshal - -func (w *clientStream) RecvMsg(m interface{}) error { - err := w.ClientStream.RecvMsg(m) - - if err == nil && !w.desc.ServerStreams { - w.endSpan(nil) - } else if errors.Is(err, io.EOF) { - w.endSpan(nil) - } else if err != nil { - w.endSpan(err) - } else { - w.receivedMessageID++ - - if w.receivedEvent { - messageReceived.Event(w.Context(), w.receivedMessageID, m) - } - } - - return err -} - -func (w *clientStream) SendMsg(m interface{}) error { - err := w.ClientStream.SendMsg(m) - - w.sentMessageID++ - - if w.sentEvent { - messageSent.Event(w.Context(), w.sentMessageID, m) - } - - if err != nil { - w.endSpan(err) - } - - return err -} - -func (w *clientStream) Header() (metadata.MD, error) { - md, err := w.ClientStream.Header() - if err != nil { - w.endSpan(err) - } - - return md, err -} - -func (w *clientStream) CloseSend() error { - err := w.ClientStream.CloseSend() - if err != nil { - w.endSpan(err) - } - - return err -} - -func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { - return &clientStream{ - ClientStream: s, - span: span, - desc: desc, - receivedEvent: cfg.ReceivedEvent, - sentEvent: cfg.SentEvent, - } -} - -func (w *clientStream) endSpan(err error) { - if err != nil { - s, _ := status.FromError(err) - w.span.SetStatus(codes.Error, s.Message()) - w.span.SetAttributes(statusCodeAttr(s.Code())) - } else { - w.span.SetAttributes(statusCodeAttr(grpc_codes.OK)) - } - - w.span.End() -} - -// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.NewClient call. -// -// Deprecated: Use [NewClientHandler] instead. -func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { - cfg := newConfig(opts, "client") - tracer := cfg.TracerProvider.Tracer( - ScopeName, - trace.WithInstrumentationVersion(Version()), - ) - - return func( - ctx context.Context, - desc *grpc.StreamDesc, - cc *grpc.ClientConn, - method string, - streamer grpc.Streamer, - callOpts ...grpc.CallOption, - ) (grpc.ClientStream, error) { - i := &InterceptorInfo{ - Method: method, - Type: StreamClient, - } - if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { - return streamer(ctx, desc, cc, method, callOpts...) 
- } - - name, attr, _ := telemetryAttributes(method, cc.Target()) - - startOpts := append([]trace.SpanStartOption{ - trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attr...), - }, - cfg.SpanStartOptions..., - ) - - ctx, span := tracer.Start( - ctx, - name, - startOpts..., - ) - - ctx = inject(ctx, cfg.Propagators) - - s, err := streamer(ctx, desc, cc, method, callOpts...) - if err != nil { - grpcStatus, _ := status.FromError(err) - span.SetStatus(codes.Error, grpcStatus.Message()) - span.SetAttributes(statusCodeAttr(grpcStatus.Code())) - span.End() - return s, err - } - stream := wrapClientStream(s, desc, span, cfg) - return stream, nil - } -} - -// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable -// for use in a grpc.NewServer call. -// -// Deprecated: Use [NewServerHandler] instead. -func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { - cfg := newConfig(opts, "server") - tracer := cfg.TracerProvider.Tracer( - ScopeName, - trace.WithInstrumentationVersion(Version()), - ) - - return func( - ctx context.Context, - req interface{}, - info *grpc.UnaryServerInfo, - handler grpc.UnaryHandler, - ) (interface{}, error) { - i := &InterceptorInfo{ - UnaryServerInfo: info, - Type: UnaryServer, - } - if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { - return handler(ctx, req) - } - - ctx = extract(ctx, cfg.Propagators) - name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) - - startOpts := append([]trace.SpanStartOption{ - trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attr...), - }, - cfg.SpanStartOptions..., - ) - - ctx, span := tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), - name, - startOpts..., - ) - defer span.End() - - if cfg.ReceivedEvent { - messageReceived.Event(ctx, 1, req) - } - - before := time.Now() - - resp, err := handler(ctx, req) - - s, _ := status.FromError(err) - if err != nil { - statusCode, msg := serverStatus(s) - span.SetStatus(statusCode, msg) - if cfg.SentEvent { - messageSent.Event(ctx, 1, s.Proto()) - } - } else { - if cfg.SentEvent { - messageSent.Event(ctx, 1, resp) - } - } - grpcStatusCodeAttr := statusCodeAttr(s.Code()) - span.SetAttributes(grpcStatusCodeAttr) - - // Use floating point division here for higher precision (instead of Millisecond method). - elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) - - metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) - - return resp, err - } -} - -// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and -// SendMsg method call. 
-type serverStream struct { - grpc.ServerStream - ctx context.Context - - receivedMessageID int - sentMessageID int - - receivedEvent bool - sentEvent bool -} - -func (w *serverStream) Context() context.Context { - return w.ctx -} - -func (w *serverStream) RecvMsg(m interface{}) error { - err := w.ServerStream.RecvMsg(m) - - if err == nil { - w.receivedMessageID++ - if w.receivedEvent { - messageReceived.Event(w.Context(), w.receivedMessageID, m) - } - } - - return err -} - -func (w *serverStream) SendMsg(m interface{}) error { - err := w.ServerStream.SendMsg(m) - - w.sentMessageID++ - if w.sentEvent { - messageSent.Event(w.Context(), w.sentMessageID, m) - } - - return err -} - -func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream { - return &serverStream{ - ServerStream: ss, - ctx: ctx, - receivedEvent: cfg.ReceivedEvent, - sentEvent: cfg.SentEvent, - } -} - -// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable -// for use in a grpc.NewServer call. -// -// Deprecated: Use [NewServerHandler] instead. -func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { - cfg := newConfig(opts, "server") - tracer := cfg.TracerProvider.Tracer( - ScopeName, - trace.WithInstrumentationVersion(Version()), - ) - - return func( - srv interface{}, - ss grpc.ServerStream, - info *grpc.StreamServerInfo, - handler grpc.StreamHandler, - ) error { - ctx := ss.Context() - i := &InterceptorInfo{ - StreamServerInfo: info, - Type: StreamServer, - } - if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { - return handler(srv, wrapServerStream(ctx, ss, cfg)) - } - - ctx = extract(ctx, cfg.Propagators) - name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) - - startOpts := append([]trace.SpanStartOption{ - trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attr...), - }, - cfg.SpanStartOptions..., - ) - - ctx, span := tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), - name, - startOpts..., - ) - defer span.End() - - err := handler(srv, wrapServerStream(ctx, ss, cfg)) - if err != nil { - s, _ := status.FromError(err) - statusCode, msg := serverStatus(s) - span.SetStatus(statusCode, msg) - span.SetAttributes(statusCodeAttr(s.Code())) - } else { - span.SetAttributes(statusCodeAttr(grpc_codes.OK)) - } - - return err - } -} - -// telemetryAttributes returns a span name and span and metric attributes from -// the gRPC method and peer address. -func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) { - name, methodAttrs := internal.ParseFullMethod(fullMethod) - peerAttrs := peerAttr(peerAddress) - - attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs)) - attrs = append(attrs, RPCSystemGRPC) - attrs = append(attrs, methodAttrs...) - metricAttrs := attrs[:1+len(methodAttrs)] - attrs = append(attrs, peerAttrs...) - return name, attrs, metricAttrs -} - -// peerAttr returns attributes about the peer address. -func peerAttr(addr string) []attribute.KeyValue { - host, p, err := net.SplitHostPort(addr) +// serverAddrAttrs returns the server address attributes for the hostport. +func serverAddrAttrs(hostport string) []attribute.KeyValue { + h, pStr, err := net.SplitHostPort(hostport) if err != nil { - return nil - } - - if host == "" { - host = "127.0.0.1" + // The server.address attribute is required. 
+ return []attribute.KeyValue{semconv.ServerAddress(hostport)} } - port, err := strconv.Atoi(p) + p, err := strconv.Atoi(pStr) if err != nil { - return nil - } - - var attr []attribute.KeyValue - if ip := net.ParseIP(host); ip != nil { - attr = []attribute.KeyValue{ - semconv.NetSockPeerAddr(host), - semconv.NetSockPeerPort(port), - } - } else { - attr = []attribute.KeyValue{ - semconv.NetPeerName(host), - semconv.NetPeerPort(port), - } + return []attribute.KeyValue{semconv.ServerAddress(h)} } - - return attr -} - -// peerFromCtx returns a peer address from a context, if one exists. -func peerFromCtx(ctx context.Context) string { - p, ok := peer.FromContext(ctx) - if !ok { - return "" + return []attribute.KeyValue{ + semconv.ServerAddress(h), + semconv.ServerPort(p), } - return p.Addr.String() -} - -// statusCodeAttr returns status code attribute based on given gRPC code. -func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { - return GRPCStatusCodeKey.Int64(int64(c)) } // serverStatus returns a span status code and message for a given gRPC diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index bef07b7a..e46185e0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -1,13 +1,14 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionality for the otelgrpc package. package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) // ParseFullMethod returns a span name following the OpenTelemetry semantic diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go index 3aa37915..b427e172 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -6,15 +6,14 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g import ( "context" - "google.golang.org/grpc/metadata" - "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc/metadata" ) type metadataSupplier struct { - metadata *metadata.MD + metadata metadata.MD } // assert that metadataSupplier implements the TextMapCarrier interface. 
@@ -28,13 +27,13 @@ func (s *metadataSupplier) Get(key string) string { return values[0] } -func (s *metadataSupplier) Set(key string, value string) { +func (s *metadataSupplier) Set(key, value string) { s.metadata.Set(key, value) } func (s *metadataSupplier) Keys() []string { - out := make([]string, 0, len(*s.metadata)) - for key := range *s.metadata { + out := make([]string, 0, len(s.metadata)) + for key := range s.metadata { out = append(out, key) } return out @@ -43,11 +42,12 @@ func (s *metadataSupplier) Keys() []string { // Inject injects correlation context and span context into the gRPC // metadata object. This function is meant to be used on outgoing // requests. +// // Deprecated: Unnecessary public func. func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { - c := newConfig(opts, "") + c := newConfig(opts) c.Propagators.Inject(ctx, &metadataSupplier{ - metadata: md, + metadata: *md, }) } @@ -57,7 +57,7 @@ func inject(ctx context.Context, propagators propagation.TextMapPropagator) cont md = metadata.MD{} } propagators.Inject(ctx, &metadataSupplier{ - metadata: &md, + metadata: md, }) return metadata.NewOutgoingContext(ctx, md) } @@ -65,11 +65,12 @@ func inject(ctx context.Context, propagators propagation.TextMapPropagator) cont // Extract returns the correlation context and span context that // another service encoded in the gRPC metadata object with Inject. // This function is meant to be used on incoming requests. +// // Deprecated: Unnecessary public func. func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { - c := newConfig(opts, "") + c := newConfig(opts) ctx = c.Propagators.Extract(ctx, &metadataSupplier{ - metadata: md, + metadata: *md, }) return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) @@ -82,6 +83,6 @@ func extract(ctx context.Context, propagators propagation.TextMapPropagator) con } return propagators.Extract(ctx, &metadataSupplier{ - metadata: &md, + metadata: md, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go deleted file mode 100644 index 409c621b..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - -import ( - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" -) - -// Semantic conventions for attribute keys for gRPC. -const ( - // Name of message transmitted or received. - RPCNameKey = attribute.Key("name") - - // Type of message transmitted or received. - RPCMessageTypeKey = attribute.Key("message.type") - - // Identifier of message transmitted or received. - RPCMessageIDKey = attribute.Key("message.id") - - // The compressed size of the message transmitted or received in bytes. - RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // The uncompressed size of the message transmitted or received in - // bytes. - RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -// Semantic conventions for common RPC attributes. -var ( - // Semantic convention for gRPC as the remoting system. 
- RPCSystemGRPC = semconv.RPCSystemGRPC - - // Semantic convention for a message named message. - RPCNameMessage = RPCNameKey.String("message") - - // Semantic conventions for RPC message types. - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index c01cb897..29d7ab2b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -8,16 +8,17 @@ import ( "sync/atomic" "time" - grpc_codes "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv" "go.opentelemetry.io/otel/trace" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ) @@ -33,63 +34,175 @@ type gRPCContext struct { type serverHandler struct { *config + + tracer trace.Tracer + + duration rpcconv.ServerDuration + inSize rpcconv.ServerRequestSize + outSize rpcconv.ServerResponseSize + inMsg rpcconv.ServerRequestsPerRPC + outMsg rpcconv.ServerResponsesPerRPC } // NewServerHandler creates a stats.Handler for a gRPC server. func NewServerHandler(opts ...Option) stats.Handler { - h := &serverHandler{ - config: newConfig(opts, "server"), + c := newConfig(opts) + h := &serverHandler{config: c} + + h.tracer = c.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + meter := c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + h.duration, err = rpcconv.NewServerDuration(meter) + if err != nil { + otel.Handle(err) + } + + h.inSize, err = rpcconv.NewServerRequestSize(meter) + if err != nil { + otel.Handle(err) + } + + h.outSize, err = rpcconv.NewServerResponseSize(meter) + if err != nil { + otel.Handle(err) + } + + h.inMsg, err = rpcconv.NewServerRequestsPerRPC(meter) + if err != nil { + otel.Handle(err) + } + + h.outMsg, err = rpcconv.NewServerResponsesPerRPC(meter) + if err != nil { + otel.Handle(err) } return h } // TagConn can attach some information to the given context. -func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { +func (*serverHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx } // HandleConn processes the Conn stats. -func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) { +func (*serverHandler) HandleConn(context.Context, stats.ConnStats) { } // TagRPC can attach some information to the given context. 
func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - ctx = extract(ctx, h.config.Propagators) + ctx = extract(ctx, h.Propagators) name, attrs := internal.ParseFullMethod(info.FullMethodName) - attrs = append(attrs, RPCSystemGRPC) - ctx, _ = h.tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), - name, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), - ) + attrs = append(attrs, semconv.RPCSystemGRPC) - gctx := gRPCContext{ - metricAttrs: append(attrs, h.config.MetricAttributes...), - record: true, + record := true + if h.Filter != nil { + record = h.Filter(info) + } + + if record { + opts := []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(append(attrs, h.SpanAttributes...)...), + } + if h.PublicEndpoint || (h.PublicEndpointFn != nil && h.PublicEndpointFn(ctx, info)) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + ctx, _ = h.tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + opts..., + ) } - if h.config.Filter != nil { - gctx.record = h.config.Filter(info) + + gctx := gRPCContext{ + metricAttrs: append(attrs, h.MetricAttributes...), + record: record, } + return context.WithValue(ctx, gRPCContextKey{}, &gctx) } // HandleRPC processes the RPC stats. func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - isServer := true - h.handleRPC(ctx, rs, isServer) + h.handleRPC( + ctx, + rs, + h.duration.Inst(), + h.inSize, + h.outSize, + h.inMsg.Inst(), + h.outMsg.Inst(), + serverStatus, + ) } type clientHandler struct { *config + + tracer trace.Tracer + + duration rpcconv.ClientDuration + inSize rpcconv.ClientResponseSize + outSize rpcconv.ClientRequestSize + inMsg rpcconv.ClientResponsesPerRPC + outMsg rpcconv.ClientRequestsPerRPC } // NewClientHandler creates a stats.Handler for a gRPC client. func NewClientHandler(opts ...Option) stats.Handler { - h := &clientHandler{ - config: newConfig(opts, "client"), + c := newConfig(opts) + h := &clientHandler{config: c} + + h.tracer = c.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + meter := c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + h.duration, err = rpcconv.NewClientDuration(meter) + if err != nil { + otel.Handle(err) + } + + h.inSize, err = rpcconv.NewClientResponseSize(meter) + if err != nil { + otel.Handle(err) + } + + h.outSize, err = rpcconv.NewClientRequestSize(meter) + if err != nil { + otel.Handle(err) + } + + h.inMsg, err = rpcconv.NewClientResponsesPerRPC(meter) + if err != nil { + otel.Handle(err) + } + + h.outMsg, err = rpcconv.NewClientRequestsPerRPC(meter) + if err != nil { + otel.Handle(err) } return h @@ -98,112 +211,141 @@ func NewClientHandler(opts ...Option) stats.Handler { // TagRPC can attach some information to the given context. 
func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { name, attrs := internal.ParseFullMethod(info.FullMethodName) - attrs = append(attrs, RPCSystemGRPC) - ctx, _ = h.tracer.Start( - ctx, - name, - trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), - ) + attrs = append(attrs, semconv.RPCSystemGRPC) - gctx := gRPCContext{ - metricAttrs: append(attrs, h.config.MetricAttributes...), - record: true, + record := true + if h.Filter != nil { + record = h.Filter(info) } - if h.config.Filter != nil { - gctx.record = h.config.Filter(info) + + if record { + ctx, _ = h.tracer.Start( + ctx, + name, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(append(attrs, h.SpanAttributes...)...), + ) } - return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) + gctx := gRPCContext{ + metricAttrs: append(attrs, h.MetricAttributes...), + record: record, + } + + return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.Propagators) } // HandleRPC processes the RPC stats. func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - isServer := false - h.handleRPC(ctx, rs, isServer) + h.handleRPC( + ctx, + rs, + h.duration.Inst(), + h.inSize, + h.outSize, + h.inMsg.Inst(), + h.outMsg.Inst(), + func(s *status.Status) (codes.Code, string) { + return codes.Error, s.Message() + }, + ) } // TagConn can attach some information to the given context. -func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { +func (*clientHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx } // HandleConn processes the Conn stats. -func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) { +func (*clientHandler) HandleConn(context.Context, stats.ConnStats) { // no-op } -func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool) { // nolint: revive // isServer is not a control flag. - span := trace.SpanFromContext(ctx) - var metricAttrs []attribute.KeyValue - var messageId int64 +type int64Hist interface { + Record(context.Context, int64, ...attribute.KeyValue) +} +func (c *config) handleRPC( + ctx context.Context, + rs stats.RPCStats, + duration metric.Float64Histogram, + inSize, outSize int64Hist, + inMsg, outMsg metric.Int64Histogram, + recordStatus func(*status.Status) (codes.Code, string), +) { gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) - if gctx != nil { - if !gctx.record { - return - } - metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) - metricAttrs = append(metricAttrs, gctx.metricAttrs...) + if gctx != nil && !gctx.record { + return } + span := trace.SpanFromContext(ctx) + var messageId int64 + switch rs := rs.(type) { case *stats.Begin: case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.inMessages, 1) - c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + inSize.Record(ctx, int64(rs.Length), gctx.metricAttrs...) 
} - if c.ReceivedEvent { + if c.ReceivedEvent && span.IsRecording() { span.AddEvent("message", trace.WithAttributes( - semconv.MessageTypeReceived, - semconv.MessageIDKey.Int64(messageId), - semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), - semconv.MessageUncompressedSizeKey.Int(rs.Length), + semconv.RPCMessageTypeReceived, + semconv.RPCMessageIDKey.Int64(messageId), + semconv.RPCMessageCompressedSizeKey.Int(rs.CompressedLength), + semconv.RPCMessageUncompressedSizeKey.Int(rs.Length), ), ) } case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.outMessages, 1) - c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + outSize.Record(ctx, int64(rs.Length), gctx.metricAttrs...) } - if c.SentEvent { + if c.SentEvent && span.IsRecording() { span.AddEvent("message", trace.WithAttributes( - semconv.MessageTypeSent, - semconv.MessageIDKey.Int64(messageId), - semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), - semconv.MessageUncompressedSizeKey.Int(rs.Length), + semconv.RPCMessageTypeSent, + semconv.RPCMessageIDKey.Int64(messageId), + semconv.RPCMessageCompressedSizeKey.Int(rs.CompressedLength), + semconv.RPCMessageUncompressedSizeKey.Int(rs.Length), ), ) } case *stats.OutTrailer: case *stats.OutHeader: - if p, ok := peer.FromContext(ctx); ok { - span.SetAttributes(peerAttr(p.Addr.String())...) + if span.IsRecording() { + if p, ok := peer.FromContext(ctx); ok { + span.SetAttributes(serverAddrAttrs(p.Addr.String())...) + } } case *stats.End: var rpcStatusAttr attribute.KeyValue + var s *status.Status if rs.Error != nil { - s, _ := status.FromError(rs.Error) - if isServer { - statusCode, msg := serverStatus(s) - span.SetStatus(statusCode, msg) - } else { - span.SetStatus(codes.Error, s.Message()) - } + s, _ = status.FromError(rs.Error) rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())) } else { rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK)) } - span.SetAttributes(rpcStatusAttr) - span.End() + if span.IsRecording() { + if s != nil { + c, m := recordStatus(s) + span.SetStatus(c, m) + } + span.SetAttributes(rpcStatusAttr) + span.End() + } + var metricAttrs []attribute.KeyValue + if gctx != nil { + metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) + metricAttrs = append(metricAttrs, gctx.metricAttrs...) + } metricAttrs = append(metricAttrs, rpcStatusAttr) // Allocate vararg slice once. recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} @@ -212,10 +354,10 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) + duration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) - c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) + inMsg.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) + outMsg.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) 
} default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 80e5f2f6..aa4f4e21 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,13 +5,6 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.59.0" + return "0.63.0" // This string is updated by the pre_release.sh script during release } - -// SemVersion is the semantic version to be supplied to tracer/meter creation. -// -// Deprecated: Use [Version] instead. -func SemVersion() string { - return Version() -} diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml new file mode 100644 index 00000000..128d61a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.clomonitor.yml @@ -0,0 +1,3 @@ +exemptions: + - check: artifacthub_badge + reason: "Artifact Hub doesn't support Go packages" diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 6bf3abc4..a6d0cbcc 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -7,3 +7,5 @@ ans nam valu thirdparty +addOpt +observ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index c58e48ab..1b1b2aff 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -1,24 +1,18 @@ -# See https://github.com/golangci/golangci-lint#config-file +version: "2" run: - issues-exit-code: 1 #Default - tests: true #Default - + issues-exit-code: 1 + tests: true linters: - # Disable everything by default so upgrades to not include new "default - # enabled" linters. - disable-all: true - # Specifically enable linters we want to use. + default: none enable: - asasalint - bodyclose - depguard - errcheck - errorlint + - gocritic - godot - - gofumpt - - goimports - gosec - - gosimple - govet - ineffassign - misspell @@ -26,227 +20,244 @@ linters: - revive - staticcheck - testifylint - - typecheck - unconvert - - unused - unparam + - unused - usestdlibvars - usetesting - + settings: + depguard: + rules: + auto/sdk: + files: + - '!internal/global/trace.go' + - ~internal/global/trace_test.go + deny: + - pkg: go.opentelemetry.io/auto/sdk + desc: Do not use SDK from automatic instrumentation. + non-tests: + files: + - '!$test' + - '!**/*test/*.go' + - '!**/internal/matchers/*.go' + deny: + - pkg: testing + - pkg: github.com/stretchr/testify + - pkg: crypto/md5 + - pkg: crypto/sha1 + - pkg: crypto/**/pkix + otel-internal: + files: + - '**/sdk/*.go' + - '**/sdk/**/*.go' + - '**/exporters/*.go' + - '**/exporters/**/*.go' + - '**/schema/*.go' + - '**/schema/**/*.go' + - '**/metric/*.go' + - '**/metric/**/*.go' + - '**/bridge/*.go' + - '**/bridge/**/*.go' + - '**/trace/*.go' + - '**/trace/**/*.go' + - '**/log/*.go' + - '**/log/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/internal$ + desc: Do not use cross-module internal packages. + - pkg: go.opentelemetry.io/otel/internal/internaltest + desc: Do not use cross-module internal packages. 
+ otlp-internal: + files: + - '!**/exporters/otlp/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/internal + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - '!**/exporters/otlp/otlpmetric/internal/*.go' + - '!**/exporters/otlp/otlpmetric/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal + desc: Do not use cross-module internal packages. + otlptrace-internal: + files: + - '!**/exporters/otlp/otlptrace/*.go' + - '!**/exporters/otlp/otlptrace/internal/**.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal + desc: Do not use cross-module internal packages. + gocritic: + disabled-checks: + - appendAssign + - commentedOutCode + - dupArg + - hugeParam + - importShadow + - preferDecodeRune + - rangeValCopy + - unnamedResult + - whyNoLint + enable-all: true + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - ^[ ]*[-•] + # Exclude sentences prefixing a list. + - :$ + misspell: + locale: US + ignore-rules: + - cancelled + perfsprint: + int-conversion: true + err-error: true + errorf: true + sprintf1: true + strconcat: true + revive: + confidence: 0.01 + rules: + - name: blank-imports + - name: bool-literal-in-expr + - name: constant-logical-expr + - name: context-as-argument + arguments: + - allowTypesBefore: '*testing.T' + disabled: true + - name: context-keys-type + - name: deep-exit + - name: defer + arguments: + - - call-chain + - loop + - name: dot-imports + - name: duplicated-imports + - name: early-return + arguments: + - preserveScope + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + arguments: + - sayRepetitiveInsteadOfStutters + - name: flag-parameter + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + arguments: + - preserveScope + - name: package-comments + - name: range + - name: range-val-in-closure + - name: range-val-address + - name: redefines-builtin-id + - name: string-format + arguments: + - - panic + - /^[^\n]*$/ + - must not contain line breaks + - name: struct-tag + - name: superfluous-else + arguments: + - preserveScope + - name: time-equal + - name: unconditional-recursion + - name: unexported-return + - name: unhandled-error + arguments: + - fmt.Fprint + - fmt.Fprintf + - fmt.Fprintln + - fmt.Print + - fmt.Printf + - fmt.Println + - name: unused-parameter + - name: unused-receiver + - name: unnecessary-stmt + - name: use-any + - name: useless-break + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + - name: waitgroup-by-value + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error + usetesting: + context-background: true + context-todo: true + exclusions: + generated: lax + presets: + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - revive + path: schema/v.*/types/.* + text: avoid meaningless package names + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. 
+ - linters: + - revive + path: .*internal/.* + text: exported (method|function|type|const) (.+) should have comment or be unexported + # Yes, they are, but it's okay in a test. + - linters: + - revive + path: _test\.go + text: exported func.*returns unexported type.*which can be annoying to use + # Example test functions should be treated like main. + - linters: + - revive + path: example.*_test\.go + text: calls to (.+) only in main[(][)] or init[(][)] functions + # It's okay to not run gosec and perfsprint in a test. + - linters: + - gosec + - perfsprint + path: _test\.go + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - linters: + - gosec + text: 'G404:' + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - linters: + - gosec + text: 'G402: TLS MinVersion too low.' issues: - # Maximum issues count per one linter. - # Set to 0 to disable. - # Default: 50 - # Setting to unlimited so the linter only is run once to debug all issues. max-issues-per-linter: 0 - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 - # Setting to unlimited so the linter only is run once to debug all issues. max-same-issues: 0 - # Excluding configuration per-path, per-linter, per-text and per-source. - exclude-rules: - # TODO: Having appropriate comments for exported objects helps development, - # even for objects in internal packages. Appropriate comments for all - # exported objects should be added and this exclusion removed. - - path: '.*internal/.*' - text: "exported (method|function|type|const) (.+) should have comment or be unexported" - linters: - - revive - # Yes, they are, but it's okay in a test. - - path: _test\.go - text: "exported func.*returns unexported type.*which can be annoying to use" - linters: - - revive - # Example test functions should be treated like main. - - path: example.*_test\.go - text: "calls to (.+) only in main[(][)] or init[(][)] functions" - linters: - - revive - # It's okay to not run gosec and perfsprint in a test. - - path: _test\.go - linters: - - gosec - - perfsprint - # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) - # as we commonly use it in tests and examples. - - text: "G404:" - linters: - - gosec - # Ignoring gosec G402: TLS MinVersion too low - # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - - text: "G402: TLS MinVersion too low." - linters: - - gosec - include: - # revive exported should have comment or be unexported. - - EXC0012 - # revive package comment should be of the form ... - - EXC0013 - -linters-settings: - depguard: - rules: - non-tests: - files: - - "!$test" - - "!**/*test/*.go" - - "!**/internal/matchers/*.go" - deny: - - pkg: "testing" - - pkg: "github.com/stretchr/testify" - - pkg: "crypto/md5" - - pkg: "crypto/sha1" - - pkg: "crypto/**/pkix" - auto/sdk: - files: - - "!internal/global/trace.go" - - "~internal/global/trace_test.go" - deny: - - pkg: "go.opentelemetry.io/auto/sdk" - desc: Do not use SDK from automatic instrumentation. - otlp-internal: - files: - - "!**/exporters/otlp/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" - desc: Do not use cross-module internal packages. 
- otlptrace-internal: - files: - - "!**/exporters/otlp/otlptrace/*.go" - - "!**/exporters/otlp/otlptrace/internal/**.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" - desc: Do not use cross-module internal packages. - otlpmetric-internal: - files: - - "!**/exporters/otlp/otlpmetric/internal/*.go" - - "!**/exporters/otlp/otlpmetric/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" - desc: Do not use cross-module internal packages. - otel-internal: - files: - - "**/sdk/*.go" - - "**/sdk/**/*.go" - - "**/exporters/*.go" - - "**/exporters/**/*.go" - - "**/schema/*.go" - - "**/schema/**/*.go" - - "**/metric/*.go" - - "**/metric/**/*.go" - - "**/bridge/*.go" - - "**/bridge/**/*.go" - - "**/trace/*.go" - - "**/trace/**/*.go" - - "**/log/*.go" - - "**/log/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/internal$" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/attribute" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/internaltest" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/matchers" - desc: Do not use cross-module internal packages. - godot: - exclude: - # Exclude links. - - '^ *\[[^]]+\]:' - # Exclude sentence fragments for lists. - - '^[ ]*[-•]' - # Exclude sentences prefixing a list. - - ':$' - goimports: - local-prefixes: go.opentelemetry.io - misspell: - locale: US - ignore-words: - - cancelled - perfsprint: - err-error: true - errorf: true - int-conversion: true - sprintf1: true - strconcat: true - revive: - # Sets the default failure confidence. - # This means that linting errors with less than 0.8 confidence will be ignored. 
- # Default: 0.8 - confidence: 0.01 - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md - rules: - - name: blank-imports - - name: bool-literal-in-expr - - name: constant-logical-expr - - name: context-as-argument - disabled: true - arguments: - - allowTypesBefore: "*testing.T" - - name: context-keys-type - - name: deep-exit - - name: defer - arguments: - - ["call-chain", "loop"] - - name: dot-imports - - name: duplicated-imports - - name: early-return - arguments: - - "preserveScope" - - name: empty-block - - name: empty-lines - - name: error-naming - - name: error-return - - name: error-strings - - name: errorf - - name: exported - arguments: - - "sayRepetitiveInsteadOfStutters" - - name: flag-parameter - - name: identical-branches - - name: if-return - - name: import-shadowing - - name: increment-decrement - - name: indent-error-flow - arguments: - - "preserveScope" - - name: package-comments - - name: range - - name: range-val-in-closure - - name: range-val-address - - name: redefines-builtin-id - - name: string-format - arguments: - - - panic - - '/^[^\n]*$/' - - must not contain line breaks - - name: struct-tag - - name: superfluous-else - arguments: - - "preserveScope" - - name: time-equal - - name: unconditional-recursion - - name: unexported-return - - name: unhandled-error - arguments: - - "fmt.Fprint" - - "fmt.Fprintf" - - "fmt.Fprintln" - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - - name: unnecessary-stmt - - name: useless-break - - name: var-declaration - - name: var-naming - arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - - name: waitgroup-by-value - testifylint: - enable-all: true - disable: - - float-compare - - go-require - - require-error +formatters: + enable: + - gofumpt + - goimports + - golines + settings: + gofumpt: + extra-rules: true + goimports: + local-prefixes: + - go.opentelemetry.io/otel + golines: + max-len: 120 + exclusions: + generated: lax diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 40d62fa2..994b677d 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -1,6 +1,13 @@ http://localhost +https://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects +# Weaver model URL for semantic-conventions repository. +https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual +http://4.3.2.1:78/user/123 +file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317 +# URL works, but it has blocked link checkers. +https://dl.acm.org/doi/10.1145/198429.198435 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c076db28..ecbe0582 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,267 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05 + +### Added + +- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. 
(#7175) +- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages. + This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287) +- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. + Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353) +- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434) +- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7459) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486) +- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512) +- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524) +- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571) +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608) +- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`. + All `Processor` implementations now include an `Enabled` method. (#7639) +- The `go.opentelemetry.io/otel/semconv/v1.38.0` package. + The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0.`(#7648) + +### Changed + +- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set. + Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266) +- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. 
(#7302) +- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306) +- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`. + ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded. + Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default. + To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363) +- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371) +- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421) +- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7427) +- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. (#7438) +- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types. + If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442) + +### Fixed + +- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them. + Attributes with duplicate keys will use the last value passed. (#7300) +- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372) +- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656) + +### Removed + +- Drop support for [Go 1.23]. (#7274) +- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`. + The `Enabled` method has been added to the `Processor` interface instead. + All `Processor` implementations must now implement the `Enabled` method. 
+ Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639) + +## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 + +This release is the last to support [Go 1.23]. +The next release will require at least [Go 1.24]. + +### Added + +- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772) +- Add template attribute functions to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6939) + - `ContainerLabel` + - `DBOperationParameter` + - `DBSystemParameter` + - `HTTPRequestHeader` + - `HTTPResponseHeader` + - `K8SCronJobAnnotation` + - `K8SCronJobLabel` + - `K8SDaemonSetAnnotation` + - `K8SDaemonSetLabel` + - `K8SDeploymentAnnotation` + - `K8SDeploymentLabel` + - `K8SJobAnnotation` + - `K8SJobLabel` + - `K8SNamespaceAnnotation` + - `K8SNamespaceLabel` + - `K8SNodeAnnotation` + - `K8SNodeLabel` + - `K8SPodAnnotation` + - `K8SPodLabel` + - `K8SReplicaSetAnnotation` + - `K8SReplicaSetLabel` + - `K8SStatefulSetAnnotation` + - `K8SStatefulSetLabel` + - `ProcessEnvironmentVariable` + - `RPCConnectRPCRequestMetadata` + - `RPCConnectRPCResponseMetadata` + - `RPCGRPCRequestMetadata` + - `RPCGRPCResponseMetadata` +- Add `ErrorType` attribute helper function to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6962) +- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968) +- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179) +- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001) +- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`. + Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209) +- The `go.opentelemetry.io/otel/semconv/v1.36.0` package. + The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0.`(#7032, #7041) +- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111) +- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`. + Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121) +- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. + Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133) +- Support testing of [Go 1.25]. (#7187) +- The `go.opentelemetry.io/otel/semconv/v1.37.0` package. + The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions. 
+ See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0.`(#7254) + +### Changed + +- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791) +- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908) +- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094) + +### Fixed + +- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088) +- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195) +- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199) + +### Deprecated + +- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111) +- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166) + +## [0.59.1] 2025-07-21 + +### Changed + +- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046) +- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled. + It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044) + +### Fixed + +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets. + E.g. `{spans}`. (#7044) + +## [1.37.0/0.59.0/0.13.0] 2025-06-25 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.33.0` package. + The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0.`(#6799) +- The `go.opentelemetry.io/otel/semconv/v1.34.0` package. + The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812) +- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825) +- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825) +- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839) + +### Changed + +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. 
(#6829) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835) +- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836) +- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864) +- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898) + +### Fixed + +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710) +- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710) +- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822) +- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914) + +### Removed + +- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770) + +## [0.12.2] 2025-05-22 + +### Fixed + +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804) +- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804) + +## [0.12.1] 2025-05-21 + +### Fixes + +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800) +- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800) + +## [1.36.0/0.58.0/0.12.0] 2025-05-20 + +### Added + +- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421) +- The `go.opentelemetry.io/otel/semconv/v1.31.0` package. + The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. 
(#6479) +- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688) +- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973) +- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973) +- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973) +- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662) +- The `go.opentelemetry.io/otel/semconv/v1.32.0` package. + The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`(#6782) +- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794) +- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796) + +### Removed + +- Drop support for [Go 1.22]. (#6381, #6418) +- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494) +- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492) +- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662) + +### Changed + +- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6433) +- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455) +- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. (#6465) +- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466) +- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. (#6507) +- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641) + +### Deprecated + +- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449) + +### Fixes + +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. 
(#6392) +- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456) +- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472) + ## [1.35.0/0.57.0/0.11.0] 2025-03-05 This release is the last to support [Go 1.22]. @@ -3237,7 +3498,14 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD +[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 +[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 +[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1 +[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 +[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 +[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 +[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 [1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 @@ -3329,6 +3597,7 @@ It contains api and sdk for trace and meter. +[Go 1.25]: https://go.dev/doc/go1.25 [Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 945a07d2..26a03aed 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @pellared @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125 CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 7b8af585..ff5e1f76 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel (This may print some warning about "build constraints exclude all Go files", just ignore it.) -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. +Alternatively, you can use `git` directly with: ```sh git clone https://github.com/open-telemetry/opentelemetry-go @@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go that name is a kind of a redirector to GitHub that `go get` can understand, but `git` does not.) -This would put the project in the `opentelemetry-go` directory in -current working directory. +This will add the project as `opentelemetry-go` within the current directory. 
Enter the newly created directory and add your fork as a new remote: @@ -109,10 +108,9 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * The qualified approvals need to be from [Approver]s/[Maintainer]s - affiliated with different companies. Two qualified approvals from - [Approver]s or [Maintainer]s affiliated with the same company counts as a - single qualified approval. + * At least one of the qualified approvals needs to be from an + [Approver]/[Maintainer] affiliated with a different company than the author + of the PR. * PRs introducing changes that have already been discussed and consensus reached only need one qualified approval. The discussion and resolution needs to be linked to the PR. @@ -167,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). ### Focus on Capabilities, Not Structure Compliance OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those uses cases are +use cases are clear, but the methods to satisfy those use cases are not. As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure is +conforms to the specification, but the interface and structure are flexible. It is preferable to have contributions follow the idioms of the @@ -193,6 +191,35 @@ should have `go test -bench` output in their description. should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) output in their description. +## Dependencies + +This project uses [Go Modules] for dependency management. All modules will use +`go.mod` to explicitly list all direct and indirect dependencies, ensuring a +clear dependency graph. The `go.sum` file for each module will be committed to +the repository and used to verify the integrity of downloaded modules, +preventing malicious tampering. + +This project uses automated dependency update tools (i.e. dependabot, +renovatebot) to manage updates to dependencies. This ensures that dependencies +are kept up-to-date with the latest security patches and features and are +reviewed before being merged. If you would like to propose a change to a +dependency it should be done through a pull request that updates the `go.mod` +file and includes a description of the change. + +See the [versioning and compatibility](./VERSIONING.md) policy for more details +about dependency compatibility. + +[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more + +### Environment Dependencies + +This project does not partition dependencies based on the environment (i.e. +`development`, `staging`, `production`). + +Only the dependencies explicitly included in the released modules have been +tested and verified to work with the released code. No other guarantee is made +about the compatibility of other dependencies. + ## Documentation Each (non-internal, non-test) package must be documented using @@ -234,6 +261,10 @@ For a non-comprehensive but foundational overview of these best practices the [Effective Go](https://golang.org/doc/effective_go.html) documentation is an excellent starting place. +We also recommend following the +[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +that collects common comments made during reviews of Go code. 
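For the `go test -bench` and `benchstat` output requested above, a plain `testing` benchmark over the changed code path is usually sufficient; the package name and the `attribute.NewSet` call below are only stand-ins for whatever your PR touches:

```go
package example_test

import (
	"testing"

	"go.opentelemetry.io/otel/attribute"
)

// BenchmarkNewSet exercises a representative hot path; replace the body with
// the code your change affects.
func BenchmarkNewSet(b *testing.B) {
	attrs := []attribute.KeyValue{
		attribute.String("rpc.system", "grpc"),
		attribute.Int("rpc.grpc.status_code", 0),
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = attribute.NewSet(attrs...)
	}
}
```

Running the benchmark before and after the change (for example with `-count=10`) and feeding both outputs to `benchstat` produces the comparison reviewers expect.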
+ As a convenience for developers building this project the `make precommit` will format, lint, validate, and in some cases fix the changes you plan to submit. This check will need to pass for your changes to be able to be @@ -587,6 +618,10 @@ See also: ### Testing +We allow using [`testify`](https://github.com/stretchr/testify) even though +it is seen as non-idiomatic according to +the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page. + The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the @@ -599,8 +634,8 @@ is not in their root name. The use of internal packages should be scoped to a single module. A sub-module should never import from a parent internal package. This creates a coupling -between the two modules where a user can upgrade the parent without the child -and if the internal package API has changed it will fail to upgrade[^3]. +between the two modules where a user can upgrade the parent without the child, +and if the internal package API has changed, it will fail to upgrade[^3]. There are two known exceptions to this rule: @@ -621,7 +656,7 @@ this. ### Ignoring context cancellation -OpenTelemetry API implementations need to ignore the cancellation of the context that are +OpenTelemetry API implementations need to ignore the cancellation of the context that is passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). Recording methods should not return an error describing the cancellation state of the context when they complete, nor should they abort any work. @@ -639,32 +674,478 @@ force flushing telemetry, shutting down a signal provider) the context cancellat should be honored. This means all work done on behalf of the user provided context should be canceled. +### Observability + +OpenTelemetry Go SDK components should be instrumented to enable users observability for the health and performance of the telemetry pipeline itself. +This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications. + +This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components. + +#### Environment Variable Activation + +Observability features are currently experimental. +They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable. +This follows the established experimental feature pattern used throughout the SDK. + +Components should check for this environment variable using a consistent pattern: + +```go +import "go.opentelemetry.io/otel/*/internal/x" + +if x.Observability.Enabled() { + // Initialize observability metrics +} +``` + +**References**: + +- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go) +- [sdk](./sdk/internal/x/x.go) + +#### Encapsulation + +Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`). +It should not be mixed into the instrumented component. + +Prefer this: + +```go +type SDKComponent struct { + inst *instrumentation +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +To this: + +```go +// ❌ Avoid this pattern. +type SDKComponent struct { + /* other SDKComponent fields... 
*/ + + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +The instrumentation code should not bloat the code being instrumented. +Likely, this means its own file, or its own package if it is complex or reused. + +#### Initialization + +Instrumentation setup should be explicit, side-effect free, and local to the relevant component. +Avoid relying on global or implicit [side effects][side-effect] for initialization. + +Encapsulate setup in constructor functions, ensuring clear ownership and scope: + +```go +import ( + "errors" + + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +type SDKComponent struct { + inst *instrumentation +} + +func NewSDKComponent(config Config) (*SDKComponent, error) { + inst, err := newInstrumentation() + if err != nil { + return nil, err + } + return &SDKComponent{inst: inst}, nil +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} + +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + "", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + inst := &instrumentation{} + + var err, e error + inst.inflight, e = otelconv.NewSDKComponentInflight(meter) + err = errors.Join(err, e) + + inst.exported, e = otelconv.NewSDKComponentExported(meter) + err = errors.Join(err, e) + + return inst, err +} +``` + +```go +// ❌ Avoid this pattern. +func (c *Component) initObservability() { + // Initialize observability metrics + if !x.Observability.Enabled() { + return + } + + // Initialize observability metrics + c.inst = &instrumentation{/* ... */} +} +``` + +[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science) + +#### Performance + +When observability is disabled there should be little to no overhead. + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + if e.inst != nil { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + } + // Export spans... +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + // Export spans... +} + +func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) { + if i == nil || i.inflight == nil { + return + } + i.inflight.Add(ctx, count, metric.WithAttributes(attrs...)) +} +``` + +When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead. + +##### Attribute and Option Allocation Management + +Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes. + +```go +var ( + attrPool = sync.Pool{ + New: func() any { + // Pre-allocate common capacity + knownCap := 8 // Adjust based on expected usage + s := make([]attribute.KeyValue, 0, knownCap) + // Return a pointer to avoid extra allocation on Put(). + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + // Return a pointer to avoid extra allocation on Put(). 
+ return &o + }, + } +) + +func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) { + attrs := attrPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // Reset. + attrPool.Put(attrs) + }() + + *attrs = append(*attrs, baseAttrs...) + // Add any dynamic attributes. + *attrs = append(*attrs, semconv.OTelComponentName("exporter-1")) + + addOpt := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *addOpt = (*addOpt)[:0] + addOptPool.Put(addOpt) + }() + + set := attribute.NewSet(*attrs...) + *addOpt = append(*addOpt, metric.WithAttributeSet(set)) + + i.counter.Add(ctx, value, *addOpt...) +} +``` + +Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used. +This amortizes the cost of allocation and synchronization. +Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness. + +[`sync.Pool`]: https://pkg.go.dev/sync#Pool + +##### Cache common attribute sets for repeated measurements + +If a static set of attributes are used for measurements and they are known at compile time, pre-compute and cache these attributes. + +```go +type spanLiveSetKey struct { + sampled bool +} + +var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ + {true}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + {false}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), +} + +func spanLiveSet(sampled bool) attribute.Set { + key := spanLiveSetKey{sampled: sampled} + return spanLiveSetCache[key] +} +``` + +##### Benchmarking + +Always provide benchmarks when introducing or refactoring instrumentation. +Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios: + +```go +func BenchmarkExportSpans(b *testing.B) { + scenarios := []struct { + name string + obsEnabled bool + }{ + {"ObsDisabled", false}, + {"ObsEnabled", true}, + } + + for _, scenario := range scenarios { + b.Run(scenario.name, func(b *testing.B) { + b.Setenv( + "OTEL_GO_X_OBSERVABILITY", + strconv.FormatBool(scenario.obsEnabled), + ) + + exporter := NewExporter() + spans := generateTestSpans(100) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _ = exporter.ExportSpans(context.Background(), spans) + } + }) + } +} +``` + +#### Error Handling and Robustness + +Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible. + +```go +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter(/* initialize meter */) + counter, err := otelconv.NewSDKComponentCounter(m) + // Use the partially initialized counter if available. + i := &instrumentation{counter: counter} + // Return any error to the caller. + return i, err +} +``` + +```go +// ❌ Avoid this pattern. +func newInstrumentation() *instrumentation { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter(/* initialize meter */) + counter, err := otelconv.NewSDKComponentCounter(m) + if err != nil { + // ❌ Do not dump the error to the OTel Handler. Return it to the + // caller. + otel.Handle(err) + // ❌ Do not return nil if we can still use the partially initialized + // counter. 
+ return nil + } + return &instrumentation{counter: counter} +} +``` + +If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`. + +#### Context Propagation + +Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context: + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // Use the provided context for observability measurements + e.inst.recordSpanExportStarted(ctx, len(spans)) + + err := e.doExport(ctx, spans) + + if err != nil { + e.inst.recordSpanExportFailed(ctx, len(spans), err) + } else { + e.inst.recordSpanExportSucceeded(ctx, len(spans)) + } + + return err +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // ❌ Do not break the context propagation. + e.inst.recordSpanExportStarted(context.Background(), len(spans)) + + err := e.doExport(ctx, spans) + + /* ... */ + + return err +} +``` + +#### Semantic Conventions Compliance + +All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md). + +Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go). + +##### Component Identification + +Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes). + +If a component is not a well-known type specified in the semantic conventions, use the package path scope type as a stable identifier. + +```go +componentType := "go.opentelemetry.io/otel/sdk/trace.Span" +``` + +```go +// ❌ Do not do this. +componentType := "trace-span" +``` + +The component name should be a stable unique identifier for the specific instance of the component. + +Use a global counter to ensure uniqueness if necessary. + +```go +// Unique 0-based ID counter for component instances. +var componentIDCounter atomic.Int64 + +// nextID returns the next unique ID for a component. +func nextID() int64 { + return componentIDCounter.Add(1) - 1 +} + +// componentName returns a unique name for the component instance. +func componentName() attribute.KeyValue { + id := nextID() + name := fmt.Sprintf("%s/%d", componentType, id) + return semconv.OTelComponentName(name) +} +``` + +The component ID will need to be resettable for deterministic testing. +If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter. +See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference. + +#### Testing + +Use deterministic testing with isolated state: + +```go +func TestObservability(t *testing.T) { + // Restore state after test to ensure this does not affect other tests. + prev := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(prev) }) + + // Isolate the meter provider for deterministic testing + reader := metric.NewManualReader() + meterProvider := metric.NewMeterProvider(metric.WithReader(reader)) + otel.SetMeterProvider(meterProvider) + + // Use t.Setenv to ensure environment variable is restored after test. 
+ t.Setenv("OTEL_GO_X_OBSERVABILITY", "true") + + // Reset component ID counter to ensure deterministic component names. + componentIDCounter.Store(0) + + /* ... test code ... */ +} +``` + +Test order should not affect results. +Ensure that any global state (e.g. component ID counters) is reset between tests. + ## Approvers and Maintainers -### Triagers +### Maintainers -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent +- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) +- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70)) +- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2)) +- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) +- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) + +For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer). ### Approvers -### Maintainers +- [Flc](https://github.com/flc1125), Independent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [David Ashpole](https://github.com/dashpole), Google -- [Robert Pająk](https://github.com/pellared), Splunk -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [Tyler Yahn](https://github.com/MrAlias), Splunk +For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). + +### Triagers + +- [Alex Kats](https://github.com/akats7), Capital One + +For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). ### Emeritus - [Aaron Clawson](https://github.com/MadVikingGod) - [Anthony Mirabella](https://github.com/Aneurysm9) +- [Cheng-Zhen Yang](https://github.com/scorpionknifes) - [Chester Cheung](https://github.com/hanyuancheung) - [Evan Torrie](https://github.com/evantorrie) - [Gustavo Silva Paiva](https://github.com/paivagustavo) - [Josh MacDonald](https://github.com/jmacd) - [Liz Fong-Jones](https://github.com/lizthegrey) +For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). + ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 226410d7..44870248 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -34,17 +34,17 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) MULTIMOD = $(TOOLS)/multimod $(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +VERIFYREADMES = $(TOOLS)/verifyreadmes +$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes + GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint MISSPELL = $(TOOLS)/misspell $(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell @@ -68,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -146,11 +146,12 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. 
test-short: ARGS=-short +test-fuzz: ARGS=-fuzztime=10s -fuzz test-verbose: ARGS=-v -race test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race test-concurrent-safe: TIMEOUT=120 @@ -213,11 +214,8 @@ go-mod-tidy/%: crosslink && cd $(DIR) \ && $(GO) mod tidy -compat=1.21 -.PHONY: lint-modules -lint-modules: go-mod-tidy - .PHONY: lint -lint: misspell lint-modules golangci-lint govulncheck +lint: misspell go-mod-tidy golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: $(PORTO) @@ -284,7 +282,7 @@ semconv-generate: $(SEMCONVKIT) docker run --rm \ -u $(DOCKER_USER) \ --env HOME=/tmp/weaver \ - --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ $(WEAVER_IMAGE) registry generate \ @@ -293,7 +291,7 @@ semconv-generate: $(SEMCONVKIT) --param tag=$(TAG) \ go \ /home/weaver/target - $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)" .PHONY: gorelease gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) @@ -319,10 +317,11 @@ add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} +MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) .PHONY: lint-markdown lint-markdown: - docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md .PHONY: verify-readmes -verify-readmes: - ./verify_readmes.sh +verify-readmes: $(VERIFYREADMES) + $(VERIFYREADMES) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 8421cd7e..c6335954 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -6,6 +6,8 @@ [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) [![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) +[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). @@ -51,27 +53,20 @@ Currently, this project supports the following environments. 
| OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | -| Ubuntu | 1.23 | amd64 | -| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | -| Ubuntu | 1.23 | 386 | -| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | -| Ubuntu | 1.23 | arm64 | -| Ubuntu | 1.22 | arm64 | -| macOS 13 | 1.24 | amd64 | -| macOS 13 | 1.23 | amd64 | -| macOS 13 | 1.22 | amd64 | +| macOS | 1.25 | amd64 | +| macOS | 1.24 | amd64 | +| macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | -| macOS | 1.23 | arm64 | -| macOS | 1.22 | arm64 | +| Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | -| Windows | 1.23 | amd64 | -| Windows | 1.22 | amd64 | +| Windows | 1.25 | 386 | | Windows | 1.24 | 386 | -| Windows | 1.23 | 386 | -| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 1e13ae54..861756fd 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -1,5 +1,9 @@ # Release Process +## Create a `Version Release` issue + +Create a `Version Release` issue to track the release process. + ## Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. @@ -20,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit ## Breaking changes validation -You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. +You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). @@ -58,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` 3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. + - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand. To verify this, you can look directly at the commits since the ``. ``` @@ -103,11 +107,50 @@ It is critical you make sure the version you push upstream is correct. ... ``` +## Sign artifacts + +To ensure we comply with CNCF best practices, we need to sign the release artifacts. + +Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag. +Both archives need to be signed with your GPG key. + +You can use [this script] to verify the contents of the archives before signing them. + +To find your GPG key ID, run: + +```terminal +gpg --list-secret-keys --keyid-format=long +``` + +The key ID is the 16-character string after `sec rsa4096/` (or similar). 
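+
+For example, in abridged output like the following (illustrative values only), the key ID is `3AA5C34371567BD2`:
+
+```terminal
+sec   rsa4096/3AA5C34371567BD2 2024-01-15 [SC]
+      1234567890ABCDEF1234567890ABCDEF12345678
+uid           [ultimate] Your Name <you@example.com>
+ssb   rsa4096/42B317FD4BA89E7A 2024-01-15 [E]
+```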
+ +Set environment variables and sign both artifacts: + +```terminal +export VERSION="" # e.g., v1.32.0 +export KEY_ID="" + +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip +``` + +You can verify the signatures with: + +```terminal +gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz +gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip +``` + +[this script]: https://github.com/MrAlias/attest-sh + ## Release Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. +***IMPORTANT***: GitHub Releases are immutable once created. +You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later. + ## Post-Release ### Contrib Repository @@ -123,10 +166,16 @@ Importantly, bump any package versions referenced to be the latest one you just [Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ [content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go -### Demo Repository +### Close the milestone + +Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone. +This helps track what changes were included in each release. + +- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr) +- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged). + +Once all related issues and PRs have been added to the milestone, close the milestone. -Bump the dependencies in the following Go services: +### Close the `Version Release` issue -- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) +Once the todo list in the `Version Release` issue is complete, close the issue. 
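+
+For reference, rough GitHub CLI equivalents of the milestone search queries above (a sketch only; it assumes `gh` is installed and authenticated):
+
+```terminal
+gh issue list --repo open-telemetry/opentelemetry-go --state closed \
+  --search "no:milestone reason:completed -label:Stale linked:pr"
+
+gh pr list --repo open-telemetry/opentelemetry-go --state merged \
+  --search "no:milestone"
+```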
diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml new file mode 100644 index 00000000..8041fc62 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml @@ -0,0 +1,203 @@ +header: + schema-version: "1.0.0" + expiration-date: "2026-08-04T00:00:00.000Z" + last-updated: "2025-08-04" + last-reviewed: "2025-08-04" + commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8 + project-url: https://github.com/open-telemetry/opentelemetry-go + project-release: "v1.37.0" + changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md + license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE + +project-lifecycle: + status: active + bug-fixes-only: false + core-maintainers: + - https://github.com/dmathieu + - https://github.com/dashpole + - https://github.com/pellared + - https://github.com/XSAM + - https://github.com/MrAlias + release-process: | + See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md + +contribution-policy: + accepts-pull-requests: true + accepts-automated-pull-requests: true + automated-tools-list: + - automated-tool: dependabot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: renovatebot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: opentelemetrybot + action: allowed + comment: Automated OpenTelemetry actions are accepted. + contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md + +documentation: + - https://pkg.go.dev/go.opentelemetry.io/otel + - https://opentelemetry.io/docs/instrumentation/go/ + +distribution-points: + - pkg:golang/go.opentelemetry.io/otel + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test + - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin + - pkg:golang/go.opentelemetry.io/otel/metric + - pkg:golang/go.opentelemetry.io/otel/sdk + - pkg:golang/go.opentelemetry.io/otel/sdk/metric + - pkg:golang/go.opentelemetry.io/otel/trace + - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus + - pkg:golang/go.opentelemetry.io/otel/log + - pkg:golang/go.opentelemetry.io/otel/log/logtest + - pkg:golang/go.opentelemetry.io/otel/sdk/log + - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog + - pkg:golang/go.opentelemetry.io/otel/schema + +security-artifacts: + 
threat-model: + threat-model-created: false + comment: | + No formal threat model created yet. + self-assessment: + self-assessment-created: false + comment: | + No formal self-assessment yet. + +security-testing: + - tool-type: sca + tool-name: Dependabot + tool-version: latest + tool-url: https://github.com/dependabot + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Automated dependency updates. + - tool-type: sast + tool-name: golangci-lint + tool-version: latest + tool-url: https://github.com/golangci/golangci-lint + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Static analysis in CI. + - tool-type: fuzzing + tool-name: OSS-Fuzz + tool-version: latest + tool-url: https://github.com/google/oss-fuzz + tool-rulesets: + - default + integration: + ad-hoc: false + ci: false + before-release: false + comment: | + OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details. + - tool-type: sast + tool-name: CodeQL + tool-version: latest + tool-url: https://github.com/github/codeql + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details. + - tool-type: sca + tool-name: govulncheck + tool-version: latest + tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration. + +security-assessments: + - auditor-name: 7ASecurity + auditor-url: https://7asecurity.com + auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf + report-year: 2023 + comment: | + This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository. + +security-contacts: + - type: email + value: cncf-opentelemetry-security@lists.cncf.io + primary: true + - type: website + value: https://github.com/open-telemetry/opentelemetry-go/security/policy + primary: false + +vulnerability-reporting: + accepts-vulnerability-reports: true + email-contact: cncf-opentelemetry-security@lists.cncf.io + security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy + comment: | + Security issues should be reported via email or GitHub security policy page. 
+ +dependencies: + third-party-packages: true + dependencies-lists: + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod + dependencies-lifecycle: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + Dependency lifecycle managed via go.mod and renovatebot. + env-dependencies-policy: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + See contributing policy for environment usage. diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index b8cb605c..b27c9e84 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -83,7 +83,7 @@ is designed so the following goals can be achieved. in either the module path or the import path. 
* In addition to public APIs, telemetry produced by stable instrumentation will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboard. + alerts and dashboards. * Modules will be used to encapsulate instrumentation, detectors, exporters, propagators, and any other independent sets of related components. * Experimental modules still under active development will be versioned at diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 318e42fc..6cc1a165 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -16,7 +16,7 @@ type ( // set into a wire representation. Encoder interface { // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by a attribute.Set. + // its Iterator. This result may be cached by an attribute.Set. Encode(iterator Iterator) string // ID returns a value that is unique for each class of attribute @@ -78,7 +78,7 @@ func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return &bytes.Buffer{} }, }, @@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string { for iter.Next() { i, keyValue := iter.IndexedAttribute() if i > 0 { - _, _ = buf.WriteRune(',') + _ = buf.WriteByte(',') } copyAndEscape(buf, string(keyValue.Key)) - _, _ = buf.WriteRune('=') + _ = buf.WriteByte('=') if keyValue.Value.Type() == STRING { copyAndEscape(buf, keyValue.Value.AsString()) @@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) + _ = buf.WriteByte(escapeChar) } _, _ = buf.WriteRune(ch) } } -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. +// Valid reports whether this encoder ID was allocated by +// [NewEncoderID]. Invalid encoder IDs will not be cached. func (id EncoderID) Valid() bool { return id.value != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index be9cd922..624ebbe3 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -15,11 +15,11 @@ type Filter func(KeyValue) bool // // If keys is empty a deny-all filter is returned. func NewAllowKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return false } + if len(keys) == 0 { + return func(KeyValue) bool { return false } } - allowed := make(map[Key]struct{}) + allowed := make(map[Key]struct{}, len(keys)) for _, k := range keys { allowed[k] = struct{}{} } @@ -34,11 +34,11 @@ func NewAllowKeysFilter(keys ...Key) Filter { // // If keys is empty an allow-all filter is returned. 
func NewDenyKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return true } + if len(keys) == 0 { + return func(KeyValue) bool { return true } } - forbid := make(map[Key]struct{}) + forbid := make(map[Key]struct{}, len(keys)) for _, k := range keys { forbid[k] = struct{}{} } diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go new file mode 100644 index 00000000..6aa69aea --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" +) + +// Type identifiers. These identifiers are hashed before the value of the +// corresponding type. This is done to distinguish values that are hashed with +// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and +// int64(0)). +// +// These are all 8 byte length strings converted to a uint64 representation. A +// uint64 is used instead of the string directly as an optimization, it avoids +// the for loop in [xxhash] which adds minor overhead. +const ( + boolID uint64 = 7953749933313450591 // "_boolean" (little endian) + int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian) + float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian) + stringID uint64 = 6874584755375207263 // "_string_" (little endian) + boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian) + int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) + float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) + stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) +) + +// hashKVs returns a new xxHash64 hash of kvs. +func hashKVs(kvs []KeyValue) uint64 { + h := xxhash.New() + for _, kv := range kvs { + h = hashKV(h, kv) + } + return h.Sum64() +} + +// hashKV returns the xxHash64 hash of kv with h as the base. +func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { + h = h.String(string(kv.Key)) + + switch kv.Value.Type() { + case BOOL: + h = h.Uint64(boolID) + h = h.Uint64(kv.Value.numeric) + case INT64: + h = h.Uint64(int64ID) + h = h.Uint64(kv.Value.numeric) + case FLOAT64: + h = h.Uint64(float64ID) + // Assumes numeric stored with math.Float64bits. + h = h.Uint64(kv.Value.numeric) + case STRING: + h = h.Uint64(stringID) + h = h.String(kv.Value.stringly) + case BOOLSLICE: + h = h.Uint64(boolSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Bool(rv.Index(i).Bool()) + } + case INT64SLICE: + h = h.Uint64(int64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Int64(rv.Index(i).Int()) + } + case FLOAT64SLICE: + h = h.Uint64(float64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Float64(rv.Index(i).Float()) + } + case STRINGSLICE: + h = h.Uint64(stringSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.String(rv.Index(i).String()) + } + case INVALID: + default: + // Logging is an alternative, but using the internal logger here + // causes an import cycle so it is not done. 
+ v := kv.Value.AsInterface() + msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v) + panic(msg) + } + return h +} diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go similarity index 84% rename from vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go rename to vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 691d96c7..08755043 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -5,14 +5,14 @@ Package attribute provide several helper functions for some commonly used logic of processing attributes. */ -package attribute // import "go.opentelemetry.io/otel/internal/attribute" +package attribute // import "go.opentelemetry.io/otel/attribute/internal" import ( "reflect" ) // BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) interface{} { +func BoolSliceValue(v []bool) any { var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} { } // Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) interface{} { +func Int64SliceValue(v []int64) any { var zero int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} { } // Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) interface{} { +func Float64SliceValue(v []float64) any { var zero float64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} { } // StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { +func StringSliceValue(v []string) any { var zero string cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} { } // AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v interface{}) []bool { +func AsBoolSlice(v any) []bool { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool { } // AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v interface{}) []int64 { +func AsInt64Slice(v any) []int64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 { } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v interface{}) []float64 { +func AsFloat64Slice(v any) []float64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 { } // AsStringSlice converts a string array into a slice into with same elements as array. 
-func AsStringSlice(v interface{}) []string { +func AsStringSlice(v any) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go new file mode 100644 index 00000000..113a9783 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package xxhash provides a wrapper around the xxhash library for attribute hashing. +package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash" + +import ( + "encoding/binary" + "math" + + "github.com/cespare/xxhash/v2" +) + +// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values. +type Hash struct { + d *xxhash.Digest +} + +// New returns a new initialized xxHash64 hasher. +func New() Hash { + return Hash{d: xxhash.New()} +} + +func (h Hash) Uint64(val uint64) Hash { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], val) + // errors from Write are always nil for xxhash + // if it returns an err then panic + _, err := h.d.Write(buf[:]) + if err != nil { + panic("xxhash write of uint64 failed: " + err.Error()) + } + return h +} + +func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function. + if val { + return h.Uint64(1) + } + return h.Uint64(0) +} + +func (h Hash) Float64(val float64) Hash { + return h.Uint64(math.Float64bits(val)) +} + +func (h Hash) Int64(val int64) Hash { + return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing. +} + +func (h Hash) String(val string) Hash { + // errors from WriteString are always nil for xxhash + // if it returns an err then panic + _, err := h.d.WriteString(val) + if err != nil { + panic("xxhash write of string failed: " + err.Error()) + } + return h +} + +// Sum64 returns the current hash value. +func (h Hash) Sum64() uint64 { + return h.d.Sum64() +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index f2ba89ce..8df6249f 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -25,8 +25,8 @@ type oneIterator struct { attr KeyValue } -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. +// Next moves the iterator to the next position. +// Next reports whether there are more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -106,7 +106,8 @@ func (oi *oneIterator) advance() { } } -// Next returns true if there is another attribute available. +// Next moves the iterator to the next position. +// Next reports whether there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index d9a22c65..80a9e564 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue { } } -// Defined returns true for non-empty keys. +// Defined reports whether the key is not empty. 
func (k Key) Defined() bool { return len(k) != 0 } diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 3028f9a4..8c6928ca 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -13,7 +13,7 @@ type KeyValue struct { Value Value } -// Valid returns if kv is a valid OpenTelemetry attribute. +// Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { return kv.Key.Defined() && kv.Value.Type() != INVALID } diff --git a/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go new file mode 100644 index 00000000..5791c6e7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "math" +) + +func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func rawToBool(r uint64) bool { + return r != 0 +} + +func int64ToRaw(i int64) uint64 { + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec +} + +func rawToInt64(r uint64) int64 { + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec +} + +func float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func rawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 6cbefcea..911d557e 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -9,6 +9,8 @@ import ( "reflect" "slices" "sort" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" ) type ( @@ -23,19 +25,19 @@ type ( // the Equals method to ensure stable equivalence checking. // // Users should also use the Distinct returned from Equivalent as a map key - // instead of a Set directly. In addition to that type providing guarantees - // on stable equivalence, it may also provide performance improvements. + // instead of a Set directly. Set has relatively poor performance when used + // as a map key compared to Distinct. Set struct { - equivalent Distinct + hash uint64 + data any } - // Distinct is a unique identifier of a Set. + // Distinct is an identifier of a Set which is very likely to be unique. // - // Distinct is designed to be ensures equivalence stability: comparisons - // will return the save value across versions. For this reason, Distinct - // should always be used as a map key instead of a Set. + // Distinct should be used as a map key instead of a Set for to provide better + // performance for map operations. Distinct struct { - iface interface{} + hash uint64 } // Sortable implements sort.Interface, used for sorting KeyValue. @@ -46,15 +48,34 @@ type ( Sortable []KeyValue ) +// Compile time check these types remain comparable. +var ( + _ = isComparable(Set{}) + _ = isComparable(Distinct{}) +) + +func isComparable[T comparable](t T) T { return t } + var ( // keyValueType is used in computeDistinctReflect. keyValueType = reflect.TypeOf(KeyValue{}) - // emptySet is returned for empty attribute sets. - emptySet = &Set{ - equivalent: Distinct{ - iface: [0]KeyValue{}, - }, + // emptyHash is the hash of an empty set. 
+ emptyHash = xxhash.New().Sum64() + + // userDefinedEmptySet is an empty set. It was mistakenly exposed to users + // as something they can assign to, so it must remain addressable and + // mutable. + // + // This is kept for backwards compatibility, but should not be used in new code. + userDefinedEmptySet = &Set{ + hash: emptyHash, + data: [0]KeyValue{}, + } + + emptySet = Set{ + hash: emptyHash, + data: [0]KeyValue{}, } ) @@ -62,33 +83,35 @@ var ( // // This is a convenience provided for optimized calling utility. func EmptySet() *Set { - return emptySet + // Continue to return the pointer to the user-defined empty set for + // backwards-compatibility. + // + // New code should not use this, instead use emptySet. + return userDefinedEmptySet } -// reflectValue abbreviates reflect.ValueOf(d). -func (d Distinct) reflectValue() reflect.Value { - return reflect.ValueOf(d.iface) -} +// Valid reports whether this value refers to a valid Set. +func (d Distinct) Valid() bool { return d.hash != 0 } -// Valid returns true if this value refers to a valid Set. -func (d Distinct) Valid() bool { - return d.iface != nil +// reflectValue abbreviates reflect.ValueOf(d). +func (l Set) reflectValue() reflect.Value { + return reflect.ValueOf(l.data) } // Len returns the number of attributes in this set. func (l *Set) Len() int { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return 0 } - return l.equivalent.reflectValue().Len() + return l.reflectValue().Len() } // Get returns the KeyValue at ordered position idx in this set. func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return KeyValue{}, false } - value := l.equivalent.reflectValue() + value := l.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for @@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) { // Value returns the value of a specified key in this set. func (l *Set) Value(k Key) (Value, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return Value{}, false } - rValue := l.equivalent.reflectValue() + rValue := l.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { @@ -120,7 +143,7 @@ func (l *Set) Value(k Key) (Value, bool) { return Value{}, false } -// HasValue tests whether a key is defined in this set. +// HasValue reports whether a key is defined in this set. func (l *Set) HasValue(k Key) bool { if l == nil { return false @@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue { return iter.ToSlice() } -// Equivalent returns a value that may be used as a map key. The Distinct type -// guarantees that the result will equal the equivalent. Distinct value of any +// Equivalent returns a value that may be used as a map key. Equal Distinct +// values are very likely to be equivalent attribute Sets. Distinct value of any // attribute set with the same elements as this, where sets are made unique by // choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent + if l == nil || l.hash == 0 { + return Distinct{hash: emptySet.hash} } - return l.equivalent + return Distinct{hash: l.hash} } -// Equals returns true if the argument set is equivalent to this set. +// Equals reports whether the argument set is equivalent to this set. 
func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() + if l.Equivalent() != o.Equivalent() { + return false + } + if l == nil || l.hash == 0 { + l = &emptySet + } + if o == nil || o.hash == 0 { + o = &emptySet + } + return l.data == o.data } // Encoded returns the encoded form of this set, according to encoder. @@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string { return encoder.Encode(l.Iter()) } -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - // NewSet returns a new Set. See the documentation for // NewSetWithSortableFiltered for more details. // @@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { - return empty(), nil + return emptySet, nil } // Stable sort so the following de-duplication can implement @@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if filter != nil { if div := filteredToFront(kvs, filter); div != 0 { - return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + return newSet(kvs[div:]), kvs[:div] } } - return Set{equivalent: computeDistinct(kvs)}, nil + return newSet(kvs), nil } // NewSetWithSortableFiltered returns a new Set. @@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { if first == 0 { // It is safe to assume len(slice) >= 1 given we found at least one // attribute above that needs to be filtered out. - return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + return newSet(slice[1:]), slice[:1] } // Move the filtered slice[first] to the front (preserving order). @@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { // Do not re-evaluate re(slice[first+1:]). div := filteredToFront(slice[1:first+1], re) + 1 - return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] + return newSet(slice[div:]), slice[:div] } -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) +// newSet returns a new set based on the sorted and uniqued kvs. +func newSet(kvs []KeyValue) Set { + s := Set{ + hash: hashKVs(kvs), + data: computeDataFixed(kvs), } - return Distinct{ - iface: iface, + if s.data == nil { + s.data = computeDataReflect(kvs) } + return s } -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { +// computeDataFixed computes a Set data for small slices. It returns nil if the +// input is too large for this code path. +func computeDataFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { } } -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { +// computeDataReflect computes a Set data using reflection, works for any size +// input. 
+func computeDataReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -383,11 +408,11 @@ func computeDistinctReflect(kvs []KeyValue) interface{} { // MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) + return json.Marshal(l.data) } // MarshalLog is the marshaling function used by the logging system to represent this Set. -func (l Set) MarshalLog() interface{} { +func (l Set) MarshalLog() any { kvs := make(map[string]string) for _, kv := range l.ToSlice() { kvs[string(kv.Key)] = kv.Value.Emit() diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index e584b247..24f1fa37 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Type_index)-1 { return "Type(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Type_name[_Type_index[i]:_Type_index[i+1]] + return _Type_name[_Type_index[idx]:_Type_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 9ea0ecbb..653c33a8 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -9,8 +9,7 @@ import ( "reflect" "strconv" - "go.opentelemetry.io/otel/internal" - "go.opentelemetry.io/otel/internal/attribute" + attribute "go.opentelemetry.io/otel/attribute/internal" ) //go:generate stringer -type=Type @@ -23,7 +22,7 @@ type Value struct { vtype Type numeric uint64 stringly string - slice interface{} + slice any } const ( @@ -51,7 +50,7 @@ const ( func BoolValue(v bool) Value { return Value{ vtype: BOOL, - numeric: internal.BoolToRaw(v), + numeric: boolToRaw(v), } } @@ -82,7 +81,7 @@ func IntSliceValue(v []int) Value { func Int64Value(v int64) Value { return Value{ vtype: INT64, - numeric: internal.Int64ToRaw(v), + numeric: int64ToRaw(v), } } @@ -95,7 +94,7 @@ func Int64SliceValue(v []int64) Value { func Float64Value(v float64) Value { return Value{ vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), + numeric: float64ToRaw(v), } } @@ -125,7 +124,7 @@ func (v Value) Type() Type { // AsBool returns the bool value. Make sure that the Value's type is // BOOL. func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) + return rawToBool(v.numeric) } // AsBoolSlice returns the []bool value. Make sure that the Value's type is @@ -144,7 +143,7 @@ func (v Value) asBoolSlice() []bool { // AsInt64 returns the int64 value. Make sure that the Value's type is // INT64. func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) + return rawToInt64(v.numeric) } // AsInt64Slice returns the []int64 value. Make sure that the Value's type is @@ -163,7 +162,7 @@ func (v Value) asInt64Slice() []int64 { // AsFloat64 returns the float64 value. Make sure that the Value's // type is FLOAT64. func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) + return rawToFloat64(v.numeric) } // AsFloat64Slice returns the []float64 value. 
Make sure that the Value's type is @@ -200,8 +199,8 @@ func (v Value) asStringSlice() []string { type unknownValueType struct{} -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { +// AsInterface returns Value's data as any. +func (v Value) AsInterface() any { switch v.Type() { case BOOL: return v.AsBool() @@ -263,7 +262,7 @@ func (v Value) Emit() string { func (v Value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string - Value interface{} + Value any } jsonVal.Type = v.Type().String() jsonVal.Value = v.AsInterface() diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 0e1fe242..78e98c4c 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // If we couldn't find any valid key character, // it means the key is either empty or invalid. if keyStart == keyEnd { - return + return p, ok } // Skip spaces after the key: " key< >= value ". @@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // A key can have no value, like: " key ". ok = true p.key = s[keyStart:keyEnd] - return + return p, ok } // If we have not reached the end and we can't find the '=' delimiter, // it means the property is invalid. if s[index] != keyValueDelimiter[0] { - return + return p, ok } // Attempting to parse the value. @@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // we have not reached the end, it means the property is // invalid, something like: " key = value value1". if index != len(s) { - return + return p, ok } // Decode a percent-encoded value. rawVal := s[valueStart:valueEnd] unescapeVal, err := url.PathUnescape(rawVal) if err != nil { - return + return p, ok } value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) @@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { p.hasValue = true p.value = value - return + return p, ok } func skipSpace(s string, offset int) int { @@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ // validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. // Baggage name is a valid, non-empty UTF-8 string. func validateBaggageName(s string) bool { - if len(s) == 0 { + if s == "" { return false } @@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool { // validateKey checks if the string is a valid W3C Baggage key. 
func validateKey(s string) bool { - if len(s) == 0 { + if s == "" { return false } diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 49a35b12..d48847ed 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return errors.New("nil receiver passed to UnmarshalJSON") } - var x interface{} + var x any if err := json.Unmarshal(b, &x); err != nil { return err } @@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) { if !ok { return nil, fmt.Errorf("invalid code: %d", *c) } - return []byte(fmt.Sprintf("%q", str)), nil + return fmt.Appendf(nil, "%q", str), nil } diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index e4c4a753..cadb87cc 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,3 +1,4 @@ # This is a renovate-friendly source of Docker images. -FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python -FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver +FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python +FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver +FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go index 05abd92e..6a80ec12 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go @@ -5,9 +5,12 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o import ( "context" - "fmt" + "errors" + "sync/atomic" "time" + collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -18,10 +21,9 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ" "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" - collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" - logpb "go.opentelemetry.io/proto/otlp/logs/v1" ) // The methods of this type are not expected to be called concurrently. @@ -37,6 +39,8 @@ type client struct { ourConn bool conn *grpc.ClientConn lsc collogpb.LogsServiceClient + + instrumentation *observ.Instrumentation } // Used for testing. @@ -71,7 +75,18 @@ func newClient(cfg config) (*client, error) { c.lsc = collogpb.NewLogsServiceClient(c.conn) - return c, nil + var err error + id := nextExporterID() + c.instrumentation, err = observ.NewInstrumentation(id, c.conn.CanonicalTarget()) + return c, err +} + +var exporterN atomic.Int64 + +// nextExporterID returns the next unique ID for an exporter. +func nextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc } func newGRPCDialOptions(cfg config) []grpc.DialOption { @@ -85,11 +100,12 @@ func newGRPCDialOptions(cfg config) []grpc.DialOption { dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(cfg.serviceConfig.Value)) } // Prioritize GRPCCredentials over Insecure (passing both is an error). - if cfg.gRPCCredentials.Value != nil { + switch { + case cfg.gRPCCredentials.Value != nil: dialOpts = append(dialOpts, grpc.WithTransportCredentials(cfg.gRPCCredentials.Value)) - } else if cfg.insecure.Value { + case cfg.insecure.Value: dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } else { + default: // Default to using the host's root CA. dialOpts = append(dialOpts, grpc.WithTransportCredentials( credentials.NewTLS(nil), @@ -119,7 +135,7 @@ func newGRPCDialOptions(cfg config) []grpc.DialOption { // The otlplog.Exporter synchronizes access to client methods, and // ensures this is not called after the Exporter is shutdown. Only thing // to do here is send data. 
-func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error { +func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) (uploadErr error) { select { case <-ctx.Done(): // Do not upload if the context is already expired. @@ -130,7 +146,15 @@ func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error ctx, cancel := c.exportContext(ctx) defer cancel() - return c.requestFunc(ctx, func(ctx context.Context) error { + count := int64(len(rl)) + if c.instrumentation != nil { + eo := c.instrumentation.ExportLogs(ctx, count) + defer func() { + eo.End(uploadErr) + }() + } + + return errors.Join(uploadErr, c.requestFunc(ctx, func(ctx context.Context) error { resp, err := c.lsc.Export(ctx, &collogpb.ExportLogsServiceRequest{ ResourceLogs: rl, }) @@ -138,8 +162,8 @@ func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error msg := resp.PartialSuccess.GetErrorMessage() n := resp.PartialSuccess.GetRejectedLogRecords() if n != 0 || msg != "" { - err := fmt.Errorf("OTLP partial success: %s (%d log records rejected)", msg, n) - otel.Handle(err) + err := internal.LogPartialSuccessError(n, msg) + uploadErr = errors.Join(uploadErr, err) } } // nil is converted to OK. @@ -148,7 +172,7 @@ func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error return nil } return err - }) + })) } // Shutdown shuts down the client, freeing all resources. @@ -192,7 +216,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } @@ -215,9 +239,9 @@ func newNoopClient() *noopClient { return &noopClient{} } -func (c *noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil } +func (*noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil } -func (c *noopClient) Shutdown(context.Context) error { return nil } +func (*noopClient) Shutdown(context.Context) error { return nil } // retryable returns if err identifies a request that can be retried and a // duration to wait for if an explicit throttle time is included in err. @@ -228,6 +252,8 @@ func retryable(err error) (bool, time.Duration) { func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { switch s.Code() { + // Follows the retryable error codes defined in + // https://opentelemetry.io/docs/specs/otlp/#failures case codes.Canceled, codes.DeadlineExceeded, codes.Aborted, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go index cd33a168..3fda9fcb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go @@ -13,6 +13,7 @@ import ( "strconv" "strings" "time" + "unicode" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -359,8 +360,9 @@ func WithTimeout(duration time.Duration) Option { // explicitly returns a backoff time in the response, that time will take // precedence over these settings. // -// These settings do not define any network retry strategy. That is entirely -// handled by the gRPC ClientConn. +// These settings define the retry strategy implemented by the exporter. 
+// These settings do not define any network retry strategy. +// That is handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially @@ -442,13 +444,15 @@ func convHeaders(s string) (map[string]string, error) { continue } - escKey, e := url.PathUnescape(rawKey) - if e != nil { + key := strings.TrimSpace(rawKey) + + // Validate the key. + if !isValidHeaderKey(key) { err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey)) continue } - key := strings.TrimSpace(escKey) + // Only decode the value. escVal, e := url.PathUnescape(rawVal) if e != nil { err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal)) @@ -559,7 +563,7 @@ func loadCertificates(certPath, keyPath string) ([]tls.Certificate, error) { func insecureFromScheme(prev setting[bool], scheme string) setting[bool] { if scheme == "https" { return newSetting(false) - } else if len(scheme) > 0 { + } else if scheme != "" { return newSetting(true) } @@ -651,3 +655,22 @@ func fallback[T any](val T) resolver[T] { return s } } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go index 66895c3a..898eecf7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go @@ -8,9 +8,10 @@ import ( "sync" "sync/atomic" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform" "go.opentelemetry.io/otel/sdk/log" - logpb "go.opentelemetry.io/proto/otlp/logs/v1" ) type logClient interface { @@ -88,6 +89,6 @@ func (e *Exporter) Shutdown(ctx context.Context) error { } // ForceFlush does nothing. The Exporter holds no state. -func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(context.Context) error { return nil } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/gen.go new file mode 100644 index 00000000..797bfc95 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/gen.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package internal provides internal functionality for the otlploggrpc +// package. 
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal" + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target.go.tmpl "--data={ \"pkg\": \"observ\", \"pkg_path\": \"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ\" }" --out=observ/target.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target_test.go.tmpl "--data={ \"pkg\": \"observ\" }" --out=observ/target_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/x/x.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc\" }" --out=x/x.go +//go:generate gotmpl --body=../../../../../internal/shared/x/x_test.go.tmpl "--data={}" --out=x/x_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlplog/transform/attr_test.go.tmpl "--data={}" --out=transform/attr_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlplog/transform/log.go.tmpl "--data={}" --out=transform/log.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlplog/transform/log_attr_test.go.tmpl "--data={}" --out=transform/log_attr_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlplog/transform/log_test.go.tmpl "--data={}" --out=transform/log_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/instrumentation.go new file mode 100644 index 00000000..c1ff2e13 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/instrumentation.go @@ -0,0 +1,309 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides observability metrics for OTLP log exporters. +// This is an experimental feature controlled by the x.Observability feature flag. +package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ" + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ" + + // Version is the current version of this instrumentation. + // + // This matches the version of the exporter. + Version = internal.Version +) + +var ( + attrsPool = &sync.Pool{ + New: func() any { + const n = 1 + // component.name + 1 + // component.type + 1 + // server.addr + 1 + // server.port + 1 + // error.type + 1 // rpc.grpc.status_code + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. 
+ return &s + }, + } + addOpPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] + p.Put(s) +} + +// GetComponentName returns the constant name for the exporter with the +// provided id. +func GetComponentName(id int64) string { + return fmt.Sprintf("%s/%d", otelconv.ComponentTypeOtlpGRPCLogExporter, id) +} + +// getPresetAttrs builds the preset attributes for instrumentation. +func getPresetAttrs(id int64, target string) []attribute.KeyValue { + serverAttrs := ServerAddrAttrs(target) + attrs := make([]attribute.KeyValue, 0, 2+len(serverAttrs)) + + attrs = append( + attrs, + semconv.OTelComponentName(GetComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCLogExporter, + ) + attrs = append(attrs, serverAttrs...) + + return attrs +} + +// Instrumentation is experimental instrumentation for the exporter. +type Instrumentation struct { + logInflightMetric metric.Int64UpDownCounter + logExportedMetric metric.Int64Counter + logExportedDurationMetric metric.Float64Histogram + + presetAttrs []attribute.KeyValue + addOpt metric.AddOption + recOpt metric.RecordOption +} + +// NewInstrumentation returns instrumentation for otlplog grpc exporter. +func NewInstrumentation(id int64, target string) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + i := &Instrumentation{} + + mp := otel.GetMeterProvider() + m := mp.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + + logInflightMetric, e := otelconv.NewSDKExporterLogInflight(m) + if e != nil { + e = fmt.Errorf("failed to create log inflight metric: %w", e) + err = errors.Join(err, e) + } + i.logInflightMetric = logInflightMetric.Inst() + + logExportedMetric, e := otelconv.NewSDKExporterLogExported(m) + if e != nil { + e = fmt.Errorf("failed to create log exported metric: %w", e) + err = errors.Join(err, e) + } + i.logExportedMetric = logExportedMetric.Inst() + + logOpDurationMetric, e := otelconv.NewSDKExporterOperationDuration(m) + if e != nil { + e = fmt.Errorf("failed to create log operation duration metric: %w", e) + err = errors.Join(err, e) + } + i.logExportedDurationMetric = logOpDurationMetric.Inst() + if err != nil { + return nil, err + } + + i.presetAttrs = getPresetAttrs(id, target) + + i.addOpt = metric.WithAttributeSet(attribute.NewSet(i.presetAttrs...)) + i.recOpt = metric.WithAttributeSet(attribute.NewSet(append( + // Default to OK status code. + []attribute.KeyValue{semconv.RPCGRPCStatusCodeOk}, + i.presetAttrs..., + )...)) + return i, nil +} + +// ExportLogs instruments the ExportLogs method of the exporter. It returns +// an [ExportOp] that must have its [ExportOp.End] method called when the +// ExportLogs method returns. +func (i *Instrumentation) ExportLogs(ctx context.Context, count int64) ExportOp { + start := time.Now() + addOpt := get[metric.AddOption](addOpPool) + defer put(addOpPool, addOpt) + + *addOpt = append(*addOpt, i.addOpt) + + i.logInflightMetric.Add(ctx, count, *addOpt...) + + return ExportOp{ + nLogs: count, + ctx: ctx, + start: start, + inst: i, + } +} + +// ExportOp tracks the operation being observed by [Instrumentation.ExportLogs]. 
+type ExportOp struct { + nLogs int64 + ctx context.Context + start time.Time + + inst *Instrumentation +} + +// End completes the observation of the operation being observed by a call to +// [Instrumentation.ExportLogs]. +// Any error that is encountered is provided as err. +// +// If err is not nil, all logs will be recorded as failures unless error is of +// type [internal.PartialSuccess]. In the case of a PartialSuccess, the number +// of successfully exported logs will be determined by inspecting the +// RejectedItems field of the PartialSuccess. +func (e ExportOp) End(err error) { + addOpt := get[metric.AddOption](addOpPool) + defer put(addOpPool, addOpt) + *addOpt = append(*addOpt, e.inst.addOpt) + + e.inst.logInflightMetric.Add(e.ctx, -e.nLogs, *addOpt...) + success := successful(e.nLogs, err) + e.inst.logExportedMetric.Add(e.ctx, success, *addOpt...) + + if err != nil { + // Add the error.type attribute to the attribute set. + attrs := get[attribute.KeyValue](attrsPool) + defer put(attrsPool, attrs) + *attrs = append(*attrs, e.inst.presetAttrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + o := metric.WithAttributeSet(attribute.NewSet(*attrs...)) + + // Reset addOpt with new attribute set + *addOpt = append((*addOpt)[:0], o) + + e.inst.logExportedMetric.Add(e.ctx, e.nLogs-success, *addOpt...) + } + + recordOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recordOpt) + *recordOpt = append(*recordOpt, e.inst.recordOption(err)) + e.inst.logExportedDurationMetric.Record(e.ctx, time.Since(e.start).Seconds(), *recordOpt...) +} + +func (i *Instrumentation) recordOption(err error) metric.RecordOption { + if err == nil { + return i.recOpt + } + attrs := get[attribute.KeyValue](attrsPool) + defer put(attrsPool, attrs) + + *attrs = append(*attrs, i.presetAttrs...) + code := int64(status.Code(err)) + *attrs = append( + *attrs, + semconv.RPCGRPCStatusCodeKey.Int64(code), + semconv.ErrorType(err), + ) + + return metric.WithAttributeSet(attribute.NewSet(*attrs...)) +} + +// successful returns the number of successfully exported logs out of the n +// that were exported based on the provided error. +// +// If err is nil, n is returned. All logs were successfully exported. +// +// If err is not nil and not an [internal.PartialSuccess] error, 0 is returned. +// It is assumed all logs failed to be exported. +// +// If err is an [internal.PartialSuccess] error, the number of successfully +// exported logs is computed by subtracting the RejectedItems field from n. If +// RejectedItems is negative, n is returned. If RejectedItems is greater than +// n, 0 is returned. +func successful(n int64, err error) int64 { + if err == nil { + return n // All logs successfully exported. + } + // Split rejection calculation so successful is inlineable. + return n - rejectedCount(n, err) +} + +var errPool = sync.Pool{ + New: func() any { + return new(internal.PartialSuccess) + }, +} + +// rejectedCount returns how many out of the n logs exporter were rejected based on +// the provided non-nil err. +func rejectedCount(n int64, err error) int64 { + ps := errPool.Get().(*internal.PartialSuccess) + defer errPool.Put(ps) + + // check for partial success + if errors.As(err, ps) { + return min(max(ps.RejectedItems, 0), n) + } + // all logs exporter + return n +} + +// ServerAddrAttrs is a function that extracts server address and port attributes +// from a target string. 
+func ServerAddrAttrs(target string) []attribute.KeyValue { + addr, port, err := ParseCanonicalTarget(target) + if err != nil || (addr == "" && port < 0) { + if err != nil { + global.Debug("failed to parse target", "target", target, "error", err) + } + return nil + } + + // Unix domain sockets: return only the path as server.address + if port == -1 { + return []attribute.KeyValue{semconv.ServerAddress(addr)} + } + + // For network addresses, only include port if it's valid (> 0) + if port > 0 { + return []attribute.KeyValue{ + semconv.ServerAddress(addr), + semconv.ServerPort(port), + } + } + + // Port is 0 or invalid, only return address + return []attribute.KeyValue{semconv.ServerAddress(addr)} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/target.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/target.go new file mode 100644 index 00000000..186f00e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ/target.go @@ -0,0 +1,143 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/observ/target.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ" + +import ( + "errors" + "fmt" + "net" + "net/netip" + "strconv" + "strings" +) + +const ( + schemeUnix = "unix" + schemeUnixAbstract = "unix-abstract" +) + +// ParseCanonicalTarget parses a target string and returns the extracted host +// (domain address or IP), the target port, or an error. +// +// If no port is specified, -1 is returned. +// +// If no host is specified, an empty string is returned. +// +// The target string is expected to always have the form +// "://[authority]/". For example: +// - "dns:///example.com:42" +// - "dns://8.8.8.8/example.com:42" +// - "unix:///path/to/socket" +// - "unix-abstract:///socket-name" +// - "passthrough:///192.34.2.1:42" +// +// The target is expected to come from the CanonicalTarget method of a gRPC +// Client. +func ParseCanonicalTarget(target string) (string, int, error) { + const sep = "://" + + // Find scheme. Do not allocate the string by using url.Parse. + idx := strings.Index(target, sep) + if idx == -1 { + return "", -1, fmt.Errorf("invalid target %q: missing scheme", target) + } + scheme, endpoint := target[:idx], target[idx+len(sep):] + + // Check for unix schemes. + if scheme == schemeUnix || scheme == schemeUnixAbstract { + return parseUnix(endpoint) + } + + // Strip leading slash and any authority. + if i := strings.Index(endpoint, "/"); i != -1 { + endpoint = endpoint[i+1:] + } + + // DNS, passthrough, and custom resolvers. + return parseEndpoint(endpoint) +} + +// parseUnix parses unix socket targets. +func parseUnix(endpoint string) (string, int, error) { + // Format: unix[-abstract]://path + // + // We should have "/path" (empty authority) if valid. + if len(endpoint) >= 1 && endpoint[0] == '/' { + // Return the full path including leading slash. + return endpoint, -1, nil + } + + // If there's no leading slash, it means there might be an authority + // Check for authority case (should error): "authority/path" + if slashIdx := strings.Index(endpoint, "/"); slashIdx > 0 { + return "", -1, fmt.Errorf("invalid (non-empty) authority: %s", endpoint[:slashIdx]) + } + + return "", -1, errors.New("invalid unix target format") +} + +// parseEndpoint parses an endpoint from a gRPC target. 
+// +// It supports the following formats: +// - "host" +// - "host%zone" +// - "host:port" +// - "host%zone:port" +// - "ipv4" +// - "ipv4%zone" +// - "ipv4:port" +// - "ipv4%zone:port" +// - "ipv6" +// - "ipv6%zone" +// - "[ipv6]" +// - "[ipv6%zone]" +// - "[ipv6]:port" +// - "[ipv6%zone]:port" +// +// It returns the host or host%zone (domain address or IP), the port (or -1 if +// not specified), or an error if the input is not a valid. +func parseEndpoint(endpoint string) (string, int, error) { + // First check if the endpoint is just an IP address. + if ip := parseIP(endpoint); ip != "" { + return ip, -1, nil + } + + // If there's no colon, there is no port (IPv6 with no port checked above). + if !strings.Contains(endpoint, ":") { + return endpoint, -1, nil + } + + host, portStr, err := net.SplitHostPort(endpoint) + if err != nil { + return "", -1, fmt.Errorf("invalid host:port %q: %w", endpoint, err) + } + + const base, bitSize = 10, 16 + port16, err := strconv.ParseUint(portStr, base, bitSize) + if err != nil { + return "", -1, fmt.Errorf("invalid port %q: %w", portStr, err) + } + port := int(port16) // port is guaranteed to be in the range [0, 65535]. + + return host, port, nil +} + +// parseIP attempts to parse the entire endpoint as an IP address. +// It returns the normalized string form of the IP if successful, +// or an empty string if parsing fails. +func parseIP(ip string) string { + // Strip leading and trailing brackets for IPv6 addresses. + if len(ip) >= 2 && ip[0] == '[' && ip[len(ip)-1] == ']' { + ip = ip[1 : len(ip)-1] + } + addr, err := netip.ParseAddr(ip) + if err != nil { + return "" + } + // Return the normalized string form of the IP. + return addr.String() +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/partialsuccess.go new file mode 100644 index 00000000..076f05fc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/partialsuccess.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal" + +import "fmt" + +// PartialSuccess represents the underlying error for all handling +// OTLP partial success messages. Use `errors.Is(err, +// PartialSuccess{})` to test whether an error passed to the OTel +// error handler belongs to this category. +type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 + RejectedKind string +} + +var _ error = PartialSuccess{} + +// Error implements the error interface. +func (ps PartialSuccess) Error() string { + msg := ps.ErrorMessage + if msg == "" { + msg = "empty message" + } + return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) +} + +// Is supports the errors.Is() interface. +func (PartialSuccess) Is(err error) bool { + _, ok := err.(PartialSuccess) + return ok +} + +// LogPartialSuccessError returns an error describing a partial success +// response for the log signal. 
+func LogPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "logs", + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go index f2da1238..1a5684d6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,22 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + // Check if context is canceled before attempting to wait and retry. + if ctx.Err() != nil { + return fmt.Errorf("%w: %w", ctx.Err(), err) + } + + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. - var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { @@ -136,7 +137,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go index dfeecf59..7bb3967f 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlplog/transform/log.go.tmpl // Copyright The OpenTelemetry Authors @@ -257,7 +257,7 @@ func stringSliceValues(vals []string) []*cpb.AnyValue { return converted } -// Attrs transforms a slice of [api.KeyValue] into OTLP key-values. +// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values. 
func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue { if len(attrs) == 0 { return nil diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/version.go new file mode 100644 index 00000000..d2e47664 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/version.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal" + +// Version is the current release version of the OpenTelemetry otlploggrpc +// exporter in use. +const Version = "0.14.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/README.md new file mode 100644 index 00000000..7d73c7e7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/README.md @@ -0,0 +1,36 @@ +# Experimental Features + +The `otlploggrpc` exporter contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the `otlploggrpc` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Observability](#observability) + +### Observability + +The `otlploggrpc` exporter can be configured to provide observability about itself using OpenTelemetry metrics. + +To opt-in, set the environment variable `OTEL_GO_X_OBSERVABILITY` to `true`. + +When enabled, the exporter will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.exporter.log.inflight` +- `otel.sdk.exporter.log.exported` +- `otel.sdk.exporter.operation.duration` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. 
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/features.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/features.go new file mode 100644 index 00000000..0ed1c81a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/features.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc]. +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x" + +import "strings" + +// Observability is an experimental feature flag that determines if exporter +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/x.go new file mode 100644 index 00000000..e2d50ced --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x/x.go @@ -0,0 +1,58 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc]. +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x" + +import ( + "os" +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + keys []string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } + return Feature[T]{ + keys: keys, + parse: parse, + } +} + +// Keys returns the environment variable keys that can be set to enable the +// feature. +func (f Feature[T]) Keys() []string { return f.keys } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } + } + return v, ok +} + +// Enabled reports whether the feature is enabled. 
+func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go index a68ed059..818ecf9e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go @@ -5,5 +5,5 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use. func Version() string { - return "0.11.0" + return "0.14.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index 3f0a518a..30446bd2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -94,7 +94,7 @@ func NewUnstarted(client Client) *Exporter { } // MarshalLog is the marshaling function used by the logging system to represent this Exporter. 
-func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { return struct { Type string Client Client diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index 4571a5ca..d9bfd6e1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -1,12 +1,15 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package tracetransform provides conversion functionality for the otlptrace +// exporters. package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + commonpb "go.opentelemetry.io/proto/otlp/common/v1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index 2e7690e4..43359c89 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -4,8 +4,9 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - "go.opentelemetry.io/otel/sdk/instrumentation" commonpb "go.opentelemetry.io/proto/otlp/common/v1" + + "go.opentelemetry.io/otel/sdk/instrumentation" ) func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go index db7b698a..526bb5e0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -4,8 +4,9 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - "go.opentelemetry.io/otel/sdk/resource" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" + + "go.opentelemetry.io/otel/sdk/resource" ) // Resource transforms a Resource into an OTLP Resource. 
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index bf27ef02..d431fc45 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -6,12 +6,13 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptr import ( "math" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // Spans transforms a slice of OpenTelemetry spans into a slice of OTLP @@ -112,7 +113,7 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { if psid := sd.Parent().SpanID(); psid.IsValid() { s.ParentSpanId = psid[:] } - s.Flags = buildSpanFlags(sd.Parent()) + s.Flags = buildSpanFlagsWith(sd.SpanContext().TraceFlags(), sd.Parent()) return s } @@ -154,12 +155,11 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { for _, otLink := range links { // This redefinition is necessary to prevent otLink.*ID[:] copies // being reused -- in short we need a new otLink per iteration. - otLink := otLink tid := otLink.SpanContext.TraceID() sid := otLink.SpanContext.SpanID() - flags := buildSpanFlags(otLink.SpanContext) + flags := buildSpanFlagsWith(otLink.SpanContext.TraceFlags(), otLink.SpanContext) sl = append(sl, &tracepb.Span_Link{ TraceId: tid[:], @@ -172,13 +172,15 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { return sl } -func buildSpanFlags(sc trace.SpanContext) uint32 { - flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK - if sc.IsRemote() { - flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK +func buildSpanFlagsWith(tf trace.TraceFlags, parent trace.SpanContext) uint32 { + // Lower 8 bits are the W3C TraceFlags; always indicate that we know whether the parent is remote + flags := uint32(tf) | uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) + // Set the parent-is-remote bit when applicable + if parent.IsRemote() { + flags |= uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) } - return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative + return flags // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. @@ -189,7 +191,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { events := make([]*tracepb.Span_Event, len(es)) // Transform message events - for i := 0; i < len(es); i++ { + for i := range es { events[i] = &tracepb.Span_Event{ Name: es[i].Name, TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 8409b5f8..76b7cd46 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -9,19 +9,20 @@ import ( "sync" "time" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type client struct { @@ -45,6 +46,9 @@ type client struct { conn *grpc.ClientConn tscMu sync.RWMutex tsc coltracepb.TraceServiceClient + + instID int64 + inst *observ.Instrumentation } // Compile time check *client implements otlptrace.Client. @@ -68,6 +72,7 @@ func newClient(opts ...Option) *client { stopCtx: ctx, stopFunc: cancel, conn: cfg.GRPCConn, + instID: counter.NextExporterID(), } if len(cfg.Traces.Headers) > 0 { @@ -92,13 +97,24 @@ func (c *client) Start(context.Context) error { c.conn = conn } + // Initialize the instrumentation if not already done. 
+ // + // Initialize here instead of NewClient to allow any errors to be passed + // back to the caller and so that any setup of the environment variables to + // enable instrumentation can be set via code. + var err error + if c.inst == nil { + target := c.conn.CanonicalTarget() + c.inst, err = observ.NewInstrumentation(c.instID, target) + } + // The otlptrace.Client interface states this method is called just once, // so no need to check if already started. c.tscMu.Lock() c.tsc = coltracepb.NewTraceServiceClient(c.conn) c.tscMu.Unlock() - return nil + return err } var errAlreadyStopped = errors.New("the client is already stopped") @@ -174,7 +190,7 @@ var errShutdown = errors.New("the client is shutdown") // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. -func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { +func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) (uploadErr error) { // Hold a read lock to ensure a shut down initiated after this starts does // not abandon the export. This read lock acquire has less priority than a // write lock acquire (i.e. Stop), meaning if the client is shutting down @@ -189,6 +205,12 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc ctx, cancel := c.exportContext(ctx) defer cancel() + var code codes.Code + if c.inst != nil { + op := c.inst.ExportSpans(ctx, len(protoSpans)) + defer func() { op.End(uploadErr, code) }() + } + return c.requestFunc(ctx, func(iCtx context.Context) error { resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, @@ -197,16 +219,17 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc msg := resp.PartialSuccess.GetErrorMessage() n := resp.PartialSuccess.GetRejectedSpans() if n != 0 || msg != "" { - err := internal.TracePartialSuccessError(n, msg) - otel.Handle(err) + e := internal.TracePartialSuccessError(n, msg) + uploadErr = errors.Join(uploadErr, e) } } // nil is converted to OK. - if status.Code(err) == codes.OK { + code = status.Code(err) + if code == codes.OK { // Success. - return nil + return uploadErr } - return err + return errors.Join(uploadErr, err) }) } @@ -223,7 +246,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } @@ -289,7 +312,7 @@ func throttleDelay(s *status.Status) (bool, time.Duration) { } // MarshalLog is the marshaling function used by the logging system to represent this Client. -func (c *client) MarshalLog() interface{} { +func (c *client) MarshalLog() any { return struct { Type string Endpoint string diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go new file mode 100644 index 00000000..323b2a2c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go @@ -0,0 +1,31 @@ +// Code generated by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. +func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. +func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 4abf48d1..6eacdf31 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package envconfig provides functionality to parse configuration from +// environment variables. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" import ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go index 97cd6c54..7fe9c9f3 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package internal provides internal functionally for the otlptracegrpc package. 
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go @@ -22,3 +23,12 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ot //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go + +//go:generate gotmpl --body=../../../../../internal/shared/x/x.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\" }" --out=x/x.go +//go:generate gotmpl --body=../../../../../internal/shared/x/x_test.go.tmpl "--data={}" --out=x/x_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target.go.tmpl "--data={ \"pkg\": \"observ\", \"pkg_path\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ\" }" --out=observ/target.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target_test.go.tmpl "--data={ \"pkg\": \"observ\" }" --out=observ/target_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/counter/counter.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter\" }" --out=counter/counter.go +//go:generate gotmpl --body=../../../../../internal/shared/counter/counter_test.go.tmpl "--data={}" --out=counter/counter_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go new file mode 100644 index 00000000..0dd54e4b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation for the +// otlptracegrpc exporter. 
+package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go new file mode 100644 index 00000000..2257fcc8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go @@ -0,0 +1,341 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/codes" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" + + // SchemaURL is the schema URL of the metrics produced by this + // instrumentation. + SchemaURL = semconv.SchemaURL + + // Version is the current version of this instrumentation. + // + // This matches the version of the exporter. + Version = internal.Version +) + +var ( + measureAttrsPool = &sync.Pool{ + New: func() any { + const n = 1 + // component.name + 1 + // component.type + 1 + // server.addr + 1 + // server.port + 1 + // error.type + 1 // rpc.grpc.status_code + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } + +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] // Reset. + p.Put(s) +} + +// ComponentName returns the component name for the exporter with the +// provided ID. +func ComponentName(id int64) string { + t := semconv.OTelComponentTypeOtlpGRPCSpanExporter.Value.AsString() + return fmt.Sprintf("%s/%d", t, id) +} + +// Instrumentation is experimental instrumentation for the exporter. +type Instrumentation struct { + inflightSpans metric.Int64UpDownCounter + exportedSpans metric.Int64Counter + opDuration metric.Float64Histogram + + attrs []attribute.KeyValue + addOpt metric.AddOption + recOpt metric.RecordOption +} + +// NewInstrumentation returns instrumentation for an OTLP over gPRC trace +// exporter with the provided ID using the global MeterProvider. +// +// The id should be the unique exporter instance ID. It is used +// to set the "component.name" attribute. +// +// The target is the endpoint the exporter is exporting to. +// +// If the experimental observability is disabled, nil is returned. 
+func NewInstrumentation(id int64, target string) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + attrs := BaseAttrs(id, target) + i := &Instrumentation{ + attrs: attrs, + addOpt: metric.WithAttributeSet(attribute.NewSet(attrs...)), + + // Do not modify attrs (NewSet sorts in-place), make a new slice. + recOpt: metric.WithAttributeSet(attribute.NewSet(append( + // Default to OK status code. + []attribute.KeyValue{semconv.RPCGRPCStatusCodeOk}, + attrs..., + )...)), + } + + mp := otel.GetMeterProvider() + m := mp.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version), + metric.WithSchemaURL(SchemaURL), + ) + + var err error + + inflightSpans, e := otelconv.NewSDKExporterSpanInflight(m) + if e != nil { + e = fmt.Errorf("failed to create span inflight metric: %w", e) + err = errors.Join(err, e) + } + i.inflightSpans = inflightSpans.Inst() + + exportedSpans, e := otelconv.NewSDKExporterSpanExported(m) + if e != nil { + e = fmt.Errorf("failed to create span exported metric: %w", e) + err = errors.Join(err, e) + } + i.exportedSpans = exportedSpans.Inst() + + opDuration, e := otelconv.NewSDKExporterOperationDuration(m) + if e != nil { + e = fmt.Errorf("failed to create operation duration metric: %w", e) + err = errors.Join(err, e) + } + i.opDuration = opDuration.Inst() + + return i, err +} + +// BaseAttrs returns the base attributes for the exporter with the provided ID +// and target. +// +// The id should be the unique exporter instance ID. It is used +// to set the "component.name" attribute. +// +// The target is the gRPC target the exporter is exporting to. It is expected +// to be the output of the Client's CanonicalTarget method. +func BaseAttrs(id int64, target string) []attribute.KeyValue { + host, port, err := ParseCanonicalTarget(target) + if err != nil || (host == "" && port < 0) { + if err != nil { + global.Debug("failed to parse target", "target", target, "error", err) + } + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + } + } + + // Do not use append so the slice is exactly allocated. + + if port < 0 { + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerAddress(host), + } + } + + if host == "" { + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerPort(port), + } + } + + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerAddress(host), + semconv.ServerPort(port), + } +} + +// ExportSpans instruments the ExportSpans method of the exporter. It returns +// an [ExportOp] that must have its [ExportOp.End] method called when the +// ExportSpans method returns. +func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp { + start := time.Now() + + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.addOpt) + i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + + return ExportOp{ + ctx: ctx, + start: start, + nSpans: int64(nSpans), + inst: i, + } +} + +// ExportOp tracks the operation being observed by [Instrumentation.ExportSpans]. 
+type ExportOp struct { + ctx context.Context + start time.Time + nSpans int64 + + inst *Instrumentation +} + +// End completes the observation of the operation being observed by a call to +// [Instrumentation.ExportSpans]. +// +// Any error that is encountered is provided as err. +// +// If err is not nil, all spans will be recorded as failures unless error is of +// type [internal.PartialSuccess]. In the case of a PartialSuccess, the number +// of successfully exported spans will be determined by inspecting the +// RejectedItems field of the PartialSuccess. +func (e ExportOp) End(err error, code codes.Code) { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, e.inst.addOpt) + + e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...) + + success := successful(e.nSpans, err) + // Record successfully exported spans, even if the value is 0 which are + // meaningful to distribution aggregations. + e.inst.exportedSpans.Add(e.ctx, success, *addOpt...) + + if err != nil { + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, e.inst.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. + o := metric.WithAttributeSet(attribute.NewSet(*attrs...)) + // Reset addOpt with new attribute set. + *addOpt = append((*addOpt)[:0], o) + + e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...) + } + + recOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recOpt) + *recOpt = append(*recOpt, e.inst.recordOption(err, code)) + + d := time.Since(e.start).Seconds() + e.inst.opDuration.Record(e.ctx, d, *recOpt...) +} + +// recordOption returns a RecordOption with attributes representing the +// outcome of the operation being recorded. +// +// If err is nil and code is codes.OK, the default recOpt of the +// Instrumentation is returned. +// +// If err is not nil or code is not codes.OK, a new RecordOption is returned +// with the base attributes of the Instrumentation plus the rpc.grpc.status_code +// attribute set to the provided code, and if err is not nil, the error.type +// attribute set to the type of the error. +func (i *Instrumentation) recordOption(err error, code codes.Code) metric.RecordOption { + if err == nil && code == codes.OK { + return i.recOpt + } + + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, i.attrs...) + + c := int64(code) // uint32 -> int64. + *attrs = append(*attrs, semconv.RPCGRPCStatusCodeKey.Int64(c)) + if err != nil { + *attrs = append(*attrs, semconv.ErrorType(err)) + } + + // Do not inefficiently make a copy of attrs by using WithAttributes + // instead of WithAttributeSet. + return metric.WithAttributeSet(attribute.NewSet(*attrs...)) +} + +// successful returns the number of successfully exported spans out of the n +// that were exported based on the provided error. +// +// If err is nil, n is returned. All spans were successfully exported. +// +// If err is not nil and not an [internal.PartialSuccess] error, 0 is returned. +// It is assumed all spans failed to be exported. +// +// If err is an [internal.PartialSuccess] error, the number of successfully +// exported spans is computed by subtracting the RejectedItems field from n. If +// RejectedItems is negative, n is returned. If RejectedItems is greater than +// n, 0 is returned. 
+func successful(n int64, err error) int64 { + if err == nil { + return n // All spans successfully exported. + } + // Split rejection calculation so successful is inlinable. + return n - rejected(n, err) +} + +var errPartialPool = &sync.Pool{ + New: func() any { return new(internal.PartialSuccess) }, +} + +// rejected returns how many out of the n spans exporter were rejected based on +// the provided non-nil err. +func rejected(n int64, err error) int64 { + ps := errPartialPool.Get().(*internal.PartialSuccess) + defer errPartialPool.Put(ps) + // Check for partial success. + if errors.As(err, ps) { + // Bound RejectedItems to [0, n]. This should not be needed, + // but be defensive as this is from an external source. + return min(max(ps.RejectedItems, 0), n) + } + return n // All spans rejected. +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go new file mode 100644 index 00000000..34eee27d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go @@ -0,0 +1,143 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/observ/target.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" + +import ( + "errors" + "fmt" + "net" + "net/netip" + "strconv" + "strings" +) + +const ( + schemeUnix = "unix" + schemeUnixAbstract = "unix-abstract" +) + +// ParseCanonicalTarget parses a target string and returns the extracted host +// (domain address or IP), the target port, or an error. +// +// If no port is specified, -1 is returned. +// +// If no host is specified, an empty string is returned. +// +// The target string is expected to always have the form +// "://[authority]/". For example: +// - "dns:///example.com:42" +// - "dns://8.8.8.8/example.com:42" +// - "unix:///path/to/socket" +// - "unix-abstract:///socket-name" +// - "passthrough:///192.34.2.1:42" +// +// The target is expected to come from the CanonicalTarget method of a gRPC +// Client. +func ParseCanonicalTarget(target string) (string, int, error) { + const sep = "://" + + // Find scheme. Do not allocate the string by using url.Parse. + idx := strings.Index(target, sep) + if idx == -1 { + return "", -1, fmt.Errorf("invalid target %q: missing scheme", target) + } + scheme, endpoint := target[:idx], target[idx+len(sep):] + + // Check for unix schemes. + if scheme == schemeUnix || scheme == schemeUnixAbstract { + return parseUnix(endpoint) + } + + // Strip leading slash and any authority. + if i := strings.Index(endpoint, "/"); i != -1 { + endpoint = endpoint[i+1:] + } + + // DNS, passthrough, and custom resolvers. + return parseEndpoint(endpoint) +} + +// parseUnix parses unix socket targets. +func parseUnix(endpoint string) (string, int, error) { + // Format: unix[-abstract]://path + // + // We should have "/path" (empty authority) if valid. + if len(endpoint) >= 1 && endpoint[0] == '/' { + // Return the full path including leading slash. 
+ return endpoint, -1, nil + } + + // If there's no leading slash, it means there might be an authority + // Check for authority case (should error): "authority/path" + if slashIdx := strings.Index(endpoint, "/"); slashIdx > 0 { + return "", -1, fmt.Errorf("invalid (non-empty) authority: %s", endpoint[:slashIdx]) + } + + return "", -1, errors.New("invalid unix target format") +} + +// parseEndpoint parses an endpoint from a gRPC target. +// +// It supports the following formats: +// - "host" +// - "host%zone" +// - "host:port" +// - "host%zone:port" +// - "ipv4" +// - "ipv4%zone" +// - "ipv4:port" +// - "ipv4%zone:port" +// - "ipv6" +// - "ipv6%zone" +// - "[ipv6]" +// - "[ipv6%zone]" +// - "[ipv6]:port" +// - "[ipv6%zone]:port" +// +// It returns the host or host%zone (domain address or IP), the port (or -1 if +// not specified), or an error if the input is not a valid. +func parseEndpoint(endpoint string) (string, int, error) { + // First check if the endpoint is just an IP address. + if ip := parseIP(endpoint); ip != "" { + return ip, -1, nil + } + + // If there's no colon, there is no port (IPv6 with no port checked above). + if !strings.Contains(endpoint, ":") { + return endpoint, -1, nil + } + + host, portStr, err := net.SplitHostPort(endpoint) + if err != nil { + return "", -1, fmt.Errorf("invalid host:port %q: %w", endpoint, err) + } + + const base, bitSize = 10, 16 + port16, err := strconv.ParseUint(portStr, base, bitSize) + if err != nil { + return "", -1, fmt.Errorf("invalid port %q: %w", portStr, err) + } + port := int(port16) // port is guaranteed to be in the range [0, 65535]. + + return host, port, nil +} + +// parseIP attempts to parse the entire endpoint as an IP address. +// It returns the normalized string form of the IP if successful, +// or an empty string if parsing fails. +func parseIP(ip string) string { + // Strip leading and trailing brackets for IPv6 addresses. + if len(ip) >= 2 && ip[0] == '[' && ip[len(ip)-1] == ']' { + ip = ip[1 : len(ip)-1] + } + addr, err := netip.ParseAddr(ip) + if err != nil { + return "" + } + // Return the normalized string form of the IP. + return addr.String() +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go index 7bb189a9..1d840be2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors @@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "TRACES_CLIENT_CERTIFICATE", + "TRACES_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 0a317d92..4f47117a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -1,9 +1,10 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package otlpconfig provides configuration for the otlptrace exporters. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" import ( @@ -52,7 +53,9 @@ type ( // gRPC configurations GRPCCredentials credentials.TransportCredentials - Proxy HTTPTransportProxyFunc + // HTTP configurations + Proxy HTTPTransportProxyFunc + HTTPClient *http.Client } Config struct { @@ -89,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." 
{ return defaultPath } if !path.IsAbs(tmp) { @@ -349,3 +351,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption { return cfg }) } + +func WithHTTPClient(c *http.Client) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.HTTPClient = c + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go index 3d4f699d..91849038 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go index 38b97a01..ba6e4118 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go index a12ea4c4..a811a07b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors @@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string { return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } +// As returns true if ps can be assigned to target and makes the assignment. +// Otherwise, it returns false. This supports the errors.As() interface. +func (ps PartialSuccess) As(target any) bool { + t, ok := target.(*PartialSuccess) + if !ok { + return false + } + *t = ps + return true +} + // Is supports the errors.Is() interface. func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 1c5450ab..a7b8e81a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/backoff/v5" ) // DefaultConfig are the recommended defaults to use. @@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, - MaxElapsedTime: c.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, } b.Reset() + maxElapsedTime := c.MaxElapsedTime + startTime := time.Now() + for { err := fn(ctx) if err == nil { @@ -94,21 +94,22 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } - bOff := b.NextBackOff() - if bOff == backoff.Stop { + // Check if context is canceled before attempting to wait and retry. + if ctx.Err() != nil { + return fmt.Errorf("%w: %w", ctx.Err(), err) + } + + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. - var delay time.Duration - if bOff > throttle { - delay = bOff - } else { - elapsed := b.GetElapsedTime() - if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { - return fmt.Errorf("max retry time would elapse: %w", err) - } - delay = throttle + bOff := b.NextBackOff() + delay := max(throttle, bOff) + + elapsed := time.Since(startTime) + if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { @@ -136,7 +137,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go new file mode 100644 index 00000000..e2d7cee1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + +// Version is the current release version of the OpenTelemetry OTLP gRPC trace +// exporter in use. +const Version = "1.39.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md new file mode 100644 index 00000000..15a3011b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md @@ -0,0 +1,36 @@ +# Experimental Features + +The `otlptracegrpc` exporter contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the `otlptracegrpc` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These feature may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. 
+ +## Features + +- [Observability](#observability) + +### Observability + +The `otlptracegrpc` exporter provides a observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.exporter.span.inflight` +- `otel.sdk.exporter.span.exported` +- `otel.sdk.exporter.operation.duration` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.37.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go new file mode 100644 index 00000000..4e89c652 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" + +import "strings" + +// Observability is an experimental feature flag that determines if exporter +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go new file mode 100644 index 00000000..741ba62c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go @@ -0,0 +1,58 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc]. +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" + +import ( + "os" +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. 
+type Feature[T any] struct { + keys []string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } + return Feature[T]{ + keys: keys, + parse: parse, + } +} + +// Keys returns the environment variable keys that can be set to enable the +// feature. +func (f Feature[T]) Keys() []string { return f.keys } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } + } + return v, ok +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index 00ab1f20..2da22987 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -199,8 +199,9 @@ func WithTimeout(duration time.Duration) Option { // explicitly returns a backoff time in the response. That time will take // precedence over these settings. // -// These settings do not define any network retry strategy. That is entirely -// handled by the gRPC ClientConn. +// These settings define the retry strategy implemented by the exporter. +// These settings do not define any network retry strategy. +// That is handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index f156ee66..6838f3c4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.34.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh deleted file mode 100644 index 93e80ea3..00000000 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -top_dir='.' 
-if [[ $# -gt 0 ]]; then - top_dir="${1}" -fi - -p=$(pwd) -mod_dirs=() - -# Note `mapfile` does not exist in older bash versions: -# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash - -while IFS= read -r line; do - mod_dirs+=("$line") -done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) - -for mod_dir in "${mod_dirs[@]}"; do - cd "${mod_dir}" - - while IFS= read -r line; do - echo ".${line#${p}}" - done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') - cd "${p}" -done diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go deleted file mode 100644 index 4259f032..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index c657ff8e..2e47b296 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package global provides the OpenTelemetry global API. package global // import "go.opentelemetry.io/otel/internal/global" import ( diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index adbca7d3..86d7f4ba 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -41,22 +41,22 @@ func GetLogger() logr.Logger { // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. 
-func Info(msg string, keysAndValues ...interface{}) { +func Info(msg string, keysAndValues ...any) { GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...interface{}) { +func Error(err error, msg string, keysAndValues ...any) { GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. -func Debug(msg string, keysAndValues ...interface{}) { +func Debug(msg string, keysAndValues ...any) { GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. -func Warn(msg string, keysAndValues ...interface{}) { +func Warn(msg string, keysAndValues ...any) { GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index a6acd8dc..6db969f7 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -105,7 +105,7 @@ type delegatedInstrument interface { setDelegate(metric.Meter) } -// instID are the identifying properties of a instrument. +// instID are the identifying properties of an instrument. type instID struct { // name is the name of the stream. name string @@ -169,7 +169,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return i, nil } -func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (m *meter) Int64UpDownCounter( + name string, + options ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -238,7 +241,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return i, nil } -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (m *meter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -261,7 +267,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return i, nil } -func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (m *meter) Int64ObservableUpDownCounter( + name string, + options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -284,7 +293,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return i, nil } -func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (m *meter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -330,7 +342,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return i, nil } -func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (m *meter) Float64UpDownCounter( + name string, + options 
...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -353,7 +368,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return i, nil } -func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (m *meter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -399,7 +417,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) return i, nil } -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (m *meter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -422,7 +443,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O return i, nil } -func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (m *meter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -445,7 +469,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return i, nil } -func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (m *meter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 8982aa0d..bf5cf311 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -26,6 +26,7 @@ import ( "sync/atomic" "go.opentelemetry.io/auto/sdk" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -158,7 +159,18 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart // a nonRecordingSpan by default. var autoInstEnabled = new(bool) -func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { +// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF +// uprobe to this code. +// +// "noinline" pragma prevents the method from ever being inlined. +// +//go:noinline +func (t *tracer) newSpan( + ctx context.Context, + autoSpan *bool, + name string, + opts []trace.SpanStartOption, +) (context.Context, trace.Span) { // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is // so the auto-instrumentation can define a uprobe for (*t).newSpan and be // provided with the address of the bool autoInstEnabled points to. 
It diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go deleted file mode 100644 index b2fe3e41..00000000 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -import ( - "math" - "unsafe" -) - -func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. - if b { - return 1 - } - return 0 -} - -func RawToBool(r uint64) bool { - return r != 0 -} - -func Int64ToRaw(i int64) uint64 { - // Assumes original was a valid int64 (overflow not checked). - return uint64(i) // nolint: gosec -} - -func RawToInt64(r uint64) int64 { - // Assumes original was a valid int64 (overflow not checked). - return int64(r) // nolint: gosec -} - -func Float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func RawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} - -func RawPtrToFloat64Ptr(r *uint64) *float64 { - // Assumes original was a valid *float64 (overflow not checked). - return (*float64)(unsafe.Pointer(r)) // nolint: gosec -} - -func RawPtrToInt64Ptr(r *uint64) *int64 { - // Assumes original was a valid *int64 (overflow not checked). - return (*int64)(unsafe.Pointer(r)) // nolint: gosec -} diff --git a/vendor/go.opentelemetry.io/otel/log/LICENSE b/vendor/go.opentelemetry.io/otel/log/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/log/LICENSE +++ b/vendor/go.opentelemetry.io/otel/log/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/log/doc.go b/vendor/go.opentelemetry.io/otel/log/doc.go index 18cbd1cb..b7a085c6 100644 --- a/vendor/go.opentelemetry.io/otel/log/doc.go +++ b/vendor/go.opentelemetry.io/otel/log/doc.go @@ -4,10 +4,19 @@ /* Package log provides the OpenTelemetry Logs API. -This package is intended to be used by bridges between existing logging -libraries and OpenTelemetry. Users should not directly use this package as a -logging library. Instead, install one of the bridges listed in the -[registry], and use the associated logging library. +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/log] for the official +OpenTelemetry implementation of this API. + +The log package provides the OpenTelemetry Logs API, which serves as a standard +interface for generating and managing log records within the OpenTelemetry ecosystem. +This package allows users to emit LogRecords, enabling structured, context-rich logging +that can be easily integrated with observability tools. It ensures that log data is captured +in a way that is consistent with OpenTelemetry's data model. + +This package can be used to create bridges between existing logging libraries and OpenTelemetry. +Log bridges allow integrating the existing logging setups with OpenTelemetry. +Log bridges can be found in the [registry]. # API Implementations diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go index a3714c4c..9b401b2b 100644 --- a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go @@ -4,33 +4,33 @@ // Package embedded provides interfaces embedded within the [OpenTelemetry Logs // Bridge API]. // -// Implementers of the [OpenTelemetry Logs Bridge API] can embed the relevant +// Implementers of the [OpenTelemetry Logs API] can embed the relevant // type from this package into their implementation directly. Doing so will // result in a compilation error for users when the [OpenTelemetry Logs Bridge // API] is extended (which is something that can happen without a major version // bump of the API package). // -// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log +// [OpenTelemetry Logs API]: https://pkg.go.dev/go.opentelemetry.io/otel/log package embedded // import "go.opentelemetry.io/otel/log/embedded" -// LoggerProvider is embedded in the [Logs Bridge API LoggerProvider]. +// LoggerProvider is embedded in the [Logs API LoggerProvider]. // -// Embed this interface in your implementation of the [Logs Bridge API +// Embed this interface in your implementation of the [Logs API // LoggerProvider] if you want users to experience a compilation error, // signaling they need to update to your latest implementation, when the [Logs // Bridge API LoggerProvider] interface is extended (which is something that // can happen without a major version bump of the API package). // -// [Logs Bridge API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider +// [Logs API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider type LoggerProvider interface{ loggerProvider() } -// Logger is embedded in [Logs Bridge API Logger]. +// Logger is embedded in [Logs API Logger]. 
// -// Embed this interface in your implementation of the [Logs Bridge API Logger] +// Embed this interface in your implementation of the [Logs API Logger] // if you want users to experience a compilation error, signaling they need to -// update to your latest implementation, when the [Logs Bridge API Logger] +// update to your latest implementation, when the [Logs API Logger] // interface is extended (which is something that can happen without a major // version bump of the API package). // -// [Logs Bridge API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger +// [Logs API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger type Logger interface{ logger() } diff --git a/vendor/go.opentelemetry.io/otel/log/global/log.go b/vendor/go.opentelemetry.io/otel/log/global/log.go index 71ec5779..bfdb1847 100644 --- a/vendor/go.opentelemetry.io/otel/log/global/log.go +++ b/vendor/go.opentelemetry.io/otel/log/global/log.go @@ -3,7 +3,7 @@ /* Package global provides access to a global implementation of the OpenTelemetry -Logs Bridge API. +Logs API. This package is experimental. It will be deprecated and removed when the [log] package becomes stable. Its functionality will be migrated to diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go index d97ee966..e463acbf 100644 --- a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go +++ b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package global is the internal implementation of the OpenTelemetry global +// Logs API. package global // import "go.opentelemetry.io/otel/log/internal/global" import ( diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go index 73e4e7dc..f87cee04 100644 --- a/vendor/go.opentelemetry.io/otel/log/keyvalue.go +++ b/vendor/go.opentelemetry.io/otel/log/keyvalue.go @@ -242,10 +242,10 @@ func (v Value) Kind() Kind { } } -// Empty returns if v does not hold any value. +// Empty reports whether v does not hold any value. func (v Value) Empty() bool { return v.Kind() == KindEmpty } -// Equal returns if v is equal to w. +// Equal reports whether v is equal to w. func (v Value) Equal(w Value) bool { k1 := v.Kind() k2 := w.Kind() @@ -301,7 +301,7 @@ func (v Value) String() string { case KindBool: return strconv.FormatBool(v.asBool()) case KindBytes: - return fmt.Sprint(v.asBytes()) + return fmt.Sprint(v.asBytes()) // nolint:staticcheck // Use fmt.Sprint to encode as slice. case KindMap: return fmt.Sprint(v.asMap()) case KindSlice: @@ -326,7 +326,7 @@ type KeyValue struct { Value Value } -// Equal returns if a is equal to b. +// Equal reports whether a is equal to b. 
func (a KeyValue) Equal(b KeyValue) bool { return a.Key == b.Key && a.Value.Equal(b.Value) } diff --git a/vendor/go.opentelemetry.io/otel/log/kind_string.go b/vendor/go.opentelemetry.io/otel/log/kind_string.go index bdfaa186..b4f9e533 100644 --- a/vendor/go.opentelemetry.io/otel/log/kind_string.go +++ b/vendor/go.opentelemetry.io/otel/log/kind_string.go @@ -23,8 +23,9 @@ const _Kind_name = "EmptyBoolFloat64Int64StringBytesSliceMap" var _Kind_index = [...]uint8{0, 5, 9, 16, 21, 27, 32, 37, 40} func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Kind_index)-1 { return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] + return _Kind_name[_Kind_index[idx]:_Kind_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/log/logger.go b/vendor/go.opentelemetry.io/otel/log/logger.go index 1205f08e..d9decebd 100644 --- a/vendor/go.opentelemetry.io/otel/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/log/logger.go @@ -5,6 +5,7 @@ package log // import "go.opentelemetry.io/otel/log" import ( "context" + "slices" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/log/embedded" @@ -30,7 +31,7 @@ type Logger interface { // concurrently. Emit(ctx context.Context, record Record) - // Enabled returns whether the Logger emits for the given context and + // Enabled reports whether the Logger emits for the given context and // param. // // This is useful for users that want to know if a [Record] @@ -114,13 +115,52 @@ func WithInstrumentationVersion(version string) LoggerOption { }) } +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + // WithInstrumentationAttributes returns a [LoggerOption] that sets the // instrumentation attributes of a [Logger]. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling WithInstrumentationAttributeSet with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) LoggerOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet returns a [LoggerOption] that adds the +// instrumentation attributes of a [Logger]. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) LoggerOption { + if set.Len() == 0 { + return loggerOptionFunc(func(config LoggerConfig) LoggerConfig { + return config + }) + } + return loggerOptionFunc(func(config LoggerConfig) LoggerConfig { - config.attrs = attribute.NewSet(attr...) 
+ if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } @@ -136,5 +176,6 @@ func WithSchemaURL(schemaURL string) LoggerOption { // EnabledParameters represents payload for [Logger]'s Enabled method. type EnabledParameters struct { - Severity Severity + Severity Severity + EventName string } diff --git a/vendor/go.opentelemetry.io/otel/log/noop/noop.go b/vendor/go.opentelemetry.io/otel/log/noop/noop.go index f45a7c7e..d779e5d8 100644 --- a/vendor/go.opentelemetry.io/otel/log/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/log/noop/noop.go @@ -4,14 +4,14 @@ // Package noop provides an implementation of the [OpenTelemetry Logs Bridge // API] that produces no telemetry and minimizes used computation resources. // -// Using this package to implement the [OpenTelemetry Logs Bridge API] will +// Using this package to implement the [OpenTelemetry Logs API] will // effectively disable OpenTelemetry. // // This implementation can be embedded in other implementations of the -// [OpenTelemetry Logs Bridge API]. Doing so will mean the implementation +// [OpenTelemetry Logs API]. Doing so will mean the implementation // defaults to no operation for methods it does not implement. // -// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log +// [OpenTelemetry Logs API]: https://pkg.go.dev/go.opentelemetry.io/otel/log package noop // import "go.opentelemetry.io/otel/log/noop" import ( diff --git a/vendor/go.opentelemetry.io/otel/log/record.go b/vendor/go.opentelemetry.io/otel/log/record.go index 4d2f32d0..adde7a0d 100644 --- a/vendor/go.opentelemetry.io/otel/log/record.go +++ b/vendor/go.opentelemetry.io/otel/log/record.go @@ -142,3 +142,11 @@ func (r *Record) AddAttributes(attrs ...KeyValue) { func (r *Record) AttributesLen() int { return r.nFront + len(r.back) } + +// Clone returns a copy of the record with no shared state. +// The original record and the clone can both be modified without interfering with each other. +func (r *Record) Clone() Record { + res := *r + res.back = slices.Clone(r.back) + return res +} diff --git a/vendor/go.opentelemetry.io/otel/log/severity_string.go b/vendor/go.opentelemetry.io/otel/log/severity_string.go index 4c20fa5e..fb94caea 100644 --- a/vendor/go.opentelemetry.io/otel/log/severity_string.go +++ b/vendor/go.opentelemetry.io/otel/log/severity_string.go @@ -40,8 +40,9 @@ const _Severity_name = "UNDEFINEDTRACETRACE2TRACE3TRACE4DEBUGDEBUG2DEBUG3DEBUG4I var _Severity_index = [...]uint8{0, 9, 14, 20, 26, 32, 37, 43, 49, 55, 59, 64, 69, 74, 78, 83, 88, 93, 98, 104, 110, 116, 121, 127, 133, 139} func (i Severity) String() string { - if i < 0 || i >= Severity(len(_Severity_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Severity_index)-1 { return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Severity_name[_Severity_index[i]:_Severity_index[i+1]] + return _Severity_name[_Severity_index[idx]:_Severity_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index 1e6473b3..527d9aec 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -11,7 +11,7 @@ import ( // Meter returns a Meter from the global MeterProvider. The name must be the // name of the library providing instrumentation. This name may be the same as // the instrumented code only if that code provides built-in instrumentation. 
-// If the name is empty, then a implementation defined default name will be +// If the name is empty, then an implementation defined default name will be // used instead. // // If this is called before a global MeterProvider is registered the returned diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/metric/LICENSE +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index f8435d8f..b7fc973a 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -106,7 +106,9 @@ type Float64ObservableUpDownCounterConfig struct { // NewFloat64ObservableUpDownCounterConfig returns a new // [Float64ObservableUpDownCounterConfig] with all opts applied. 
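The Record.Clone method added above is aimed at asynchronous processing; a hypothetical sketch of the intended pattern (handOff is an illustrative helper, not an API from this patch):

package main

import "go.opentelemetry.io/otel/log"

// handOff clones the record so the caller can keep mutating its copy while
// the clone is consumed on another goroutine without sharing state.
func handOff(r log.Record, out chan<- log.Record) {
	c := r.Clone() // Clone returns a copy with no shared state.
	go func() { out <- c }()
}

func main() {
	out := make(chan log.Record, 1)
	var r log.Record
	r.AddAttributes(log.String("k", "v"))
	handOff(r, out)
	<-out
}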
-func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { +func NewFloat64ObservableUpDownCounterConfig( + opts ...Float64ObservableUpDownCounterOption, +) Float64ObservableUpDownCounterConfig { var config Float64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyFloat64ObservableUpDownCounter(config) @@ -239,12 +241,16 @@ type float64CallbackOpt struct { cback Float64Callback } -func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableCounter( + cfg Float64ObservableCounterConfig, +) Float64ObservableCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } -func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter( + cfg Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index e079aaef..4404b71a 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -105,7 +105,9 @@ type Int64ObservableUpDownCounterConfig struct { // NewInt64ObservableUpDownCounterConfig returns a new // [Int64ObservableUpDownCounterConfig] with all opts applied. -func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { +func NewInt64ObservableUpDownCounterConfig( + opts ...Int64ObservableUpDownCounterOption, +) Int64ObservableUpDownCounterConfig { var config Int64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyInt64ObservableUpDownCounter(config) @@ -242,7 +244,9 @@ func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounter return cfg } -func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter( + cfg Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index d9e3b13e..e42dd6e7 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -3,7 +3,11 @@ package metric // import "go.opentelemetry.io/otel/metric" -import "go.opentelemetry.io/otel/attribute" +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" +) // MeterConfig contains options for Meters. type MeterConfig struct { @@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// WithInstrumentationAttributes adds the instrumentation attributes. +// +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. // -// The passed attributes will be de-duplicated. 
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) MeterOption { + if set.Len() == 0 { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + return config + }) + } + return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index a535782e..9f48d5f1 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -63,7 +63,9 @@ func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o descOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -98,7 +100,9 @@ func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o descOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -138,7 +142,9 @@ func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o unitOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.unit = string(o) return c } @@ -173,7 +179,9 @@ func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o unitOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.unit = string(o) return c } diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 14e08c24..fdd2a701 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -110,7 +110,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. 
- Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + Int64ObservableUpDownCounter( + name string, + options ...Int64ObservableUpDownCounterOption, + ) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -194,7 +197,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. - Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + Float64ObservableUpDownCounter( + name string, + options ...Float64ObservableUpDownCounterOption, + ) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index ca6fcbdc..9afb69e5 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -86,13 +86,19 @@ func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (Meter) Int64ObservableCounter( + string, + ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { return Int64ObservableCounter{}, nil } // Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (Meter) Int64ObservableUpDownCounter( + string, + ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { return Int64ObservableUpDownCounter{}, nil } @@ -128,19 +134,28 @@ func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64G // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (Meter) Float64ObservableCounter( + string, + ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { return Float64ObservableCounter{}, nil } // Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (Meter) Float64ObservableUpDownCounter( + string, + ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { return Float64ObservableUpDownCounter{}, nil } // Float64ObservableGauge returns an ObservableGauge used to record int64 // measurements that produces no telemetry. 
-func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (Meter) Float64ObservableGauge( + string, + ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { return Float64ObservableGauge{}, nil } diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 552263ba..05188260 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -20,7 +20,7 @@ type Baggage struct{} var _ TextMapPropagator = Baggage{} // Inject sets baggage key-values from ctx into the carrier. -func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { +func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { bStr := baggage.FromContext(ctx).String() if bStr != "" { carrier.Set(baggageHeader, bStr) @@ -28,7 +28,21 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { } // Extract returns a copy of parent with the baggage from the carrier added. -func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { +// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked +// for multiple values extraction. Otherwise, Get is called. +func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { + if multiCarrier, ok := carrier.(ValuesGetter); ok { + return extractMultiBaggage(parent, multiCarrier) + } + return extractSingleBaggage(parent, carrier) +} + +// Fields returns the keys who's values are set with Inject. +func (Baggage) Fields() []string { + return []string{baggageHeader} +} + +func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context { bStr := carrier.Get(baggageHeader) if bStr == "" { return parent @@ -41,7 +55,23 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context return baggage.ContextWithBaggage(parent, bag) } -// Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { - return []string{baggageHeader} +func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context { + bVals := carrier.Values(baggageHeader) + if len(bVals) == 0 { + return parent + } + var members []baggage.Member + for _, bStr := range bVals { + currBag, err := baggage.Parse(bStr) + if err != nil { + continue + } + members = append(members, currBag.Members()...) + } + + b, err := baggage.New(members...) + if err != nil || b.Len() == 0 { + return parent + } + return baggage.ContextWithBaggage(parent, b) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index 8c8286aa..0a32c59a 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -9,6 +9,7 @@ import ( ) // TextMapCarrier is the storage medium used by a TextMapPropagator. +// See ValuesGetter for how a TextMapCarrier can get multiple values for a key. type TextMapCarrier interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -19,7 +20,7 @@ type TextMapCarrier interface { // must never be done outside of a new major release. // Set stores the key-value pair. 
- Set(key string, value string) + Set(key, value string) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -29,6 +30,18 @@ type TextMapCarrier interface { // must never be done outside of a new major release. } +// ValuesGetter can return multiple values for a single key, +// with contrast to TextMapCarrier.Get which returns a single value. +type ValuesGetter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Values returns all values associated with the passed key. + Values(key string) []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + // MapCarrier is a TextMapCarrier that uses a map held in memory as a storage // medium for propagated key-value pairs. type MapCarrier map[string]string @@ -55,16 +68,27 @@ func (c MapCarrier) Keys() []string { return keys } -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. +// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces. type HeaderCarrier http.Header -// Get returns the value associated with the passed key. +// Compile time check that HeaderCarrier implements ValuesGetter. +var _ TextMapCarrier = HeaderCarrier{} + +// Compile time check that HeaderCarrier implements TextMapCarrier. +var _ ValuesGetter = HeaderCarrier{} + +// Get returns the first value associated with the passed key. func (hc HeaderCarrier) Get(key string) string { return http.Header(hc).Get(key) } +// Values returns all values associated with the passed key. +func (hc HeaderCarrier) Values(key string) []string { + return http.Header(hc).Values(key) +} + // Set stores the key-value pair. -func (hc HeaderCarrier) Set(key string, value string) { +func (hc HeaderCarrier) Set(key, value string) { http.Header(hc).Set(key, value) } @@ -89,6 +113,8 @@ type TextMapPropagator interface { // must never be done outside of a new major release. // Extract reads cross-cutting concerns from the carrier into a Context. + // Implementations may check if the carrier implements ValuesGetter, + // to support extraction of multiple values per key. Extract(ctx context.Context, carrier TextMapCarrier) context.Context // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 6870e316..271ab71f 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -36,7 +36,7 @@ var ( ) // Inject injects the trace context from ctx into carrier. 
-func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { +func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { return @@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont return trace.ContextWithRemoteSpanContext(ctx, sc) } -func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { +func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { h := carrier.Get(traceparentHeader) if h == "" { return trace.SpanContext{} @@ -111,7 +111,7 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { } // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked. // Ignore the error returned here. Failure to parse tracestate MUST NOT // affect the parsing of traceparent according to the W3C tracecontext @@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool { } // Fields returns the keys who's values are set with Inject. -func (tc TraceContext) Fields() []string { +func (TraceContext) Fields() []string { return []string{traceparentHeader, tracestateHeader} } diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index a6fa353f..fa5acf2d 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -1,7 +1,8 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "config:best-practices" + "config:best-practices", + "helpers:pinGitHubActionDigestsToSemver" ], "ignorePaths": [], "labels": ["Skip Changelog", "dependencies"], @@ -25,6 +26,10 @@ { "matchPackageNames": ["golang.org/x/**"], "groupName": "golang.org/x" + }, + { + "matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"], + "enabled": false } ] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go new file mode 100644 index 00000000..bfeb73e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import "strings" + +// Resource is an experimental feature flag that defines if resource detectors +// should be included experimental semantic conventions. +// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature( + []string{"RESOURCE"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) + +// Observability is an experimental feature flag that determines if SDK +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY", "SELF_OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go index 68d296cb..13347e56 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -1,48 +1,38 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package x contains support for OTel SDK experimental features. -// -// This package should only be used for features defined in the specification. -// It should not be used for experiments or new project ideas. +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. package x // import "go.opentelemetry.io/otel/sdk/internal/x" import ( "os" - "strings" ) -// Resource is an experimental feature flag that defines if resource detectors -// should be included experimental semantic conventions. -// -// To enable this feature set the OTEL_GO_X_RESOURCE environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). -var Resource = newFeature("RESOURCE", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { - return v, true - } - return "", false -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. 
type Feature[T any] struct { - key string + keys []string parse func(v string) (T, bool) } -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } return Feature[T]{ - key: envKeyRoot + suffix, + keys: keys, parse: parse, } } -// Key returns the environment variable key that needs to be set to enable the +// Keys returns the environment variable keys that can be set to enable the // feature. -func (f Feature[T]) Key() string { return f.key } +func (f Feature[T]) Keys() []string { return f.keys } // Lookup returns the user configured value for the feature and true if the // user has enabled the feature. Otherwise, if the feature is not enabled, a @@ -52,14 +42,16 @@ func (f Feature[T]) Lookup() (v T, ok bool) { // // > The SDK MUST interpret an empty value of an environment variable the // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } } - return f.parse(vRaw) + return v, ok } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/log/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go index 28c96926..c54407e6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go @@ -156,13 +156,20 @@ func (b *BatchProcessor) poll(interval time.Duration) (done chan struct{}) { global.Warn("dropped log records", "dropped", d) } - qLen := b.q.TryDequeue(buf, func(r []Record) bool { - ok := b.exporter.EnqueueExport(r) - if ok { - buf = slices.Clone(buf) - } - return ok - }) + var qLen int + // Don't copy data from queue unless exporter can accept more, it is very expensive. + if b.exporter.Ready() { + qLen = b.q.TryDequeue(buf, func(r []Record) bool { + ok := b.exporter.EnqueueExport(r) + if ok { + buf = slices.Clone(buf) + } + return ok + }) + } else { + qLen = b.q.Len() + } + if qLen >= b.batchSize { // There is another full batch ready. Immediately trigger // another export attempt. @@ -177,6 +184,11 @@ func (b *BatchProcessor) poll(interval time.Duration) (done chan struct{}) { return done } +// Enabled returns true, indicating this Processor will process all records. +func (*BatchProcessor) Enabled(context.Context, EnabledParameters) bool { + return true +} + // OnEmit batches provided log record. func (b *BatchProcessor) OnEmit(_ context.Context, r *Record) error { if b.stopped.Load() || b.q == nil { @@ -272,6 +284,13 @@ func newQueue(size int) *queue { } } +func (q *queue) Len() int { + q.Lock() + defer q.Unlock() + + return q.len +} + // Dropped returns the number of Records dropped during enqueueing since the // last time Dropped was called. func (q *queue) Dropped() uint64 { @@ -315,7 +334,7 @@ func (q *queue) TryDequeue(buf []Record, write func([]Record) bool) int { origRead := q.read n := min(len(buf), q.len) - for i := 0; i < n; i++ { + for i := range n { buf[i] = q.read.Value q.read = q.read.Next() } @@ -361,25 +380,25 @@ func newBatchConfig(options []BatchProcessorOption) batchConfig { c.maxQSize = c.maxQSize.Resolve( clearLessThanOne[int](), getenv[int](envarMaxQSize), - clearLessThanOne[int](), + clearLessThanOne[int](), // nolint:gocritic // the function argument is duplicated on purpose fallback[int](dfltMaxQSize), ) c.expInterval = c.expInterval.Resolve( clearLessThanOne[time.Duration](), getenv[time.Duration](envarExpInterval), - clearLessThanOne[time.Duration](), + clearLessThanOne[time.Duration](), // nolint:gocritic // the function argument is duplicated on purpose fallback[time.Duration](dfltExpInterval), ) c.expTimeout = c.expTimeout.Resolve( clearLessThanOne[time.Duration](), getenv[time.Duration](envarExpTimeout), - clearLessThanOne[time.Duration](), + clearLessThanOne[time.Duration](), // nolint:gocritic // the function argument is duplicated on purpose fallback[time.Duration](dfltExpTimeout), ) c.expMaxBatchSize = c.expMaxBatchSize.Resolve( clearLessThanOne[int](), getenv[int](envarExpMaxBatchSize), - clearLessThanOne[int](), + clearLessThanOne[int](), // nolint:gocritic // the function argument is duplicated on purpose clampMax[int](c.maxQSize.Value), fallback[int](dfltExpMaxBatchSize), ) diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go index 6a1f1b0e..a27834a5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go @@ -30,7 +30,10 @@ should be used to describe the unique runtime environment instrumented code is being run on. 
That way when multiple instances of the code are collected at a single endpoint their origin is decipherable. +See [go.opentelemetry.io/otel/sdk/log/internal/x] for information about +the experimental features. + See [go.opentelemetry.io/otel/log] for more information about -the OpenTelemetry Logs Bridge API. +the OpenTelemetry Logs API. */ package log // import "go.opentelemetry.io/otel/sdk/log" diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go index e4e3c540..a9d3c439 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go @@ -119,7 +119,9 @@ func newTimeoutExporter(exp Exporter, timeout time.Duration) Exporter { // Export sets the timeout of ctx before calling the Exporter e wraps. func (e *timeoutExporter) Export(ctx context.Context, records []Record) error { - ctx, cancel := context.WithTimeout(ctx, e.timeout) + // This only used by the batch processor, and it takes processor timeout config. + // Thus, the error message points to the processor. So users know they should adjust the processor timeout. + ctx, cancel := context.WithTimeoutCause(ctx, e.timeout, errors.New("processor export timeout")) defer cancel() return e.Exporter.Export(ctx, records) } @@ -186,11 +188,10 @@ type bufferExporter struct { // newBufferExporter returns a new bufferExporter that wraps exporter. The // returned bufferExporter will buffer at most size number of export requests. -// If size is less than zero, zero will be used (i.e. only synchronous -// exporting will be supported). +// If size is less than 1, 1 will be used. func newBufferExporter(exporter Exporter, size int) *bufferExporter { - if size < 0 { - size = 0 + if size < 1 { + size = 1 } input := make(chan exportData, size) return &bufferExporter{ @@ -201,6 +202,10 @@ func newBufferExporter(exporter Exporter, size int) *bufferExporter { } } +func (e *bufferExporter) Ready() bool { + return len(e.input) != cap(e.input) +} + var errStopped = errors.New("exporter stopped") func (e *bufferExporter) enqueue(ctx context.Context, records []Record, rCh chan<- error) error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go deleted file mode 100644 index 5b99a4a9..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package log // import "go.opentelemetry.io/otel/sdk/log" - -import ( - "context" - - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/resource" -) - -// FilterProcessor is a [Processor] that knows, and can identify, what [Record] -// it will process or drop when it is passed to [Processor.OnEmit]. -// -// This is useful for users that want to know if a [log.Record] -// will be processed or dropped before they perform complex operations to -// construct the [log.Record]. -// -// The SDK's Logger.Enabled returns false -// if all the registered Processors implement FilterProcessor -// and they all return false. -// -// Processor implementations that choose to support this by satisfying this -// interface are expected to re-evaluate the [Record] passed to [Processor.OnEmit], -// it is not expected that the caller to OnEmit will use the functionality -// from this interface prior to calling OnEmit. 
-// -// See the [go.opentelemetry.io/contrib/processors/minsev] for an example use-case. -// It provides a Processor used to filter out [Record] -// that has a [log.Severity] below a threshold. -type FilterProcessor interface { - // Enabled returns whether the Processor will process for the given context - // and param. - // - // The passed param is likely to be a partial record information being - // provided (e.g a param with only the Severity set). - // If a Processor needs more information than is provided, it - // is said to be in an indeterminate state (see below). - // - // The returned value will be true when the Processor will process for the - // provided context and param, and will be false if the Logger will not - // emit. The returned value may be true or false in an indeterminate state. - // An implementation should default to returning true for an indeterminate - // state, but may return false if valid reasons in particular circumstances - // exist (e.g. performance, correctness). - // - // The param should not be held by the implementation. A copy should be - // made if the param needs to be held after the call returns. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(ctx context.Context, param EnabledParameters) bool -} - -// EnabledParameters represents payload for [FilterProcessor]'s Enabled method. -type EnabledParameters struct { - Resource resource.Resource - InstrumentationScope instrumentation.Scope - Severity log.Severity -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/instrumentation.go b/vendor/go.opentelemetry.io/otel/sdk/log/instrumentation.go new file mode 100644 index 00000000..d40dbab5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/instrumentation.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package log // import "go.opentelemetry.io/otel/sdk/log" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/log/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +// newRecordCounterIncr returns a function that increments the log record +// counter metric. If observability is disabled, it returns nil. +func newRecordCounterIncr() (func(context.Context), error) { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter( + "go.opentelemetry.io/otel/sdk/log", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + created, err := otelconv.NewSDKLogCreated(m) + if err != nil { + err = fmt.Errorf("failed to create log created metric: %w", err) + return nil, err + } + inst := created.Inst() + f := func(ctx context.Context) { inst.Add(ctx, 1) } + return f, nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/doc.go new file mode 100644 index 00000000..6879567c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides observability instrumentation for the OTel log SDK +// package. 
+package observ // import "go.opentelemetry.io/otel/sdk/log/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/simple_log_processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/simple_log_processor.go new file mode 100644 index 00000000..932eec07 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/observ/simple_log_processor.go @@ -0,0 +1,126 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/log/internal/observ" + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/log/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the name of the instrumentation scope. + ScopeName = "go.opentelemetry.io/otel/sdk/log/internal/observ" +) + +var measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, +} + +// simpleProcessorN is a global 0-based count of the number of simple processor created. +var simpleProcessorN atomic.Int64 + +// NextSimpleProcessorID returns the next unique ID for a simpleProcessor. +func NextSimpleProcessorID() int64 { + const inc = 1 + return simpleProcessorN.Add(inc) - inc +} + +// SetSimpleProcessorID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. +func SetSimpleProcessorID(v int64) int64 { + return simpleProcessorN.Swap(v) +} + +// GetSLPComponentName returns the component name attribute for a +// SimpleLogProcessor with the given ID. +func GetSLPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeSimpleLogProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// SLP is the instrumentation for an OTel SDK SimpleLogProcessor. +type SLP struct { + processed metric.Int64Counter + attrs []attribute.KeyValue + addOpts []metric.AddOption +} + +// NewSLP returns instrumentation for an OTel SDK SimpleLogProcessor with the +// provided ID. +// +// If the experimental observability is disabled, nil is returned. +func NewSLP(id int64) (*SLP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider() + mt := meter.Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + p, err := otelconv.NewSDKProcessorLogProcessed(mt) + if err != nil { + err = fmt.Errorf("failed to create a processed log metric: %w", err) + return nil, err + } + + name := GetSLPComponentName(id) + componentType := p.AttrComponentType(otelconv.ComponentTypeSimpleLogProcessor) + attrs := []attribute.KeyValue{name, componentType} + addOpts := []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(attrs...))} + + return &SLP{ + processed: p.Inst(), + attrs: attrs, + addOpts: addOpts, + }, nil +} + +// LogProcessed records that a log has been processed by the SimpleLogProcessor. +// If err is non-nil, it records the processing error as an attribute. 
+func (slp *SLP) LogProcessed(ctx context.Context, err error) { + slp.processed.Add(ctx, 1, slp.addOption(err)...) +} + +func (slp *SLP) addOption(err error) []metric.AddOption { + if err == nil { + return slp.addOpts + } + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice + measureAttrsPool.Put(attrs) + }() + + *attrs = append(*attrs, slp.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. + return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*attrs...))} +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md new file mode 100644 index 00000000..33176f78 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md @@ -0,0 +1,34 @@ +# Experimental Features + +The Logs SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Logs SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These feature may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Observability](#observability) + +### Observability + +The Logs SDK can be configured to provide observability about itself using OpenTelemetry metrics. + +To opt-in, set the environment variable `OTEL_GO_X_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.log.created` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/features.go new file mode 100644 index 00000000..e0ac88a4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/features.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/log]. +package x // import "go.opentelemetry.io/otel/sdk/log/internal/x" + +import "strings" + +// Observability is an experimental feature flag that determines if SDK +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. 
"True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY", "SELF_OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go new file mode 100644 index 00000000..e597efb1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go @@ -0,0 +1,58 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/log]. +package x // import "go.opentelemetry.io/otel/sdk/log/internal/x" + +import ( + "os" +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + keys []string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } + return Feature[T]{ + keys: keys, + parse: parse, + } +} + +// Keys returns the environment variable keys that can be set to enable the +// feature. +func (f Feature[T]) Keys() []string { return f.keys } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } + } + return v, ok +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go index 6211d5d9..f43a867c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go @@ -24,13 +24,24 @@ type logger struct { provider *LoggerProvider instrumentationScope instrumentation.Scope + + // recCntIncr increments the count of log records created. It will be nil + // if observability is disabled. + recCntIncr func(context.Context) } func newLogger(p *LoggerProvider, scope instrumentation.Scope) *logger { - return &logger{ + l := &logger{ provider: p, instrumentationScope: scope, } + + var err error + l.recCntIncr, err = newRecordCounterIncr() + if err != nil { + otel.Handle(err) + } + return l } func (l *logger) Emit(ctx context.Context, r log.Record) { @@ -43,35 +54,25 @@ func (l *logger) Emit(ctx context.Context, r log.Record) { } // Enabled returns true if at least one Processor held by the LoggerProvider -// that created the logger will process param for the provided context and param. +// that created the logger will process for the provided context and param. 
// -// If it is not possible to definitively determine the param will be +// If it is not possible to definitively determine the record will be // processed, true will be returned by default. A value of false will only be // returned if it can be positively verified that no Processor will process. func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool { p := EnabledParameters{ - Resource: *l.provider.resource, InstrumentationScope: l.instrumentationScope, Severity: param.Severity, + EventName: param.EventName, } - // If there are more Processors than FilterProcessors, - // which means not all Processors are FilterProcessors, - // we cannot be sure that all Processors will drop the record. - // Therefore, return true. - // - // If all Processors are FilterProcessors, check if any is enabled. - return len(l.provider.processors) > len(l.provider.fltrProcessors) || anyEnabled(ctx, p, l.provider.fltrProcessors) -} - -func anyEnabled(ctx context.Context, param EnabledParameters, fltrs []FilterProcessor) bool { - for _, f := range fltrs { - if f.Enabled(ctx, param) { + for _, processor := range l.provider.processors { + if processor.Enabled(ctx, p) { // At least one Processor will process the Record. return true } } - // No Processor will process the record + // No Processor will process the record. return false } @@ -84,7 +85,6 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record { observedTimestamp: r.ObservedTimestamp(), severity: r.Severity(), severityText: r.SeverityText(), - body: r.Body(), traceID: sc.TraceID(), spanID: sc.SpanID(), @@ -94,8 +94,15 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record { scope: &l.instrumentationScope, attributeValueLengthLimit: l.provider.attributeValueLengthLimit, attributeCountLimit: l.provider.attributeCountLimit, + allowDupKeys: l.provider.allowDupKeys, + } + if l.recCntIncr != nil { + l.recCntIncr(ctx) } + // This ensures we deduplicate key-value collections in the log body + newRecord.SetBody(r.Body()) + // This field SHOULD be set once the event is observed by OpenTelemetry. if newRecord.observedTimestamp.IsZero() { newRecord.observedTimestamp = now() diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/processor.go index c9b306f2..a2d53f3a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/processor.go @@ -5,6 +5,9 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( "context" + + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/sdk/instrumentation" ) // Processor handles the processing of log records. @@ -12,9 +15,36 @@ import ( // Any of the Processor's methods may be called concurrently with itself // or with other methods. It is the responsibility of the Processor to manage // this concurrency. -// -// See [FilterProcessor] for information about how a Processor can support filtering. type Processor interface { + // Enabled reports whether the Processor will process for the given context + // and param. + // + // The passed param is likely to be partial record information being + // provided (e.g. a param with only the Severity set). + // If a Processor needs more information than is provided, it + // is said to be in an indeterminate state (see below). + // + // The returned value will be true when the Processor will process for the + // provided context and param, and will be false if the Processor will not + // process. 
The returned value may be true or false in an indeterminate state. + // An implementation should default to returning true for an indeterminate + // state, but may return false if valid reasons in particular circumstances + // exist (e.g. performance, correctness). + // + // The param should not be held by the implementation. A copy should be + // made if the param needs to be held after the call returns. + // + // Processor implementations are expected to re-evaluate the [Record] passed + // to OnEmit. It is not expected that the caller to OnEmit will + // use the result from Enabled prior to calling OnEmit. + // + // The SDK's Logger.Enabled returns false if all the registered processors + // return false. Otherwise, it returns true. + // + // Implementations of this method need to be safe for a user to call + // concurrently. + Enabled(ctx context.Context, param EnabledParameters) bool + // OnEmit is called when a Record is emitted. // // OnEmit will be called independent of Enabled. Implementations need to @@ -32,7 +62,8 @@ type Processor interface { // they were registered using WithProcessor. // Implementations may synchronously modify the record so that the changes // are visible in the next registered processor. - // Notice that Record is not concurrent safe. Therefore, asynchronous + // + // Note that Record is not concurrent safe. Therefore, asynchronous // processing may cause race conditions. Use Record.Clone // to create a copy that shares no state with the original. OnEmit(ctx context.Context, record *Record) error @@ -54,3 +85,10 @@ type Processor interface { // appropriate error should be returned in these situations. ForceFlush(ctx context.Context) error } + +// EnabledParameters represents payload for [Processor]'s Enabled method. +type EnabledParameters struct { + InstrumentationScope instrumentation.Scope + Severity log.Severity + EventName string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go index 096944ea..17dc1374 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go @@ -27,11 +27,11 @@ const ( ) type providerConfig struct { - resource *resource.Resource - processors []Processor - fltrProcessors []FilterProcessor - attrCntLim setting[int] - attrValLenLim setting[int] + resource *resource.Resource + processors []Processor + attrCntLim setting[int] + attrValLenLim setting[int] + allowDupKeys setting[bool] } func newProviderConfig(opts []LoggerProviderOption) providerConfig { @@ -64,9 +64,9 @@ type LoggerProvider struct { resource *resource.Resource processors []Processor - fltrProcessors []FilterProcessor attributeCountLimit int attributeValueLengthLimit int + allowDupKeys bool loggersMu sync.Mutex loggers map[instrumentation.Scope]*logger @@ -90,9 +90,9 @@ func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider { return &LoggerProvider{ resource: cfg.resource, processors: cfg.processors, - fltrProcessors: cfg.fltrProcessors, attributeCountLimit: cfg.attrCntLim.Value, attributeValueLengthLimit: cfg.attrValLenLim.Value, + allowDupKeys: cfg.allowDupKeys.Value, } } @@ -205,14 +205,9 @@ func WithResource(res *resource.Resource) LoggerProviderOption { // // For production, use [NewBatchProcessor] to batch log records before they are exported. // For testing and debugging, use [NewSimpleProcessor] to synchronously export log records. 
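With FilterProcessor removed, Enabled is now part of the Processor interface itself. A minimal sketch of a severity-threshold decorator that satisfies the new contract (the type and field names are illustrative, not part of this patch):

package main

import (
	"context"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// minSevProcessor drops records below a severity threshold and forwards
// everything else to a wrapped Processor.
type minSevProcessor struct {
	next sdklog.Processor
	min  log.Severity
}

// Enabled reports true only when the severity meets the threshold, letting
// Logger.Enabled callers skip building expensive records.
func (p minSevProcessor) Enabled(_ context.Context, param sdklog.EnabledParameters) bool {
	return param.Severity >= p.min
}

// OnEmit re-evaluates the record because OnEmit is called independent of
// Enabled, per the interface documentation above.
func (p minSevProcessor) OnEmit(ctx context.Context, r *sdklog.Record) error {
	if r.Severity() < p.min {
		return nil
	}
	return p.next.OnEmit(ctx, r)
}

func (p minSevProcessor) Shutdown(ctx context.Context) error   { return p.next.Shutdown(ctx) }
func (p minSevProcessor) ForceFlush(ctx context.Context) error { return p.next.ForceFlush(ctx) }

func main() {
	// Wiring sketch only: next would normally wrap a processor over a real exporter.
	p := minSevProcessor{next: sdklog.NewSimpleProcessor(nil), min: log.SeverityInfo}
	_ = sdklog.NewLoggerProvider(sdklog.WithProcessor(p))
}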
-// -// See [FilterProcessor] for information about how a Processor can support filtering. func WithProcessor(processor Processor) LoggerProviderOption { return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig { cfg.processors = append(cfg.processors, processor) - if f, ok := processor.(FilterProcessor); ok { - cfg.fltrProcessors = append(cfg.fltrProcessors, f) - } return cfg }) } @@ -236,7 +231,7 @@ func WithAttributeCountLimit(limit int) LoggerProviderOption { }) } -// AttributeValueLengthLimit sets the maximum allowed attribute value length. +// WithAttributeValueLengthLimit sets the maximum allowed attribute value length. // // This limit only applies to string and string slice attribute values. // Any string longer than this value will be truncated to this length. @@ -254,3 +249,21 @@ func WithAttributeValueLengthLimit(limit int) LoggerProviderOption { return cfg }) } + +// WithAllowKeyDuplication sets whether deduplication is skipped for log attributes or other key-value collections. +// +// By default, the key-value collections within a log record are deduplicated to comply with the OpenTelemetry Specification. +// Deduplication means that if multiple key–value pairs with the same key are present, only a single pair +// is retained and others are discarded. +// +// Disabling deduplication with this option can improve performance e.g. of adding attributes to the log record. +// +// Note that if you disable deduplication, you are responsible for ensuring that duplicate +// key-value pairs within in a single collection are not emitted, +// or that the telemetry receiver can handle such duplicates. +func WithAllowKeyDuplication() LoggerProviderOption { + return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig { + cfg.allowDupKeys = newSetting(true) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/record.go b/vendor/go.opentelemetry.io/otel/sdk/log/record.go index a13fcac7..5b830b7e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/record.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/record.go @@ -27,7 +27,25 @@ var logAttrDropped = sync.OnceFunc(func() { global.Warn("limit reached: dropping log Record attributes") }) -// indexPool is a pool of index maps used for de-duplication. +// uniquePool is a pool of unique attributes used for attributes de-duplication. +var uniquePool = sync.Pool{ + New: func() any { return new([]log.KeyValue) }, +} + +func getUnique() *[]log.KeyValue { + return uniquePool.Get().(*[]log.KeyValue) +} + +func putUnique(v *[]log.KeyValue) { + // To reduce peak allocation. + const maxUniqueSize = 1028 + if cap(*v) <= maxUniqueSize { + *v = (*v)[:0] + uniquePool.Put(v) + } +} + +// indexPool is a pool of index maps used for attributes de-duplication. var indexPool = sync.Pool{ New: func() any { return make(map[string]int) }, } @@ -41,6 +59,20 @@ func putIndex(index map[string]int) { indexPool.Put(index) } +// seenPool is a pool of seen keys used for maps de-duplication. +var seenPool = sync.Pool{ + New: func() any { return make(map[string]struct{}) }, +} + +func getSeen() map[string]struct{} { + return seenPool.Get().(map[string]struct{}) +} + +func putSeen(seen map[string]struct{}) { + clear(seen) + seenPool.Put(seen) +} + // Record is a log record emitted by the Logger. // A log record with non-empty event name is interpreted as an event record. 
// @@ -93,6 +125,9 @@ type Record struct { attributeValueLengthLimit int attributeCountLimit int + // specifies whether we should deduplicate any key value collections or not + allowDupKeys bool + noCmp [0]func() //nolint: unused // This is indeed used. } @@ -167,7 +202,11 @@ func (r *Record) Body() log.Value { // SetBody sets the body of the log record. func (r *Record) SetBody(v log.Value) { - r.body = v + if !r.allowDupKeys { + r.body = r.dedupeBodyCollections(v) + } else { + r.body = v + } } // WalkAttributes walks all attributes the log record holds by calling f for @@ -192,56 +231,67 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) { if n == 0 { // Avoid the more complex duplicate map lookups below. var drop int - attrs, drop = dedup(attrs) - r.setDropped(drop) + if !r.allowDupKeys { + attrs, drop = dedup(attrs) + r.setDropped(drop) + } - attrs, drop = head(attrs, r.attributeCountLimit) + attrs, drop := head(attrs, r.attributeCountLimit) r.addDropped(drop) r.addAttrs(attrs) return } - // Used to find duplicates between attrs and existing attributes in r. - rIndex := r.attrIndex() - defer putIndex(rIndex) - - // Unique attrs that need to be added to r. This uses the same underlying - // array as attrs. - // - // Note, do not iterate attrs twice by just calling dedup(attrs) here. - unique := attrs[:0] - // Used to find duplicates within attrs itself. The index value is the - // index of the element in unique. - uIndex := getIndex() - defer putIndex(uIndex) - - // Deduplicate attrs within the scope of all existing attributes. - for _, a := range attrs { - // Last-value-wins for any duplicates in attrs. - idx, found := uIndex[a.Key] - if found { - r.addDropped(1) - unique[idx] = a - continue - } + if !r.allowDupKeys { + // Use a slice from the pool to avoid modifying the original. + // Note, do not iterate attrs twice by just calling dedup(attrs) here. + unique := getUnique() + defer putUnique(unique) + + // Used to find duplicates between attrs and existing attributes in r. + rIndex := r.attrIndex() + defer putIndex(rIndex) + + // Used to find duplicates within attrs itself. + // The index value is the index of the element in unique. + uIndex := getIndex() + defer putIndex(uIndex) + + dropped := 0 + + // Deduplicate attrs within the scope of all existing attributes. + for _, a := range attrs { + // Last-value-wins for any duplicates in attrs. + idx, found := uIndex[a.Key] + if found { + dropped++ + (*unique)[idx] = a + continue + } - idx, found = rIndex[a.Key] - if found { - // New attrs overwrite any existing with the same key. - r.addDropped(1) - if idx < 0 { - r.front[-(idx + 1)] = a + idx, found = rIndex[a.Key] + if found { + // New attrs overwrite any existing with the same key. + dropped++ + if idx < 0 { + r.front[-(idx + 1)] = a + } else { + r.back[idx] = a + } } else { - r.back[idx] = a + // Unique attribute. + (*unique) = append(*unique, a) + uIndex[a.Key] = len(*unique) - 1 } - } else { - // Unique attribute. - unique = append(unique, a) - uIndex[a.Key] = len(unique) - 1 + } + + if dropped > 0 { + attrs = make([]log.KeyValue, len(*unique)) + copy(attrs, *unique) + r.addDropped(dropped) } } - attrs = unique if r.attributeCountLimit > 0 && n+len(attrs) > r.attributeCountLimit { // Truncate the now unique attributes to comply with limit. 
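// Illustrative sketch, not part of the vendored diff: the user-visible effect
// of the AddAttributes deduplication path above and of the new
// WithAllowKeyDuplication option. The helper name emitWithDuplicates and the
// use of a SimpleProcessor with a nil exporter are assumptions for brevity;
// the provider options and log API calls are taken from the code in this patch.
package main

import (
	"context"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func emitWithDuplicates(ctx context.Context, lp *sdklog.LoggerProvider) {
	logger := lp.Logger("example")

	var rec log.Record
	rec.SetBody(log.StringValue("duplicate attribute keys"))
	// Two values for the same key "k".
	rec.AddAttributes(log.String("k", "first"), log.String("k", "second"))
	logger.Emit(ctx, rec)
}

func main() {
	ctx := context.Background()

	// Default: key-value collections are deduplicated with last-value-wins,
	// so only k="second" survives and one dropped attribute is counted.
	dedup := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(sdklog.NewSimpleProcessor(nil)),
	)
	emitWithDuplicates(ctx, dedup)

	// With the option: both "k" pairs reach the processors unchanged, trading
	// spec-mandated key uniqueness for lower per-record overhead.
	dups := sdklog.NewLoggerProvider(
		sdklog.WithAllowKeyDuplication(),
		sdklog.WithProcessor(sdklog.NewSimpleProcessor(nil)),
	)
	emitWithDuplicates(ctx, dups)

	_ = dedup.Shutdown(ctx)
	_ = dups.Shutdown(ctx)
}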
@@ -283,22 +333,27 @@ func (r *Record) addAttrs(attrs []log.KeyValue) { var i int for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ { a := attrs[i] - r.front[r.nFront] = r.applyAttrLimits(a) + r.front[r.nFront] = r.applyAttrLimitsAndDedup(a) r.nFront++ } - for j, a := range attrs[i:] { - attrs[i+j] = r.applyAttrLimits(a) - } + // Make a copy to avoid modifying the original. + j := len(r.back) r.back = slices.Grow(r.back, len(attrs[i:])) r.back = append(r.back, attrs[i:]...) + for i, a := range r.back[j:] { + r.back[i+j] = r.applyAttrLimitsAndDedup(a) + } } // SetAttributes sets (and overrides) attributes to the log record. func (r *Record) SetAttributes(attrs ...log.KeyValue) { var drop int - attrs, drop = dedup(attrs) - r.setDropped(drop) + r.setDropped(0) + if !r.allowDupKeys { + attrs, drop = dedup(attrs) + r.setDropped(drop) + } attrs, drop = head(attrs, r.attributeCountLimit) r.addDropped(drop) @@ -307,13 +362,13 @@ func (r *Record) SetAttributes(attrs ...log.KeyValue) { var i int for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ { a := attrs[i] - r.front[r.nFront] = r.applyAttrLimits(a) + r.front[r.nFront] = r.applyAttrLimitsAndDedup(a) r.nFront++ } r.back = slices.Clone(attrs[i:]) for i, a := range r.back { - r.back[i] = r.applyAttrLimits(a) + r.back[i] = r.applyAttrLimitsAndDedup(a) } } @@ -328,20 +383,31 @@ func head(kvs []log.KeyValue, n int) (out []log.KeyValue, dropped int) { // dedup deduplicates kvs front-to-back with the last value saved. func dedup(kvs []log.KeyValue) (unique []log.KeyValue, dropped int) { + if len(kvs) <= 1 { + return kvs, 0 // No deduplication needed. + } + index := getIndex() defer putIndex(index) - - unique = kvs[:0] // Use the same underlying array as kvs. + u := getUnique() + defer putUnique(u) for _, a := range kvs { idx, found := index[a.Key] if found { dropped++ - unique[idx] = a + (*u)[idx] = a } else { - unique = append(unique, a) - index[a.Key] = len(unique) - 1 + *u = append(*u, a) + index[a.Key] = len(*u) - 1 } } + + if dropped == 0 { + return kvs, 0 + } + + unique = make([]log.KeyValue, len(*u)) + copy(unique, *u) return unique, dropped } @@ -387,11 +453,8 @@ func (r *Record) SetTraceFlags(flags trace.TraceFlags) { } // Resource returns the entity that collected the log. -func (r *Record) Resource() resource.Resource { - if r.resource == nil { - return *resource.Empty() - } - return *r.resource +func (r *Record) Resource() *resource.Resource { + return r.resource } // InstrumentationScope returns the scope that the Logger was created with. @@ -410,37 +473,212 @@ func (r *Record) Clone() Record { return res } -func (r *Record) applyAttrLimits(attr log.KeyValue) log.KeyValue { - attr.Value = r.applyValueLimits(attr.Value) +func (r *Record) applyAttrLimitsAndDedup(attr log.KeyValue) log.KeyValue { + attr.Value = r.applyValueLimitsAndDedup(attr.Value) return attr } -func (r *Record) applyValueLimits(val log.Value) log.Value { +func (r *Record) applyValueLimitsAndDedup(val log.Value) log.Value { switch val.Kind() { case log.KindString: s := val.AsString() - if len(s) > r.attributeValueLengthLimit { + if r.attributeValueLengthLimit >= 0 && len(s) > r.attributeValueLengthLimit { val = log.StringValue(truncate(r.attributeValueLengthLimit, s)) } case log.KindSlice: sl := val.AsSlice() - for i := range sl { - sl[i] = r.applyValueLimits(sl[i]) + + // First check if any limits need to be applied. 
+ needsChange := false + for _, v := range sl { + if r.needsValueLimitsOrDedup(v) { + needsChange = true + break + } } - val = log.SliceValue(sl...) + + if needsChange { + // Create a new slice to avoid modifying the original. + newSl := make([]log.Value, len(sl)) + for i, item := range sl { + newSl[i] = r.applyValueLimitsAndDedup(item) + } + val = log.SliceValue(newSl...) + } + case log.KindMap: - // Deduplicate then truncate. Do not do at the same time to avoid - // wasted truncation operations. - kvs, dropped := dedup(val.AsMap()) - r.addDropped(dropped) - for i := range kvs { - kvs[i] = r.applyAttrLimits(kvs[i]) + kvs := val.AsMap() + var newKvs []log.KeyValue + var dropped int + + if !r.allowDupKeys { + // Deduplicate then truncate. + // Do not do at the same time to avoid wasted truncation operations. + newKvs, dropped = dedup(kvs) + r.addDropped(dropped) + } else { + newKvs = kvs + } + + // Check if any attribute limits need to be applied. + needsChange := false + if dropped > 0 { + needsChange = true // Already changed by dedup. + } else { + for _, kv := range newKvs { + if r.needsValueLimitsOrDedup(kv.Value) { + needsChange = true + break + } + } + } + + if needsChange { + // Only create new slice if changes are needed. + if dropped == 0 { + // Make a copy to avoid modifying the original. + newKvs = make([]log.KeyValue, len(kvs)) + copy(newKvs, kvs) + } + + for i := range newKvs { + newKvs[i] = r.applyAttrLimitsAndDedup(newKvs[i]) + } + val = log.MapValue(newKvs...) } - val = log.MapValue(kvs...) } return val } +// needsValueLimitsOrDedup checks if a value would be modified by applyValueLimitsAndDedup. +func (r *Record) needsValueLimitsOrDedup(val log.Value) bool { + switch val.Kind() { + case log.KindString: + return r.attributeValueLengthLimit >= 0 && len(val.AsString()) > r.attributeValueLengthLimit + case log.KindSlice: + for _, v := range val.AsSlice() { + if r.needsValueLimitsOrDedup(v) { + return true + } + } + case log.KindMap: + kvs := val.AsMap() + if !r.allowDupKeys && len(kvs) > 1 { + // Check for duplicates. + hasDuplicates := func() bool { + seen := getSeen() + defer putSeen(seen) + for _, kv := range kvs { + if _, ok := seen[kv.Key]; ok { + return true + } + seen[kv.Key] = struct{}{} + } + return false + }() + if hasDuplicates { + return true + } + } + for _, kv := range kvs { + if r.needsValueLimitsOrDedup(kv.Value) { + return true + } + } + } + return false +} + +func (r *Record) dedupeBodyCollections(val log.Value) log.Value { + switch val.Kind() { + case log.KindSlice: + sl := val.AsSlice() + + // Check if any nested values need deduplication. + needsChange := false + for _, item := range sl { + if r.needsBodyDedup(item) { + needsChange = true + break + } + } + + if needsChange { + // Create a new slice to avoid modifying the original. + newSl := make([]log.Value, len(sl)) + for i, item := range sl { + newSl[i] = r.dedupeBodyCollections(item) + } + val = log.SliceValue(newSl...) + } + + case log.KindMap: + kvs := val.AsMap() + newKvs, dropped := dedup(kvs) + + // Check if any nested values need deduplication. + needsValueChange := false + for _, kv := range newKvs { + if r.needsBodyDedup(kv.Value) { + needsValueChange = true + break + } + } + + if dropped > 0 || needsValueChange { + // Only create new value if changes are needed. + if dropped == 0 { + // Make a copy to avoid modifying the original. 
+ newKvs = make([]log.KeyValue, len(kvs)) + copy(newKvs, kvs) + } + + for i := range newKvs { + newKvs[i].Value = r.dedupeBodyCollections(newKvs[i].Value) + } + val = log.MapValue(newKvs...) + } + } + return val +} + +// needsBodyDedup checks if a value would be modified by dedupeBodyCollections. +func (r *Record) needsBodyDedup(val log.Value) bool { + switch val.Kind() { + case log.KindSlice: + for _, item := range val.AsSlice() { + if r.needsBodyDedup(item) { + return true + } + } + case log.KindMap: + kvs := val.AsMap() + if len(kvs) > 1 { + // Check for duplicates. + hasDuplicates := func() bool { + seen := getSeen() + defer putSeen(seen) + for _, kv := range kvs { + if _, ok := seen[kv.Key]; ok { + return true + } + seen[kv.Key] = struct{}{} + } + return false + }() + if hasDuplicates { + return true + } + } + for _, kv := range kvs { + if r.needsBodyDedup(kv.Value) { + return true + } + } + } + return false +} + // truncate returns a truncated version of s such that it contains less than // the limit number of characters. Truncation is applied by returning the limit // number of valid characters contained in s. diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/simple.go b/vendor/go.opentelemetry.io/otel/sdk/log/simple.go index 002e52ca..d71f945b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/simple.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/simple.go @@ -6,6 +6,9 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( "context" "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/sdk/log/internal/observ" ) // Compile-time check SimpleProcessor implements Processor. @@ -17,8 +20,8 @@ var _ Processor = (*SimpleProcessor)(nil) type SimpleProcessor struct { mu sync.Mutex exporter Exporter - - noCmp [0]func() //nolint: unused // This is indeed used. + inst *observ.SLP + noCmp [0]func() //nolint: unused // This is indeed used. } // NewSimpleProcessor is a simple Processor adapter. @@ -30,7 +33,15 @@ type SimpleProcessor struct { // [NewBatchProcessor] instead. However, there may be exceptions where certain // [Exporter] implementations perform better with this Processor. func NewSimpleProcessor(exporter Exporter, _ ...SimpleProcessorOption) *SimpleProcessor { - return &SimpleProcessor{exporter: exporter} + slp := &SimpleProcessor{ + exporter: exporter, + } + var err error + slp.inst, err = observ.NewSLP(observ.NextSimpleProcessorID()) + if err != nil { + otel.Handle(err) + } + return slp } var simpleProcRecordsPool = sync.Pool{ @@ -40,8 +51,13 @@ var simpleProcRecordsPool = sync.Pool{ }, } +// Enabled returns true, indicating this Processor will process all records. +func (*SimpleProcessor) Enabled(context.Context, EnabledParameters) bool { + return true +} + // OnEmit batches provided log record. 
-func (s *SimpleProcessor) OnEmit(ctx context.Context, r *Record) error { +func (s *SimpleProcessor) OnEmit(ctx context.Context, r *Record) (err error) { if s.exporter == nil { return nil } @@ -55,6 +71,11 @@ func (s *SimpleProcessor) OnEmit(ctx context.Context, r *Record) error { simpleProcRecordsPool.Put(records) }() + if s.inst != nil { + defer func() { + s.inst.LogProcessed(ctx, err) + }() + } return s.exporter.Export(ctx, *records) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index cf3c88e1..3f20eb7a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) // Detect returns a *Resource that describes the string as a value // corresponding to attribute.Key as well as the specific schemaURL. -func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { +func (sd stringDetector) Detect(context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 5ecd859a..bbe142d2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type containerIDProvider func() (string, error) @@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup" // Detect returns a *Resource that describes the id of the container. // If no container id found, an empty resource will be returned. -func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) { containerID, err := containerID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 813f0562..4a1b017e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 2d0f6549..5fed33d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type hostIDProvider func() (string, error) @@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) { type hostIDDetector struct{} // Detect returns a *Resource containing the platform specific host id. 
-func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (hostIDDetector) Detect(context.Context) (*Resource, error) { hostID, err := hostID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go index cc8b8938..4c1c30f2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build dragonfly || freebsd || netbsd || openbsd || solaris -// +build dragonfly freebsd netbsd openbsd solaris package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go index f84f1732..4a26096c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build linux -// +build linux package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go index df12c44c..63ad2fa4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 3677c83d..2b8ca20b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build windows -// +build windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 8a48ab4f..51da76e8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type osDescriptionProvider func() (string, error) @@ -32,7 +32,7 @@ type ( // Detect returns a *Resource that describes the operating system type the // service is running on. -func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { +func (osTypeDetector) Detect(context.Context) (*Resource, error) { osType := runtimeOS() osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) @@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the operating system the // service is running on. 
-func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (osDescriptionDetector) Detect(context.Context) (*Resource, error) { description, err := osDescription() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index f537e5ca..a1763267 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -63,12 +62,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string { return values } -// skip returns true if the line is blank or starts with a '#' character, and +// skip reports whether the line is blank or starts with a '#' character, and // therefore should be skipped from processing. func skip(line string) bool { line = strings.TrimSpace(line) - return len(line) == 0 || strings.HasPrefix(line, "#") + return line == "" || strings.HasPrefix(line, "#") } // parse attempts to split the provided line on the first '=' character, and then @@ -76,7 +75,7 @@ func skip(line string) bool { func parse(line string) (string, string, bool) { k, v, found := strings.Cut(line, "=") - if !found || len(k) == 0 { + if !found || k == "" { return "", "", false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go index a6ff26a4..6c50ab68 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go index a77742b0..25f62953 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 085fe68f..138e5772 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -112,19 +112,19 @@ type ( // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. 
-func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (processPIDDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil } // Detect returns a *Resource that describes the name of the process executable. -func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) { executableName := filepath.Base(commandArgs()[0]) return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil } // Detect returns a *Resource that describes the full path of the process executable. -func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) { executablePath, err := executablePath() if err != nil { return nil, err @@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err // Detect returns a *Resource that describes all the command arguments as received // by the process. -func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { +func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil } // Detect returns a *Resource that describes the username of the user that owns the // process. -func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { +func (processOwnerDetector) Detect(context.Context) (*Resource, error) { owner, err := owner() if err != nil { return nil, err @@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the name of the compiler used to compile // this process image. -func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil } // Detect returns a *Resource that describes the version of the runtime of this process. -func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil } // Detect returns a *Resource that describes the runtime of this process. -func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) { runtimeDescription := fmt.Sprintf( "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 09b91e1e..28e1e4f7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -112,7 +112,7 @@ func (r *Resource) String() string { } // MarshalLog is the marshaling function used by the logging system to represent this Resource. 
-func (r *Resource) MarshalLog() interface{} { +func (r *Resource) MarshalLog() any { return struct { Attributes attribute.Set SchemaURL string @@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns whether r and o represent the same resource. Two resources can +// Equal reports whether r and o represent the same resource. Two resources can // be equal even if they have different schema URLs. // // See the documentation on the [Resource] type for the pitfalls of using == diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 6872cbb4..7d15cbb9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -5,20 +5,24 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" ) // Defaults for BatchSpanProcessorOptions. const ( - DefaultMaxQueueSize = 2048 - DefaultScheduleDelay = 5000 + DefaultMaxQueueSize = 2048 + // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds. + DefaultScheduleDelay = 5000 + // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds. DefaultExportTimeout = 30000 DefaultMaxExportBatchSize = 512 ) @@ -66,6 +70,8 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 + inst *observ.BSP + batch []ReadOnlySpan batchMutex sync.Mutex timer *time.Timer @@ -86,11 +92,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) if maxExportBatchSize > maxQueueSize { - if DefaultMaxExportBatchSize > maxQueueSize { - maxExportBatchSize = maxQueueSize - } else { - maxExportBatchSize = DefaultMaxExportBatchSize - } + maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize) } o := BatchSpanProcessorOptions{ @@ -111,6 +113,16 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } + var err error + bsp.inst, err = observ.NewBSP( + nextProcessorID(), + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) + } + bsp.stopWait.Add(1) go func() { defer bsp.stopWait.Done() @@ -121,8 +133,16 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO return bsp } +var processorIDCounter atomic.Int64 + +// nextProcessorID returns an identifier for this batch span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextProcessorID() int64 { + return processorIDCounter.Add(1) - 1 +} + // OnStart method does nothing. -func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} +func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd method enqueues a ReadOnlySpan for later processing. 
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -161,6 +181,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } + if bsp.inst != nil { + err = errors.Join(err, bsp.inst.Shutdown()) + } }) return err } @@ -170,7 +193,7 @@ type forceFlushSpan struct { flushed chan struct{} } -func (f forceFlushSpan) SpanContext() trace.SpanContext { +func (forceFlushSpan) SpanContext() trace.SpanContext { return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) } @@ -267,12 +290,15 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if bsp.o.ExportTimeout > 0 { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout")) defer cancel() } if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + if bsp.inst != nil { + bsp.inst.Processed(ctx, int64(l)) + } err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. @@ -381,11 +407,14 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) + } return false } } -func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } @@ -395,12 +424,15 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b return true default: atomic.AddUint32(&bsp.dropped, 1) + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) + } } return false } // MarshalLog is the marshaling function used by the logging system to represent this Span Processor. -func (bsp *batchSpanProcessor) MarshalLog() interface{} { +func (bsp *batchSpanProcessor) MarshalLog() any { return struct { Type string SpanExporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index 1f60524e..b502c7d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. + +See [go.opentelemetry.io/otel/sdk/internal/x] for information about +the experimental features. */ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index 925bcf99..3649322a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -5,10 +5,8 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" - crand "crypto/rand" "encoding/binary" - "math/rand" - "sync" + "math/rand/v2" "go.opentelemetry.io/otel/trace" ) @@ -29,20 +27,15 @@ type IDGenerator interface { // must never be done outside of a new major release. 
} -type randomIDGenerator struct { - sync.Mutex - randSource *rand.Rand -} +type randomIDGenerator struct{} var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { - gen.Lock() - defer gen.Unlock() +func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID { sid := trace.SpanID{} for { - _, _ = gen.randSource.Read(sid[:]) + binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) if sid.IsValid() { break } @@ -52,19 +45,18 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. -func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { - gen.Lock() - defer gen.Unlock() +func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) { tid := trace.TraceID{} sid := trace.SpanID{} for { - _, _ = gen.randSource.Read(tid[:]) + binary.NativeEndian.PutUint64(tid[:8], rand.Uint64()) + binary.NativeEndian.PutUint64(tid[8:], rand.Uint64()) if tid.IsValid() { break } } for { - _, _ = gen.randSource.Read(sid[:]) + binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) if sid.IsValid() { break } @@ -73,9 +65,5 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. } func defaultIDGenerator() IDGenerator { - gen := &randomIDGenerator{} - var rngSeed int64 - _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) - gen.randSource = rand.New(rand.NewSource(rngSeed)) - return gen + return &randomIDGenerator{} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go similarity index 97% rename from vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go rename to vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go index 07923ed8..58f68df4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go @@ -1,7 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package env // import "go.opentelemetry.io/otel/sdk/internal/env" +// Package env provides types and functionality for environment variable support +// in the OpenTelemetry SDK. +package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env" import ( "os" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go new file mode 100644 index 00000000..bd7fe236 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the name of the instrumentation scope. + ScopeName = "go.opentelemetry.io/otel/sdk/trace/internal/observ" + + // SchemaURL is the schema URL of the instrumentation. 
+ SchemaURL = semconv.SchemaURL +) + +// ErrQueueFull is the attribute value for the "queue_full" error type. +var ErrQueueFull = otelconv.SDKProcessorSpanProcessed{}.AttrErrorType( + otelconv.ErrorTypeAttr("queue_full"), +) + +// BSPComponentName returns the component name attribute for a +// BatchSpanProcessor with the given ID. +func BSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeBatchingSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// BSP is the instrumentation for an OTel SDK BatchSpanProcessor. +type BSP struct { + reg metric.Registration + + processed metric.Int64Counter + processedOpts []metric.AddOption + processedQueueFullOpts []metric.AddOption +} + +func NewBSP(id int64, qLen func() int64, qMax int64) (*BSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + if err != nil { + err = fmt.Errorf("failed to create BSP queue capacity metric: %w", err) + } + qCapInst := qCap.Inst() + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP queue size metric: %w", e) + err = errors.Join(err, e) + } + qSizeInst := qSize.Inst() + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + cmpnt := BSPComponentName(id) + set := attribute.NewSet(cmpnt, cmpntT) + + obsOpts := []metric.ObserveOption{metric.WithAttributeSet(set)} + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSizeInst, qLen(), obsOpts...) + o.ObserveInt64(qCapInst, qMax, obsOpts...) + return nil + }, + qSizeInst, + qCapInst, + ) + if e != nil { + e := fmt.Errorf("failed to register BSP queue size/capacity callback: %w", e) + err = errors.Join(err, e) + } + + processed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP processed spans metric: %w", e) + err = errors.Join(err, e) + } + processedOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + set = attribute.NewSet(cmpnt, cmpntT, ErrQueueFull) + processedQueueFullOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + return &BSP{ + reg: reg, + processed: processed.Inst(), + processedOpts: processedOpts, + processedQueueFullOpts: processedQueueFullOpts, + }, err +} + +func (b *BSP) Shutdown() error { return b.reg.Unregister() } + +func (b *BSP) Processed(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedOpts...) +} + +func (b *BSP) ProcessedQueueFull(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedQueueFullOpts...) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go new file mode 100644 index 00000000..b542121e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides observability instrumentation for the OTel trace SDK +// package. 
+package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go new file mode 100644 index 00000000..7d338706 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +var measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, +} + +// SSP is the instrumentation for an OTel SDK SimpleSpanProcessor. +type SSP struct { + spansProcessedCounter metric.Int64Counter + addOpts []metric.AddOption + attrs []attribute.KeyValue +} + +// SSPComponentName returns the component name attribute for a +// SimpleSpanProcessor with the given ID. +func SSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeSimpleSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// NewSSP returns instrumentation for an OTel SDK SimpleSpanProcessor with the +// provided ID. +// +// If the experimental observability is disabled, nil is returned. +func NewSSP(id int64) (*SSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + spansProcessedCounter, err := otelconv.NewSDKProcessorSpanProcessed(meter) + if err != nil { + err = fmt.Errorf("failed to create SSP processed spans metric: %w", err) + } + + componentName := SSPComponentName(id) + componentType := spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeSimpleSpanProcessor) + attrs := []attribute.KeyValue{componentName, componentType} + addOpts := []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(attrs...))} + + return &SSP{ + spansProcessedCounter: spansProcessedCounter.Inst(), + addOpts: addOpts, + attrs: attrs, + }, err +} + +// SpanProcessed records that a span has been processed by the SimpleSpanProcessor. +// If err is non-nil, it records the processing error as an attribute. +func (ssp *SSP) SpanProcessed(ctx context.Context, err error) { + ssp.spansProcessedCounter.Add(ctx, 1, ssp.addOption(err)...) +} + +func (ssp *SSP) addOption(err error) []metric.AddOption { + if err == nil { + return ssp.addOpts + } + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice for reuse + measureAttrsPool.Put(attrs) + }() + *attrs = append(*attrs, ssp.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. 
+ return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*attrs...))} +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go new file mode 100644 index 00000000..a8a16458 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -0,0 +1,223 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/trace" +) + +var meterOpts = []metric.MeterOption{ + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), +} + +// Tracer is instrumentation for an OTel SDK Tracer. +type Tracer struct { + enabled bool + + live metric.Int64UpDownCounter + started metric.Int64Counter +} + +func NewTracer() (Tracer, error) { + if !x.Observability.Enabled() { + return Tracer{}, nil + } + meter := otel.GetMeterProvider().Meter(ScopeName, meterOpts...) + + var err error + l, e := otelconv.NewSDKSpanLive(meter) + if e != nil { + e = fmt.Errorf("failed to create span live metric: %w", e) + err = errors.Join(err, e) + } + + s, e := otelconv.NewSDKSpanStarted(meter) + if e != nil { + e = fmt.Errorf("failed to create span started metric: %w", e) + err = errors.Join(err, e) + } + + return Tracer{enabled: true, live: l.Inst(), started: s.Inst()}, err +} + +func (t Tracer) Enabled() bool { return t.enabled } + +func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) { + key := spanStartedKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + opts := spanStartedOpts[key] + t.started.Add(ctx, 1, opts...) +} + +func (t Tracer) SpanLive(ctx context.Context, span trace.Span) { + t.spanLive(ctx, 1, span) +} + +func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { + t.spanLive(ctx, -1, span) +} + +func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} + opts := spanLiveOpts[key] + t.live.Add(ctx, value, opts...) 
+} + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedKey struct { + parent parentState + sampling samplingState +} + +var spanStartedOpts = map[spanStartedKey][]metric.AddOption{ + { + parentStateNoParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateLocalParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateRemoteParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + + { + parentStateNoParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateLocalParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + + { + parentStateNoParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateLocalParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, +} + +type spanLiveKey struct { + sampled bool +} + +var spanLiveOpts = map[spanLiveKey][]metric.AddOption{ + {true}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + )), + }, + {false}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + )), + }, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go 
b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 185aa7c0..d2cf4ebd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -13,14 +13,13 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) -const ( - defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" -) +const defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" // tracerProviderConfig. type tracerProviderConfig struct { @@ -45,7 +44,7 @@ type tracerProviderConfig struct { } // MarshalLog is the marshaling function used by the logging system to represent this Provider. -func (cfg tracerProviderConfig) MarshalLog() interface{} { +func (cfg tracerProviderConfig) MarshalLog() any { return struct { SpanProcessors []SpanProcessor SamplerType string @@ -159,6 +158,13 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T provider: p, instrumentationScope: is, } + + var err error + t.inst, err = observ.NewTracer() + if err != nil { + otel.Handle(err) + } + p.namedTracer[is] = t } return t, ok @@ -169,7 +175,17 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) + global.Info( + "Tracer created", + "name", + name, + "version", + is.Version, + "schemaURL", + is.SchemaURL, + "attributes", + is.Attributes, + ) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index aa7b262d..689663d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler { type alwaysOnSampler struct{} -func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: RecordAndSample, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOnSampler) Description() string { +func (alwaysOnSampler) Description() string { return "AlwaysOnSampler" } @@ -131,14 +131,14 @@ func AlwaysSample() Sampler { type alwaysOffSampler struct{} -func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: Drop, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOffSampler) Description() string { +func (alwaysOffSampler) Description() string { return "AlwaysOffSampler" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 664e13e0..771e427a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -6,9 +6,12 @@ package trace // import 
"go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" + "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" + "go.opentelemetry.io/otel/trace" ) // simpleSpanProcessor is a SpanProcessor that synchronously sends all @@ -17,6 +20,8 @@ type simpleSpanProcessor struct { exporterMu sync.Mutex exporter SpanExporter stopOnce sync.Once + + inst *observ.SSP } var _ SpanProcessor = (*simpleSpanProcessor)(nil) @@ -33,24 +38,48 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { ssp := &simpleSpanProcessor{ exporter: exporter, } + + var err error + ssp.inst, err = observ.NewSSP(nextSimpleProcessorID()) + if err != nil { + otel.Handle(err) + } + global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") return ssp } +var simpleProcessorIDCounter atomic.Int64 + +// nextSimpleProcessorID returns an identifier for this simple span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextSimpleProcessorID() int64 { + return simpleProcessorIDCounter.Add(1) - 1 +} + // OnStart does nothing. -func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} +func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd immediately exports a ReadOnlySpan. func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { ssp.exporterMu.Lock() defer ssp.exporterMu.Unlock() + var err error if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { - if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { + err = ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}) + if err != nil { otel.Handle(err) } } + + if ssp.inst != nil { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. + ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext()) + ssp.inst.SpanProcessed(ctx, err) + } } // Shutdown shuts down the exporter this SimpleSpanProcessor exports to. @@ -104,13 +133,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { } // ForceFlush does nothing as there is no data to flush. -func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { +func (*simpleSpanProcessor) ForceFlush(context.Context) error { return nil } // MarshalLog is the marshaling function used by the logging system to represent // this Span Processor. -func (ssp *simpleSpanProcessor) MarshalLog() interface{} { +func (ssp *simpleSpanProcessor) MarshalLog() any { return struct { Type string Exporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index d511d0f2..63aa3378 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -35,7 +35,7 @@ type snapshot struct { var _ ReadOnlySpan = snapshot{} -func (s snapshot) private() {} +func (snapshot) private() {} // Name returns the name of the span. 
func (s snapshot) Name() string { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 8f4fc385..8cfd9f62 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -61,6 +61,7 @@ type ReadOnlySpan interface { InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. + // // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. @@ -150,6 +151,12 @@ type recordingSpan struct { // tracer is the SDK tracer that created this span. tracer *tracer + + // origCtx is the context used when starting this span that has the + // recordingSpan instance set as the active span. If not nil, it is used + // when ending the span to ensure any metrics are recorded with a context + // containing this span without requiring an additional allocation. + origCtx context.Context } var ( @@ -157,6 +164,10 @@ var ( _ runtimeTracer = (*recordingSpan)(nil) ) +func (s *recordingSpan) setOrigCtx(ctx context.Context) { + s.origCtx = ctx +} + // SpanContext returns the SpanContext of this span. func (s *recordingSpan) SpanContext() trace.SpanContext { if s == nil { @@ -165,7 +176,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext { return s.spanContext } -// IsRecording returns if this span is being recorded. If this span has ended +// IsRecording reports whether this span is being recorded. If this span has ended // this will return false. func (s *recordingSpan) IsRecording() bool { if s == nil { @@ -177,7 +188,7 @@ func (s *recordingSpan) IsRecording() bool { return s.isRecording() } -// isRecording returns if this span is being recorded. If this span has ended +// isRecording reports whether this span is being recorded. If this span has ended // this will return false. // // This method assumes s.mu.Lock is held by the caller. @@ -495,6 +506,17 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() + if s.tracer.inst.Enabled() { + ctx := s.origCtx + if ctx == nil { + // This should not happen as the origCtx should be set, but + // ensure trace information is propagated in the case of an + // error. + ctx = trace.ContextWithSpan(context.Background(), s) + } + defer s.tracer.inst.SpanEnded(ctx, s) + } + sps := s.tracer.provider.getSpanProcessors() if len(sps) == 0 { return @@ -545,7 +567,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { s.addEvent(semconv.ExceptionEventName, opts...) } -func typeStr(i interface{}) string { +func typeStr(i any) string { t := reflect.TypeOf(i) if t.PkgPath() == "" && t.Name() == "" { // Likely a builtin type. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go index bec5e209..321d9743 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -3,7 +3,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" -import "go.opentelemetry.io/otel/sdk/internal/env" +import "go.opentelemetry.io/otel/sdk/trace/internal/env" const ( // DefaultAttributeValueLengthLimit is the default maximum allowed diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 43419d3b..e1d08fd4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -8,6 +8,7 @@ import ( "time" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -17,6 +18,8 @@ type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope + + inst observ.Tracer } var _ trace.Tracer = &tracer{} @@ -26,7 +29,11 @@ var _ trace.Tracer = &tracer{} // The Span is created with the provided name and as a child of any existing // span context found in the passed context. The created Span will be // configured appropriately by any SpanOption passed. -func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { +func (tr *tracer) Start( + ctx context.Context, + name string, + options ...trace.SpanStartOption, +) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(options...) if ctx == nil { @@ -42,17 +49,32 @@ func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanS } s := tr.newSpan(ctx, name, &config) + newCtx := trace.ContextWithSpan(ctx, s) + if tr.inst.Enabled() { + if o, ok := s.(interface{ setOrigCtx(context.Context) }); ok { + // If this is a recording span, store the original context. + // This allows later retrieval of baggage and other information + // that may have been stored in the context at span start time and + // to avoid the allocation of repeatedly calling + // trace.ContextWithSpan. + o.setOrigCtx(newCtx) + } + psc := trace.SpanContextFromContext(ctx) + tr.inst.SpanStarted(newCtx, psc, s) + } + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { sps := tr.provider.getSpanProcessors() for _, sp := range sps { + // Use original context. sp.sp.OnStart(ctx, rw) } } if rtt, ok := s.(runtimeTracer); ok { - ctx = rtt.runtimeTrace(ctx) + newCtx = rtt.runtimeTrace(newCtx) } - return trace.ContextWithSpan(ctx, s), s + return newCtx, s } type runtimeTracer interface { @@ -108,11 +130,17 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo if !isRecording(samplingResult) { return tr.newNonRecordingSpan(sc) } - return tr.newRecordingSpan(psc, sc, name, samplingResult, config) + return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config) } // newRecordingSpan returns a new configured recordingSpan. 
-func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { +func (tr *tracer) newRecordingSpan( + ctx context.Context, + psc, sc trace.SpanContext, + name string, + sr SamplingResult, + config *trace.SpanConfig, +) *recordingSpan { startTime := config.Timestamp() if startTime.IsZero() { startTime = time.Now() @@ -144,6 +172,13 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) + if tr.inst.Enabled() { + // Propagate any existing values from the context with the new span to + // the measurement context. + ctx = trace.ContextWithSpan(ctx, s) + tr.inst.SpanLive(ctx, s) + } + return s } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go deleted file mode 100644 index b84dd2c5..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// version is the current release version of the metric SDK in use. -func version() string { - return "1.16.0-rc.1" -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 2b797fbd..0a3b3661 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -1,9 +1,10 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package sdk provides the OpenTelemetry default SDK for Go. package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.35.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md deleted file mode 100644 index 87b842c5..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.17.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go deleted file mode 100644 index e087c9c0..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.17.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go deleted file mode 100644 index c7b804bb..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. 
- -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -import "go.opentelemetry.io/otel/attribute" - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} - -// The attributes used to report a single exception associated with a span. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example above](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. 
- ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go deleted file mode 100644 index d318221e..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go deleted file mode 100644 index 7e365e82..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ /dev/null @@ -1,1999 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -import "go.opentelemetry.io/otel/attribute" - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. 
- BrowserPlatformKey = attribute.Key("browser.platform") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserUserAgentKey is the attribute Key conforming to the - // "browser.user_agent" semantic conventions. It represents the full - // user-agent string provided by the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) - // AppleWebKit/537.36 (KHTML, ' - // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' - // Note: The user-agent value SHOULD be provided only from browsers that do - // not have a mechanism to retrieve brands and platform individually from - // the User-Agent Client Hints API. To retrieve the value, the legacy - // `navigator.userAgent` API can be used. - BrowserUserAgentKey = attribute.Key("browser.user_agent") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserUserAgent returns an attribute KeyValue conforming to the -// "browser.user_agent" semantic conventions. It represents the full user-agent -// string provided by the browser -func BrowserUserAgent(val string) attribute.KeyValue { - return BrowserUserAgentKey.String(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://intl.cloud.tencent.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. 
It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// A container instance. -const ( - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageTagKey is the attribute Key conforming to the - // "container.image.tag" semantic conventions. It represents the container - // image tag. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageTag returns an attribute KeyValue conforming to the -// "container.image.tag" semantic conventions. It represents the container -// image tag. -func ContainerImageTag(val string) attribute.KeyValue { - return ContainerImageTagKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment -// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka -// deployment tier). 
-func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// The device on which the process represented by this resource is running. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of - // the device model rather than a machine readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. 
It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// A serverless instance. -const ( - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `faas.id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic - // conventions. It represents the unique ID of the single function that - // this runtime instance executes. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so consider setting `faas.id` as a span attribute instead. - // - // The exact value to use for `faas.id` depends on the cloud provider: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. 
- // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. - FaaSIDKey = attribute.Key("faas.id") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run:** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function in MiB. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic -// conventions. It represents the unique ID of the single function that this -// runtime instance executes. -func FaaSID(val string) attribute.KeyValue { - return FaaSIDKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. 
-func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function in MiB. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// A host is defined as a general computing instance. -const ( - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // Linux systems, the `machine-id` located in `/etc/machine-id` or - // `/var/lib/dbus/machine-id` may be used. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - HostArchKey = attribute.Key("host.arch") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID. For Cloud, this - // value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized Linux -// systems, the `machine-id` located in `/etc/machine-id` or -// `/var/lib/dbus/machine-id` may be used. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image as defined in [Version -// Attributes](README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// A Kubernetes Cluster. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// A Kubernetes Node object. -const ( - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// A Kubernetes Namespace. -const ( - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// A Kubernetes Pod object. -const ( - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// A container in a -// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. 
This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") -) - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// A Kubernetes ReplicaSet object. -const ( - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// A Kubernetes Deployment object. -const ( - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. 
-func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// A Kubernetes StatefulSet object. -const ( - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// A Kubernetes DaemonSet object. -const ( - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// A Kubernetes Job object. -const ( - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. 
-func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// A Kubernetes CronJob object. -const ( - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - OSTypeKey = attribute.Key("os.type") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](../../resource/semantic_conventions/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) 
- // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: ConditionallyRequired (See alternative attributes - // below.) - // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. 
It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// The single (language) runtime instance which is monitored. -const ( - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. 
instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - - // TelemetryAutoVersionKey is the attribute Key conforming to the - // "telemetry.auto.version" semantic conventions. It represents the version - // string of the auto instrumentation agent, if used. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// TelemetryAutoVersion returns an attribute KeyValue conforming to the -// "telemetry.auto.version" semantic conventions. It represents the version -// string of the auto instrumentation agent, if used. -func TelemetryAutoVersion(val string) attribute.KeyValue { - return TelemetryAutoVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. 
-func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OtelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OtelScopeNameKey = attribute.Key("otel.scope.name") - - // OtelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OtelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OtelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OtelScopeName(val string) attribute.KeyValue { - return OtelScopeNameKey.String(val) -} - -// OtelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OtelScopeVersion(val string) attribute.KeyValue { - return OtelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OtelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. It represents the deprecated, - // use the `otel.scope.name` attribute. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - OtelLibraryNameKey = attribute.Key("otel.library.name") - - // OtelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. It represents the - // deprecated, use the `otel.scope.version` attribute. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - OtelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OtelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. It represents the deprecated, use -// the `otel.scope.name` attribute. -func OtelLibraryName(val string) attribute.KeyValue { - return OtelLibraryNameKey.String(val) -} - -// OtelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. It represents the deprecated, -// use the `otel.scope.version` attribute. 
-func OtelLibraryVersion(val string) attribute.KeyValue { - return OtelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go deleted file mode 100644 index 21497bb6..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ /dev/null @@ -1,3364 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" - -import "go.opentelemetry.io/otel/attribute" - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") -) - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// Attributes for Events represented using Log Records. 
-const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the name identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'click', 'exception' - EventNameKey = attribute.Key("event.name") - - // EventDomainKey is the attribute Key conforming to the "event.domain" - // semantic conventions. It represents the domain identifies the business - // context for the events. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: Events across different domains may have same `event.name`, yet be - // unrelated events. - EventDomainKey = attribute.Key("event.domain") -) - -var ( - // Events from browser apps - EventDomainBrowser = EventDomainKey.String("browser") - // Events from mobile apps - EventDomainDevice = EventDomainKey.String("device") - // Events from Kubernetes - EventDomainK8S = EventDomainKey.String("k8s") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the name identifies the event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `faas.id` if an alias is involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. 
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. 
-func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span does not depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The attributes used to perform database client calls. -const ( - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - DBSystemKey = attribute.Key("db.system") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: ConditionallyRequired (If applicable.) 
- // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If applicable and not - // explicitly disabled via instrumentation configuration.) - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - // Note: The value may be sanitized to exclude sensitive information. - DBStatementKey = attribute.Key("db.statement") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If `db.statement` is not - // applicable.) - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") -) - -// DBConnectionString returns an attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. 
-func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. -func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// Connection-level attributes for Microsoft SQL Server -const ( - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no - // longer required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// Call-level attributes for Cassandra -const ( - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. how many rows will be returned at once. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary table that the operation is acting upon, including the keyspace - // name (if applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary table that the operation is acting upon, including the keyspace name -// (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// Call-level attributes for Redis -const ( - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. 
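The Cassandra-specific constructors and consistency-level members compose the same way; a hedged sketch, with example values taken from the doc comments above:

// cassandraQueryAttrs is illustrative only.
func cassandraQueryAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.DBSystemCassandra,
		semconv.DBCassandraTable("mytable"), // primary table, including keyspace if applicable
		semconv.DBCassandraPageSize(5000),   // fetch size used for paging
		semconv.DBCassandraConsistencyLevelLocalQuorum,
		semconv.DBCassandraIdempotence(true),
		semconv.DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
		semconv.DBCassandraCoordinatorDC("us-west-2"),
	}
}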
It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If other than the default - // database (`0`).) - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// Call-level attributes for MongoDB -const ( - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the collection -// being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// Call-level attributes for SQL databases -const ( - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OtelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OtelStatusCodeKey = attribute.Key("otel.status_code") - - // OtelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. 
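For Redis and MongoDB the pattern is the same, and `DBSQLTable` plays the analogous role for SQL spans; a sketch under the same import assumptions:

// redisAndMongoAttrs is illustrative only.
func redisAndMongoAttrs() (redis, mongo []attribute.KeyValue) {
	redis = []attribute.KeyValue{
		semconv.DBSystemRedis,
		semconv.DBRedisDBIndex(1), // only when a non-default database is selected
	}
	mongo = []attribute.KeyValue{
		semconv.DBSystemMongoDB,
		semconv.DBName("shop"),
		semconv.DBMongoDBCollection("products"), // required for MongoDB spans
	}
	return redis, mongo
}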
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OtelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OtelStatusCodeOk = OtelStatusCodeKey.String("OK") - // The operation contains an error - OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") -) - -// OtelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OtelStatusDescription(val string) attribute.KeyValue { - return OtelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function execution. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: For the server/consumer span on the incoming side, - // `faas.trigger` MUST be set. - // - // Clients invoking FaaS instances usually cannot set `faas.trigger`, - // since they would typically need to look in the payload to determine - // the event type. If clients set it, it should be the same as the - // trigger that corresponding incoming would have (i.e., this has - // nothing to do with the underlying transport used to make the API - // call to invoke the lambda, which is often HTTP). - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" - // semantic conventions. It represents the execution ID of the current - // function execution. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSExecutionKey = attribute.Key("faas.execution") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSExecution returns an attribute KeyValue conforming to the -// "faas.execution" semantic conventions. It represents the execution ID of the -// current function execution. -func FaaSExecution(val string) attribute.KeyValue { - return FaaSExecutionKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. 
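The `otel.status_*` attributes are what non-OTLP exporters emit for span status, so instrumentation rarely sets them directly; the sketch below only demonstrates the constructors, alongside the incoming-side FaaS attributes:

// faasServerSpanAttrs is illustrative only; real code would set span status
// through the tracing API and let the exporter map it to these attributes.
func faasServerSpanAttrs(invocationID string, failed bool) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.FaaSTriggerHTTP,             // faas.trigger MUST be set on the incoming side
		semconv.FaaSExecution(invocationID), // execution ID of this invocation
	}
	if failed {
		attrs = append(attrs,
			semconv.OtelStatusCodeError, // only OK or ERROR; never emitted for UNSET
			semconv.OtelStatusDescription("resource not found"),
		)
	}
	return attrs
}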
- // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
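A datasource-triggered invocation would combine the document attributes above roughly as follows (values copied from the examples in the doc comments):

// faasDatasourceAttrs is illustrative only.
func faasDatasourceAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.FaaSTriggerDatasource,
		semconv.FaaSDocumentOperationInsert,              // insert | edit | delete
		semconv.FaaSDocumentCollection("myBucketName"),   // bucket / database name
		semconv.FaaSDocumentName("myFile.txt"),           // file / table name
		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"), // ISO 8601, UTC
	}
}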
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// Contains additional attributes for outgoing FaaS spans. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. 
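For a scheduled invocation, the timer attributes combine with `faas.coldstart` as in this sketch (same import assumptions as before):

// faasTimerAttrs is illustrative only.
func faasTimerAttrs(coldStart bool) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.FaaSTriggerTimer,
		semconv.FaaSTime("2020-01-23T13:47:06Z"), // invocation time, ISO 8601 in UTC
		semconv.FaaSCron("0/5 * * * ? *"),        // schedule as a cron expression
		semconv.FaaSColdstart(coldStart),         // true only for the first execution
	}
}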
In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. It represents the transport protocol used. See - // note below. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - NetTransportKey = attribute.Key("net.transport") - - // NetAppProtocolNameKey is the attribute Key conforming to the - // "net.app.protocol.name" semantic conventions. It represents the - // application layer protocol used. The value SHOULD be normalized to - // lowercase. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") - - // NetAppProtocolVersionKey is the attribute Key conforming to the - // "net.app.protocol.version" semantic conventions. It represents the - // version of the application layer protocol used. See note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `net.app.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client used has a version of `0.27.2`, but sends HTTP version - // `1.1`, this attribute should be set to `1.1`. - NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. It represents the remote - // socket peer name. - // - // Type: string - // RequirementLevel: Recommended (If available and different from - // `net.peer.name` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 'proxy.example.com' - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. 
It represents the remote - // socket peer address: IPv4 or IPv6 for internet protocols, path for local - // communication, - // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '127.0.0.1', '/tmp/mysql.sock' - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. It represents the remote - // socket peer port. - // - // Type: int - // RequirementLevel: Recommended (If defined for the address family and if - // different than `net.peer.port` and if `net.sock.peer.addr` is set.) - // Stability: stable - // Examples: 16456 - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. It represents the protocol - // [address - // family](https://man7.org/linux/man-pages/man7/address_families.7.html) - // which is used for communication. - // - // Type: Enum - // RequirementLevel: ConditionallyRequired (If different than `inet` and if - // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers - // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in - // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support - // instrumentations that follow previous versions of this document.) - // Stability: stable - // Examples: 'inet6', 'bluetooth' - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. It represents the logical remote hostname, see - // note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com' - // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an - // extra DNS lookup. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. It represents the logical remote port number - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. It represents the logical local hostname or - // similar, see note below. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. It represents the logical local port number, - // preferably the one that the peer used to connect - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 8080 - NetHostPortKey = attribute.Key("net.host.port") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. It represents the local - // socket address. Useful in case of a multi-IP host. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '192.168.0.1' - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. It represents the local - // socket port number. 
- // - // Type: int - // RequirementLevel: Recommended (If defined for the address family and if - // different than `net.host.port` and if `net.sock.host.addr` is set.) - // Stability: stable - // Examples: 35555 - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetHostConnectionTypeKey is the attribute Key conforming to the - // "net.host.connection.type" semantic conventions. It represents the - // internet connection type currently being used by the host. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'wifi' - NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - - // NetHostConnectionSubtypeKey is the attribute Key conforming to the - // "net.host.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'LTE' - NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - - // NetHostCarrierNameKey is the attribute Key conforming to the - // "net.host.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'sprint' - NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - - // NetHostCarrierMccKey is the attribute Key conforming to the - // "net.host.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '310' - NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - - // NetHostCarrierMncKey is the attribute Key conforming to the - // "net.host.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '001' - NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - - // NetHostCarrierIccKey is the attribute Key conforming to the - // "net.host.carrier.icc" semantic conventions. It represents the ISO - // 3166-1 alpha-2 2-character country code associated with the mobile - // carrier network. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'DE' - NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe. 
See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // IPv4 address - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -var ( - // wifi - NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") - // wired - NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") - // cell - NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") - // unavailable - NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") - // unknown - NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") - // EDGE - NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") - // UMTS - NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") - // CDMA - NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") - // HSPA - NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") - // IDEN - NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") - // EVDO Rev. B - NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") - // LTE - NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") - // EHRPD - NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") - // GSM - NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") -) - -// NetAppProtocolName returns an attribute KeyValue conforming to the -// "net.app.protocol.name" semantic conventions. It represents the application -// layer protocol used. The value SHOULD be normalized to lowercase. -func NetAppProtocolName(val string) attribute.KeyValue { - return NetAppProtocolNameKey.String(val) -} - -// NetAppProtocolVersion returns an attribute KeyValue conforming to the -// "net.app.protocol.version" semantic conventions. It represents the version -// of the application layer protocol used. See note below. 
-func NetAppProtocolVersion(val string) attribute.KeyValue { - return NetAppProtocolVersionKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. It represents the remote socket -// peer name. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. It represents the remote socket -// peer address: IPv4 or IPv6 for internet protocols, path for local -// communication, -// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. It represents the remote socket -// peer port. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. It represents the logical remote -// hostname, see note below. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. It represents the logical remote port -// number -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. It represents the logical local -// hostname or similar, see note below. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. It represents the logical local port -// number, preferably the one that the peer used to connect -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. It represents the local socket -// address. Useful in case of a multi-IP host. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. It represents the local socket -// port number. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetHostCarrierName returns an attribute KeyValue conforming to the -// "net.host.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetHostCarrierName(val string) attribute.KeyValue { - return NetHostCarrierNameKey.String(val) -} - -// NetHostCarrierMcc returns an attribute KeyValue conforming to the -// "net.host.carrier.mcc" semantic conventions. It represents the mobile -// carrier country code. -func NetHostCarrierMcc(val string) attribute.KeyValue { - return NetHostCarrierMccKey.String(val) -} - -// NetHostCarrierMnc returns an attribute KeyValue conforming to the -// "net.host.carrier.mnc" semantic conventions. It represents the mobile -// carrier network code. 
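The network attributes split into logical (`net.peer.*`) and socket-level (`net.sock.*`) values; a hedged sketch of a client-side set, again assuming the earlier imports:

// netClientAttrs is illustrative only.
func netClientAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.NetTransportTCP, // enum member, not a constructor
		semconv.NetAppProtocolName("http"),
		semconv.NetAppProtocolVersion("1.1"), // protocol spoken on the wire, not the client library version
		semconv.NetPeerName("example.com"),   // logical host; skip if it would need an extra DNS lookup
		semconv.NetPeerPort(443),
		semconv.NetSockPeerAddr("93.184.216.34"),
		semconv.NetSockPeerPort(443),
	}
}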
-func NetHostCarrierMnc(val string) attribute.KeyValue { - return NetHostCarrierMncKey.String(val) -} - -// NetHostCarrierIcc returns an attribute KeyValue conforming to the -// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetHostCarrierIcc(val string) attribute.KeyValue { - return NetHostCarrierIccKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](../../resource/semantic_conventions/README.md#service) - // of the remote service. SHOULD be equal to the actual `service.name` - // resource attribute of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](../../resource/semantic_conventions/README.md#service) of -// the remote service. SHOULD be equal to the actual `service.name` resource -// attribute of the remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. 
-func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). 
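`peer.service`, the `enduser.*` set, and the thread attributes are plain key/value constructors; an illustrative grouping:

// requestContextAttrs is illustrative only.
func requestContextAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.PeerService("AuthTokenCache"), // remote service.name, if known
		semconv.EnduserID("username"),
		semconv.EnduserRole("admin"),
		semconv.EnduserScope("read:message, write:files"),
		semconv.ThreadID(42), // managed thread ID, not the OS thread ID
		semconv.ThreadName("main"),
	}
}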
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") -) - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// Semantic conventions for HTTP client and server Spans. -const ( - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. It represents the hTTP request method. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. It represents the [HTTP - // response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: ConditionallyRequired (If and only if one was - // received/sent.) 
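The code-location constructors give a span enough context to point back at the instrumented function; illustrative values from the doc comments:

// codeLocationAttrs is illustrative only.
func codeLocationAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.CodeNamespace("com.example.MyHTTPService"),
		semconv.CodeFunction("serveRequest"),
		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
		semconv.CodeLineNumber(42),
		semconv.CodeColumn(16),
	}
}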
- // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. It represents the kind of HTTP protocol used. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Note: If `net.transport` is not specified, it can be assumed to be - // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is - // assumed. - HTTPFlavorKey = attribute.Key("http.flavor") - - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. It represents the value of the - // [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - HTTPUserAgentKey = attribute.Key("http.user_agent") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. It represents the - // size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. It represents the - // size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") -) - -var ( - // HTTP/1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. It represents the hTTP request method. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. It represents the [HTTP response -// status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. It represents the value of the [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. 
-func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. It represents the size -// of the request payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. It represents the size -// of the response payload body in bytes. This is the number of bytes -// transferred excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// Semantic Convention for HTTP Client -const ( - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. It represents the full HTTP request URL in the form - // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is - // not transmitted over HTTP, but if it is known, it should be included - // nevertheless. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the - // attribute's value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - - // HTTPResendCountKey is the attribute Key conforming to the - // "http.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Recommended (if and only if request was retried.) - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPResendCountKey = attribute.Key("http.resend_count") -) - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. It represents the full HTTP request URL in the form -// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not -// transmitted over HTTP, but if it is known, it should be included -// nevertheless. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPResendCount returns an attribute KeyValue conforming to the -// "http.resend_count" semantic conventions. It represents the ordinal number -// of request resending attempt (for any reason, including redirects). 
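An HTTP client span would combine the common and client-only constructors roughly as below (illustrative; note the URL must never carry credentials):

// httpClientSpanAttrs is illustrative only.
func httpClientSpanAttrs(status int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.HTTPMethod("GET"),
		semconv.HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
		semconv.HTTPFlavorHTTP11,
		semconv.HTTPUserAgent("CERN-LineMode/2.15 libwww/2.17b3"),
		semconv.HTTPStatusCode(status), // only if a response was actually received
		semconv.HTTPRequestContentLength(3495),
	}
}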
-func HTTPResendCount(val int) attribute.KeyValue { - return HTTPResendCountKey.Int(val) -} - -// Semantic Convention for HTTP Server -const ( - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. It represents the URI scheme identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. It represents the full request target as passed in - // a HTTP request line or equivalent. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '/path/12314/?q=ddds' - HTTPTargetKey = attribute.Key("http.target") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route (path template in - // the format used by the respective server framework). See note below - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if it's available) - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: 'http.route' MUST NOT be populated when this is not supported by - // the HTTP server framework as the route attribute should have - // low-cardinality and the URI path can NOT substitute it. - HTTPRouteKey = attribute.Key("http.route") - - // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" - // semantic conventions. It represents the IP address of the original - // client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.sock.peer.addr`, which - // would - // identify the network-level peer, which may be a proxy. - // - // This attribute should be set when a source of information different - // from the one used for `net.sock.peer.addr`, is available even if that - // other - // source just confirms the same value as `net.sock.peer.addr`. - // Rationale: For `net.sock.peer.addr`, one typically does not know if it - // comes from a proxy, reverse proxy, or the actual client. Setting - // `http.client_ip` when it's the same as `net.sock.peer.addr` means that - // one is at least somewhat confident that the address is not that of - // the closest proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. It represents the URI scheme identifying the used -// protocol. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. It represents the full request target as passed in a -// HTTP request line or equivalent. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route (path template in the -// format used by the respective server framework). 
See note below -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// HTTPClientIP returns an attribute KeyValue conforming to the -// "http.client_ip" semantic conventions. It represents the IP address of the -// original client behind all proxies, if known (e.g. from -// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). -func HTTPClientIP(val string) attribute.KeyValue { - return HTTPClientIPKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. 
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. 
-func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. 
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") - - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. 
- GraphqlDocumentKey = attribute.Key("graphql.document") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// Semantic convention describing per-message attributes populated on messaging -// spans or links. -const ( - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the [conversation ID](#conversations) identifying the conversation to - // which the message belongs, represented as a string. Sometimes called - // "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to - // the "messaging.message.payload_size_bytes" semantic conventions. It - // represents the (uncompressed) size of the message payload in bytes. Also - // use this attribute if it is unknown whether the compressed or - // uncompressed payload size is reported. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") - - // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key - // conforming to the "messaging.message.payload_compressed_size_bytes" - // semantic conventions. It represents the compressed size of the message - // payload in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") -) - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. 
It -// represents the [conversation ID](#conversations) identifying the -// conversation to which the message belongs, represented as a string. -// Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming -// to the "messaging.message.payload_size_bytes" semantic conventions. It -// represents the (uncompressed) size of the message payload in bytes. Also use -// this attribute if it is unknown whether the compressed or uncompressed -// payload size is reported. -func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadSizeBytesKey.Int(val) -} - -// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue -// conforming to the "messaging.message.payload_compressed_size_bytes" semantic -// conventions. It represents the compressed size of the message payload in -// bytes. -func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { - return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) -} - -// Semantic convention for attributes that describe messaging destination on -// broker -const ( - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker does not have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationKindKey is the attribute Key conforming to the - // "messaging.destination.kind" semantic conventions. It represents the - // kind of message destination - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. 
It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") -) - -var ( - // A message sent to a queue - MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") - // A message sent to a topic - MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") -) - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// Semantic convention for attributes that describe messaging source on broker -const ( - // MessagingSourceNameKey is the attribute Key conforming to the - // "messaging.source.name" semantic conventions. It represents the message - // source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - // Note: Source name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker does not have such notion, the source name SHOULD uniquely - // identify the broker. - MessagingSourceNameKey = attribute.Key("messaging.source.name") - - // MessagingSourceKindKey is the attribute Key conforming to the - // "messaging.source.kind" semantic conventions. It represents the kind of - // message source - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingSourceKindKey = attribute.Key("messaging.source.kind") - - // MessagingSourceTemplateKey is the attribute Key conforming to the - // "messaging.source.template" semantic conventions. It represents the low - // cardinality representation of the messaging source name - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/customers/{customerID}' - // Note: Source names could be constructed from templates. An example would - // be a source name involving a user name or product id. 
Although the - // source name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. - MessagingSourceTemplateKey = attribute.Key("messaging.source.template") - - // MessagingSourceTemporaryKey is the attribute Key conforming to the - // "messaging.source.temporary" semantic conventions. It represents a - // boolean that is true if the message source is temporary and might not - // exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") - - // MessagingSourceAnonymousKey is the attribute Key conforming to the - // "messaging.source.anonymous" semantic conventions. It represents a - // boolean that is true if the message source is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") -) - -var ( - // A message received from a queue - MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") - // A message received from a topic - MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") -) - -// MessagingSourceName returns an attribute KeyValue conforming to the -// "messaging.source.name" semantic conventions. It represents the message -// source name -func MessagingSourceName(val string) attribute.KeyValue { - return MessagingSourceNameKey.String(val) -} - -// MessagingSourceTemplate returns an attribute KeyValue conforming to the -// "messaging.source.template" semantic conventions. It represents the low -// cardinality representation of the messaging source name -func MessagingSourceTemplate(val string) attribute.KeyValue { - return MessagingSourceTemplateKey.String(val) -} - -// MessagingSourceTemporary returns an attribute KeyValue conforming to the -// "messaging.source.temporary" semantic conventions. It represents a boolean -// that is true if the message source is temporary and might not exist anymore -// after messages are processed. -func MessagingSourceTemporary(val bool) attribute.KeyValue { - return MessagingSourceTemporaryKey.Bool(val) -} - -// MessagingSourceAnonymous returns an attribute KeyValue conforming to the -// "messaging.source.anonymous" semantic conventions. It represents a boolean -// that is true if the message source is anonymous (could be unnamed or have -// auto-generated name). -func MessagingSourceAnonymous(val bool) attribute.KeyValue { - return MessagingSourceAnonymousKey.Bool(val) -} - -// General attributes used in messaging systems. -const ( - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents a string - // identifying the messaging system. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation as defined in the [Operation - // names](#operation-names) section above. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Note: If a custom value is used, it MUST be of low cardinality. 
- MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the span describes an - // operation on a batch of messages.) - // Stability: stable - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") -) - -var ( - // publish - MessagingOperationPublish = MessagingOperationKey.String("publish") - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// MessagingSystem returns an attribute KeyValue conforming to the -// "messaging.system" semantic conventions. It represents a string identifying -// the messaging system. -func MessagingSystem(val string) attribute.KeyValue { - return MessagingSystemKey.String(val) -} - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// Semantic convention for a consumer of messages received from a messaging -// system -const ( - // MessagingConsumerIDKey is the attribute Key conforming to the - // "messaging.consumer.id" semantic conventions. It represents the - // identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if - // both are present, or only `messaging.kafka.consumer.group`. For brokers, - // such as RabbitMQ and Artemis, set it to the `client_id` of the client - // consuming the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'mygroup - client-6' - MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") -) - -// MessagingConsumerID returns an attribute KeyValue conforming to the -// "messaging.consumer.id" semantic conventions. It represents the identifier -// for the consumer receiving a message. For Kafka, set it to -// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both -// are present, or only `messaging.kafka.consumer.group`. For brokers, such as -// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the -// message. -func MessagingConsumerID(val string) attribute.KeyValue { - return MessagingConsumerIDKey.String(val) -} - -// Attributes for RabbitMQ -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If not empty.) 
- // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// Attributes for Apache Kafka -const ( - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaClientIDKey is the attribute Key conforming to the - // "messaging.kafka.client_id" semantic conventions. It represents the - // client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the - // "messaging.kafka.source.partition" semantic conventions. It represents - // the partition the message is received from. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 2 - MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. 
It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If value is `true`. When - // missing, the value is assumed to be `false`.) - // Stability: stable - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaClientID returns an attribute KeyValue conforming to the -// "messaging.kafka.client_id" semantic conventions. It represents the client -// ID for the Consumer or Producer that is handling the message. -func MessagingKafkaClientID(val string) attribute.KeyValue { - return MessagingKafkaClientIDKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to -// the "messaging.kafka.source.partition" semantic conventions. It represents -// the partition the message is received from. -func MessagingKafkaSourcePartition(val int) attribute.KeyValue { - return MessagingKafkaSourcePartitionKey.Int(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// Attributes for Apache RocketMQ -const ( - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. 
It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqClientIDKey is the attribute Key conforming to the - // "messaging.rocketmq.client_id" semantic conventions. It represents the - // unique identifier for each client. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'myhost@8742@s8083jm' - MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delay time level is not specified.) - // Stability: stable - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If the message type is delay - // and delivery timestamp is not specified.) - // Stability: stable - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) - // Stability: stable - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: stable - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqClientID returns an attribute KeyValue conforming to the -// "messaging.rocketmq.client_id" semantic conventions. It represents the -// unique identifier for each client. -func MessagingRocketmqClientID(val string) attribute.KeyValue { - return MessagingRocketmqClientIDKey.String(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. 
Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// Semantic conventions for remote procedure calls. -const ( - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCSystemKey = attribute.Key("rpc.system") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") -) - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. 
It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// Tech-specific attributes for gRPC. -const ( - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). -const ( - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // does not specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If other than the default - // version (`1.0`)) - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: ConditionallyRequired (If response is not successful.) 
- // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// does not specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md deleted file mode 100644 index 2de1fc3c..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.26.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go deleted file mode 100644 index d8dc822b..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go +++ /dev/null @@ -1,8996 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -import "go.opentelemetry.io/otel/attribute" - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. 
More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: stable - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming - // to the "aspnetcore.diagnostics.exception.result" semantic conventions. - // It represents the aSP.NET Core exception middleware handling result - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'handled', 'unhandled' - AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") - - // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the - // "aspnetcore.routing.match_status" semantic conventions. It represents - // the match result - success or failure - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'success', 'failure' - AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -var ( - // Exception was handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") - // Exception was not handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") - // Exception handling was skipped because the response had started - AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") - // Exception handling didn't run because the request was aborted - AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") -) - -var ( - // Match succeeded - AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") - // Match failed - AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. 
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// Generic attributes for AWS services. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes for AWS DynamoDB. -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. 
It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. 
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// Attributes for AWS Elastic Container Service (ECS). -const ( - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID - // MUST be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is - // populated.) - // Stability: experimental - // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', - // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a - // running [ECS - // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', - // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family - // name of the [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - // used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSTaskID returns an attribute KeyValue conforming to the -// "aws.ecs.task.id" semantic conventions. It represents the ID of a running -// ECS task. The ID MUST be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of a running -// [ECS -// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) -// used to create the ECS task. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Attributes for AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Attributes for AWS Logs. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
- // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Attributes for AWS Lambda. -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for AWS S3. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. 
- // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// The web browser attributes -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Apps - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - 
// IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Attributes for CloudEvents. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. 
It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). 
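// Illustrative usage sketch (not part of the vendored conventions file):
// recording the code.* attributes from this group on the current span, with
// the values derived from the Go runtime. The semconv import alias/version and
// the helper name are assumptions; only helpers shown in this file and
// standard-library calls are used.
//
// Assumed imports:
//
//	"context"
//	"runtime"
//	"go.opentelemetry.io/otel/trace"
//	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
func annotateSpanWithCodeLocation(ctx context.Context) {
	span := trace.SpanFromContext(ctx)

	// Resolve the caller's function, file and line via the runtime.
	pc, file, line, ok := runtime.Caller(1)
	if !ok {
		return
	}
	fn := runtime.FuncForPC(pc)
	if fn == nil {
		return
	}

	span.SetAttributes(
		semconv.CodeFilepath(file),      // source file best representing the operation
		semconv.CodeLineNumber(line),    // line within code.filepath
		semconv.CodeFunction(fn.Name()), // fully qualified name; a real instrumentation might split off code.namespace
	)
}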
-func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCPUStateKey is the attribute Key conforming to the - // "container.cpu.state" semantic conventions. It represents the CPU state - // for this data point. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'user', 'kernel' - ContainerCPUStateKey = attribute.Key("container.cpu.state") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -var ( - // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) - ContainerCPUStateUser = ContainerCPUStateKey.String("user") - // When CPU is used by the system (host OS) - ContainerCPUStateSystem = ContainerCPUStateKey.String("system") - // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) - ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. 
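// Illustrative usage sketch (not part of the vendored conventions file):
// describing a container instance as OpenTelemetry resource attributes using
// the container.* helpers in this group. The semconv import alias/version is
// an assumption; the literal values are taken from the documented examples and
// would normally come from the container runtime or a resource detector.
//
// Assumed imports:
//
//	"go.opentelemetry.io/otel/sdk/resource"
//	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
func exampleContainerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerID("a3bf90e006b2"), // usually a (possibly abbreviated) UUID
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"),
		semconv.ContainerRuntime("docker"),
	)
}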
It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// This group defines the attributes used to describe telemetry in the context -// of databases. -const ( - // DBClientConnectionsPoolNameKey is the attribute Key conforming to the - // "db.client.connections.pool.name" semantic conventions. It represents - // the name of the connection pool; unique within the instrumented - // application. In case the connection pool implementation doesn't provide - // a name, instrumentation should use a combination of `server.address` and - // `server.port` attributes formatted as `server.address:server.port`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myDataSource' - DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") - - // DBClientConnectionsStateKey is the attribute Key conforming to the - // "db.client.connections.state" semantic conventions. It represents the - // state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle' - DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: If the collection name is parsed from the query, it SHOULD match - // the value provided in the query and may be qualified with the schema and - // database name. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" - // semantic conventions. It represents the name of the database, fully - // qualified within the server address and port. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'test.users' - // Note: If a database system has multiple namespace components, they - // SHOULD be concatenated (potentially using database system specific - // conventions) from most general to most specific namespace component, and - // more specific namespaces SHOULD NOT be captured without the more general - // namespaces, to ensure that "startswith" queries for the more general - // namespaces will be valid. 
- // Semantic conventions for individual database systems SHOULD document - // what `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationNameKey is the attribute Key conforming to the - // "db.operation.name" semantic conventions. It represents the name of the - // operation or command being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: It is RECOMMENDED to capture the value as provided by the - // application without attempting to do any case normalization. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey - // "WuValue"' - DBQueryTextKey = attribute.Key("db.query.text") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents the database management system (DBMS) product - // as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual DBMS may differ from the one identified by the client. - // For example, when using PostgreSQL client libraries to connect to a - // CockroachDB, the `db.system` is set to `postgresql` based on the - // instrumentation's best knowledge. - DBSystemKey = attribute.Key("db.system") -) - -var ( - // idle - DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") - // used - DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to -// the "db.client.connections.pool.name" semantic conventions. It represents -// the name of the connection pool; unique within the instrumented application. -// In case the connection pool implementation doesn't provide a name, -// instrumentation should use a combination of `server.address` and -// `server.port` attributes formatted as `server.address:server.port`. -func DBClientConnectionsPoolName(val string) attribute.KeyValue { - return DBClientConnectionsPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the -// "db.namespace" semantic conventions. It represents the name of the database, -// fully qualified within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the -// "db.query.text" semantic conventions. It represents the database query being -// executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// This group defines attributes for Cassandra. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. 
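// Illustrative usage sketch (not part of the vendored conventions file): a
// Cassandra client span annotated with the db.* and db.cassandra.* helpers
// defined in this file. The semconv import alias/version, tracer name, and all
// literal values are assumptions; real instrumentation would take them from
// the driver session and query.
//
// Assumed imports:
//
//	"context"
//	"go.opentelemetry.io/otel"
//	"go.opentelemetry.io/otel/trace"
//	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
func exampleCassandraQuerySpan(ctx context.Context) {
	ctx, span := otel.Tracer("example").Start(ctx, "SELECT my_keyspace.users",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.DBSystemCassandra,                      // db.system
		semconv.DBNamespace("my_keyspace"),             // hypothetical keyspace
		semconv.DBOperationName("SELECT"),              // db.operation.name
		semconv.DBCassandraConsistencyLevelLocalQuorum, // db.cassandra.consistency_level
		semconv.DBCassandraCoordinatorDC("us-west-2"),
		semconv.DBCassandraIdempotence(true),
		semconv.DBCassandraPageSize(5000),
	)
}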
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// This group defines attributes for Azure Cosmos DB. -const ( - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// This group defines attributes for Elasticsearch. -const ( - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. 
It represents the - // represents the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") -) - -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// represents the identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// represents the human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// Attributes for software deployments. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// Attributes that represents an occurrence of a lifecycle transition on the -// Android platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the deprecated use the - // `device.app.lifecycle` event definition including `android.state` as a - // payload field instead. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. 
- AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// These attributes may be used for any disk related operation. 
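// Illustrative usage sketch (not part of the vendored conventions file):
// describing a mobile device as OpenTelemetry resource attributes with the
// device.* helpers above. The semconv import alias/version is an assumption;
// the literal values are taken from the documented examples.
//
// Assumed imports:
//
//	"go.opentelemetry.io/otel/sdk/resource"
//	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
func exampleDeviceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"), // vendor identifier / installation ID, not an advertising ID
		semconv.DeviceManufacturer("Apple"),
		semconv.DeviceModelIdentifier("iPhone3,4"),
		semconv.DeviceModelName("iPhone 6s Plus"),
	)
}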
-const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report a DNS query. -const ( - // DNSQuestionNameKey is the attribute Key conforming to the - // "dns.question.name" semantic conventions. It represents the name being - // queried. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.example.com', 'opentelemetry.io' - // Note: If the name field contains non-printable characters (below 32 or - // above 126), those characters should be represented as escaped base 10 - // integers (\DDD). Back slashes and quotes should be escaped. Tabs, - // carriage returns, and line feeds should be converted to \t, \r, and \n - // respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Attributes for operations with an authenticated and/or authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. 
-func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// The shared attributes used to report an error. -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It represents the describes a class of error the - // operation ended with. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable, and SHOULD have low - // cardinality. - // - // When `error.type` is set to a type (e.g., an exception type), its - // canonical class name identifying the type within the artifact SHOULD be - // used. - // - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: - // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. 
- EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// FaaS attributes -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. 
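For context, a minimal sketch of how the exception.* and error.* helpers above might be used when a span ends while an error is still in flight. Not part of the vendored file; the import path and function name are assumptions.

package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed: match the version vendored here
	"go.opentelemetry.io/otel/trace"
)

// recordEscapingError records an exception event on the current span and
// marks it as escaping the span's scope, following the note above.
func recordEscapingError(ctx context.Context, err error) {
	span := trace.SpanFromContext(ctx)
	// RecordError emits an "exception" event (the SDK fills in
	// exception.type and exception.message); exception.escaped is added
	// explicitly because the error is still in flight when the span ends.
	span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
	span.SetAttributes(semconv.ErrorTypeOther)
	span.End()
}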
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") - - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. 
Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. 
It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// Attributes for Feature Flags. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. 
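For context, a minimal sketch of annotating a single serverless invocation with the faas.* helpers above, reusing the example values from their doc comments. Not part of the vendored file; the import path and function name are assumptions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed: match the version vendored here
)

// traceInvocation tags one function invocation with faas.* attributes.
func traceInvocation(ctx context.Context, coldStart bool) {
	_, span := otel.Tracer("example").Start(ctx, "my-function")
	defer span.End()
	span.SetAttributes(
		semconv.FaaSColdstart(coldStart),
		semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
		semconv.FaaSTriggerHTTP, // enum value declared above
		semconv.FaaSMaxMemory(134217728),
	)
}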
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// Describes file attributes. -const ( - // FileDirectoryKey is the attribute Key conforming to the "file.directory" - // semantic conventions. It represents the directory where the file is - // located. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/home/user', 'C:\\Program Files\\MyApp' - FileDirectoryKey = attribute.Key("file.directory") - - // FileExtensionKey is the attribute Key conforming to the "file.extension" - // semantic conventions. It represents the file extension, excluding the - // leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: When the file name has multiple extensions (example.tar.gz), only - // the last one should be captured ("gz", not "tar.gz"). - FileExtensionKey = attribute.Key("file.extension") - - // FileNameKey is the attribute Key conforming to the "file.name" semantic - // conventions. It represents the name of the file including the extension, - // without the directory. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.png' - FileNameKey = attribute.Key("file.name") - - // FilePathKey is the attribute Key conforming to the "file.path" semantic - // conventions. It represents the full path to the file, including the file - // name. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/home/alice/example.png', 'C:\\Program - // Files\\MyApp\\myapp.exe' - FilePathKey = attribute.Key("file.path") - - // FileSizeKey is the attribute Key conforming to the "file.size" semantic - // conventions. It represents the file size in bytes. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - FileSizeKey = attribute.Key("file.size") -) - -// FileDirectory returns an attribute KeyValue conforming to the -// "file.directory" semantic conventions. It represents the directory where the -// file is located. It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the -// "file.extension" semantic conventions. It represents the file extension, -// excluding the leading dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" -// semantic conventions. It represents the name of the file including the -// extension, without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" -// semantic conventions. It represents the full path to the file, including the -// file name. It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" -// semantic conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// Attributes for Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Attributes for Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// The attributes used to describe telemetry in the context of LLM (Large -// Language Models) requests and responses. -const ( - // GenAiCompletionKey is the attribute Key conforming to the - // "gen_ai.completion" semantic conventions. It represents the full - // response received from the LLM. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'assistant', 'content': 'The capital of France is - // Paris.'}]" - // Note: It's RECOMMENDED to format completions as JSON string matching - // [OpenAI messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiCompletionKey = attribute.Key("gen_ai.completion") - - // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" - // semantic conventions. It represents the full prompt sent to an LLM. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'user', 'content': 'What is the capital of - // France?'}]" - // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI - // messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiPromptKey = attribute.Key("gen_ai.prompt") - - // GenAiRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the - // maximum number of tokens the LLM generates for a request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAiRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of - // the LLM a request is being made to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4' - GenAiRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAiRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0.0 - GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAiRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p - // sampling setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0 - GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAiResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to - // each generation received. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'stop' - GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAiResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'chatcmpl-123' - GenAiResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAiResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of - // the LLM a response was generated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4-0613' - GenAiResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as - // identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'openai' - // Note: The actual GenAI product may differ from the one identified by the - // client. 
For example, when using OpenAI client libraries to communicate - // with Mistral, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge. - GenAiSystemKey = attribute.Key("gen_ai.system") - - // GenAiUsageCompletionTokensKey is the attribute Key conforming to the - // "gen_ai.usage.completion_tokens" semantic conventions. It represents the - // number of tokens used in the LLM response (completion). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 180 - GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") - - // GenAiUsagePromptTokensKey is the attribute Key conforming to the - // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the - // number of tokens used in the LLM prompt. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") -) - -var ( - // OpenAI - GenAiSystemOpenai = GenAiSystemKey.String("openai") -) - -// GenAiCompletion returns an attribute KeyValue conforming to the -// "gen_ai.completion" semantic conventions. It represents the full response -// received from the LLM. -func GenAiCompletion(val string) attribute.KeyValue { - return GenAiCompletionKey.String(val) -} - -// GenAiPrompt returns an attribute KeyValue conforming to the -// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to -// an LLM. -func GenAiPrompt(val string) attribute.KeyValue { - return GenAiPromptKey.String(val) -} - -// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the LLM generates for a request. -func GenAiRequestMaxTokens(val int) attribute.KeyValue { - return GenAiRequestMaxTokensKey.Int(val) -} - -// GenAiRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// LLM a request is being made to. -func GenAiRequestModel(val string) attribute.KeyValue { - return GenAiRequestModelKey.String(val) -} - -// GenAiRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the LLM request. -func GenAiRequestTemperature(val float64) attribute.KeyValue { - return GenAiRequestTemperatureKey.Float64(val) -} - -// GenAiRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p -// sampling setting for the LLM request. -func GenAiRequestTopP(val float64) attribute.KeyValue { - return GenAiRequestTopPKey.Float64(val) -} - -// GenAiResponseFinishReasons returns an attribute KeyValue conforming to -// the "gen_ai.response.finish_reasons" semantic conventions. It represents the -// array of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAiResponseFinishReasonsKey.StringSlice(val) -} - -// GenAiResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique -// identifier for the completion. 
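For context, a minimal sketch of wrapping one LLM request/response pair with the gen_ai.* helpers above, using the example values from their doc comments. Not part of the vendored file; the import path and function name are assumptions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed: match the version vendored here
)

// traceChatCompletion records request settings before the model call and
// response metadata afterwards.
func traceChatCompletion(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "chat gpt-4")
	defer span.End()

	span.SetAttributes(
		semconv.GenAiSystemOpenai, // enum value declared above
		semconv.GenAiRequestModel("gpt-4"),
		semconv.GenAiRequestMaxTokens(100),
		semconv.GenAiRequestTemperature(0.0),
	)

	// ... call the model, then record response metadata:
	span.SetAttributes(
		semconv.GenAiResponseModel("gpt-4-0613"),
		semconv.GenAiResponseFinishReasons("stop"),
		semconv.GenAiUsagePromptTokens(100),
		semconv.GenAiUsageCompletionTokens(180),
	)
}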
-func GenAiResponseID(val string) attribute.KeyValue { - return GenAiResponseIDKey.String(val) -} - -// GenAiResponseModel returns an attribute KeyValue conforming to the -// "gen_ai.response.model" semantic conventions. It represents the name of the -// LLM a response was generated from. -func GenAiResponseModel(val string) attribute.KeyValue { - return GenAiResponseModelKey.String(val) -} - -// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to -// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the -// number of tokens used in the LLM response (completion). -func GenAiUsageCompletionTokens(val int) attribute.KeyValue { - return GenAiUsageCompletionTokensKey.Int(val) -} - -// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number -// of tokens used in the LLM prompt. -func GenAiUsagePromptTokens(val int) attribute.KeyValue { - return GenAiUsagePromptTokensKey.Int(val) -} - -// Attributes for GraphQL. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// Attributes for the Android platform on which the Android application is -// running. -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. 
It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1', 'r1p1' - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GenuineIntel' - // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor - // ID string in EBX, EDX and ECX registers. Writing these to memory in this - // order results in a 12-character string. - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, - // excluding loopback interfaces. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. 
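For context, a minimal sketch of building a resource from the host.* helpers above and merging it with the SDK defaults, again reusing the doc-comment example values. Not part of the vendored file; the import path and function name are assumptions.

package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed: match the version vendored here
	"go.opentelemetry.io/otel/sdk/resource"
)

// newHostResource merges host.* attributes into the SDK's default resource.
func newHostResource() (*resource.Resource, error) {
	host, err := resource.New(context.Background(),
		resource.WithAttributes(
			semconv.HostName("opentelemetry-test"),
			semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"),
			semconv.HostType("n1-standard-1"),
			semconv.HostArchAMD64, // enum value declared above
		),
	)
	if err != nil {
		return nil, err
	}
	return resource.Merge(resource.Default(), host)
}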
-func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val string) attribute.KeyValue { - return HostCPUSteppingKey.String(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. -func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID or host -// OS image ID. For Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image or host OS as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" -// semantic conventions. It represents the available MAC addresses of the host, -// excluding loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Semantic convention attributes in the HTTP namespace. -const ( - // HTTPConnectionStateKey is the attribute Key conforming to the - // "http.connection.state" semantic conventions. It represents the state of - // the HTTP connection in the HTTP connection pool. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'active', 'idle' - HTTPConnectionStateKey = attribute.Key("http.connection.state") - - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of - // the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the hTTP - // request method. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - // Note: HTTP request method value SHOULD be "known" to the - // instrumentation. - // By default, this convention defines "known" methods as the ones listed - // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) - // and the PATCH method defined in - // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). - // - // If the HTTP request method is not known to instrumentation, it MUST set - // the `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated - // list of case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is - // not a list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods - // to be case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GeT', 'ACL', 'foo' - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the - // ordinal number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. 
redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPRequestSizeKey is the attribute Key conforming to the - // "http.request.size" semantic conventions. It represents the total size - // of the request in bytes. This should be the total number of bytes sent - // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 - // and HTTP/3), headers, and request body if any. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1437 - HTTPRequestSizeKey = attribute.Key("http.request.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseSizeKey is the attribute Key conforming to the - // "http.response.size" semantic conventions. It represents the total size - // of the response in bytes. This should be the total number of bytes sent - // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and - // HTTP/3), headers, and response body and trailers if any. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1437 - HTTPResponseSizeKey = attribute.Key("http.response.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route, that is, the path - // template in the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. - // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
- HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // active state - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of -// the request in bytes. This should be the total number of bytes sent over the -// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. 
It represents the total size of -// the response in bytes. This should be the total number of bytes sent over -// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Java Virtual machine related attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") - - // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" - // semantic conventions. It represents the name of the garbage collector - // action. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'end of minor GC', 'end of major GC' - // Note: Garbage collector action is generally obtained via - // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). - JvmGcActionKey = attribute.Key("jvm.gc.action") - - // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" - // semantic conventions. It represents the name of the garbage collector. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Young Generation', 'G1 Old Generation' - // Note: Garbage collector name is generally obtained via - // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). - JvmGcNameKey = attribute.Key("jvm.gc.name") - - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") - - // JvmThreadDaemonKey is the attribute Key conforming to the - // "jvm.thread.daemon" semantic conventions. It represents the whether the - // thread is daemon or not. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") - - // JvmThreadStateKey is the attribute Key conforming to the - // "jvm.thread.state" semantic conventions. It represents the state of the - // thread. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'runnable', 'blocked' - JvmThreadStateKey = attribute.Key("jvm.thread.state") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -var ( - // A thread that has not yet started is in this state - JvmThreadStateNew = JvmThreadStateKey.String("new") - // A thread executing in the Java virtual machine is in this state - JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") - // A thread that is blocked waiting for a monitor lock is in this state - JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") - // A thread that is waiting indefinitely for another thread to perform a particular action is in this state - JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") - // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state - JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") - // A thread that has exited is in this state - JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// JvmGcAction returns an attribute KeyValue conforming to the -// "jvm.gc.action" semantic conventions. It represents the name of the garbage -// collector action. -func JvmGcAction(val string) attribute.KeyValue { - return JvmGcActionKey.String(val) -} - -// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" -// semantic conventions. It represents the name of the garbage collector. -func JvmGcName(val string) attribute.KeyValue { - return JvmGcNameKey.String(val) -} - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// JvmThreadDaemon returns an attribute KeyValue conforming to the -// "jvm.thread.daemon" semantic conventions. It represents the whether the -// thread is daemon or not. -func JvmThreadDaemon(val bool) attribute.KeyValue { - return JvmThreadDaemonKey.Bool(val) -} - -// Kubernetes resource attributes. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key - // conforming to the "k8s.container.status.last_terminated_reason" semantic - // conventions. It represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Evicted', 'Error' - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. 
-func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. 
-func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Attributes for a file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. 
-func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// The generic attributes that may be used in any Log Record. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. 
It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to - // the "messaging.destination.partition.id" semantic conventions. It - // represents the identifier of the partition messages are sent to or - // received from, unique within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1' - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. 
It - // represents the name of the original destination the message was - // published to - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to both the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to both the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack', 'nack', 'send' - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents the messaging - // system as identified by the client instrumentation. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate - // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on - // the instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are delivered to or processed by a consumer - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") - // One or more messages are settled - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - MessagingSystemServicebus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). 
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming -// to the "messaging.destination.partition.id" semantic conventions. It -// represents the identifier of the partition messages are sent to or received -// from, unique within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. 
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// This group describes attributes specific to Apache Kafka. -const ( - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. 
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
-	return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
-	return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
-	return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// This group describes attributes specific to RabbitMQ.
-const (
-	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
-	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-	// conventions. It represents the rabbitMQ message routing key.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myKey'
-	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
-	// MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
-	// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
-	// It represents the rabbitMQ message delivery tag
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 123
-	MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the rabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
-	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
-// conventions. It represents the rabbitMQ message delivery tag
-func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
-	return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
-}
-
-// This group describes attributes specific to RocketMQ.
-const (
-	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
-	// "messaging.rocketmq.client_group" semantic conventions. It represents
-	// the name of the RocketMQ producer/consumer group that is handling the
-	// message. The client type is identified by the SpanKind.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myConsumerGroup'
-	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
-	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
-	// the "messaging.rocketmq.consumption_model" semantic conventions. It
-	// represents the model of message consumption. This only applies to
-	// consumer spans.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
-	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
-	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-	// conventions. It represents the delay time level for delay message, which
-	// determines the message delay time.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 3
-	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
-	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
-	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
-	// semantic conventions. It represents the timestamp in milliseconds that
-	// the delay message is expected to be delivered to consumer.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1665987217045
-	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
-	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
-	// "messaging.rocketmq.message.group" semantic conventions. It represents
-	// the it is essential for FIFO message. Messages that belong to the same
-	// message group are always processed one by one within the same consumer
-	// group.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myMessageGroup'
-	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
-	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
-	// "messaging.rocketmq.message.keys" semantic conventions. It represents
-	// the key(s) of message, another way to mark message besides message id.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'keyA', 'keyB'
-	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
-	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
-	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
-	// secondary classifier of message besides topic.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'tagA'
-	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
-	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
-	// "messaging.rocketmq.message.type" semantic conventions. It represents
-	// the type of message.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
-	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
-	// "messaging.rocketmq.namespace" semantic conventions. It represents the
-	// namespace of RocketMQ resources, resources in different namespaces are
-	// individual.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. 
It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// This group describes attributes specific to GCP Pub/Sub. -const ( - // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. - // It represents the ack deadline in seconds set for the modify ack - // deadline request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It - // represents the ack id for a given message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack_id' - MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key - // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" - // semantic conventions. It represents the delivery attempt for a given - // message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") -) - -// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic -// conventions. It represents the ack deadline in seconds set for the modify -// ack deadline request. -func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It -// represents the ack id for a given message. -func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageAckIDKey.String(val) -} - -// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. 
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// This group describes attributes specific to Azure Service Bus. -const ( - // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key - // conforming to the "messaging.servicebus.destination.subscription_name" - // semantic conventions. It represents the name of the subscription in the - // topic messages are received from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mySubscription' - MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") - - // MessagingServicebusDispositionStatusKey is the attribute Key conforming - // to the "messaging.servicebus.disposition_status" semantic conventions. - // It represents the describes the [settlement - // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServicebusMessageDeliveryCountKey is the attribute Key - // conforming to the "messaging.servicebus.message.delivery_count" semantic - // conventions. It represents the number of deliveries that have been - // attempted for this message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key - // conforming to the "messaging.servicebus.message.enqueued_time" semantic - // conventions. It represents the UTC epoch seconds at which the message - // has been accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") -) - -var ( - // Message is completed - MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") - // Message is abandoned - MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") - // Message is deferred - MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") -) - -// MessagingServicebusDestinationSubscriptionName returns an attribute -// KeyValue conforming to the -// "messaging.servicebus.destination.subscription_name" semantic conventions. -// It represents the name of the subscription in the topic messages are -// received from. -func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingServicebusDestinationSubscriptionNameKey.String(val) -} - -// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message. 
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServicebusMessageDeliveryCountKey.Int(val) -} - -// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServicebusMessageEnqueuedTimeKey.Int(val) -} - -// This group describes attributes specific to Azure Event Hubs. -const ( - // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to - // the "messaging.eventhubs.consumer.group" semantic conventions. It - // represents the name of the consumer group the event consumer is - // associated with. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'indexer' - MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") - - // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming - // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. - // It represents the UTC epoch seconds at which the message has been - // accepted and stored in the entity. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1701393730 - MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") -) - -// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming -// to the "messaging.eventhubs.consumer.group" semantic conventions. It -// represents the name of the consumer group the event consumer is associated -// with. -func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { - return MessagingEventhubsConsumerGroupKey.String(val) -} - -// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.eventhubs.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has -// been accepted and stored in the entity. -func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // actual version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.1', '2' - // Note: If protocol version is subject to negotiation (for example using - // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute - // SHOULD be set to the negotiated version. If the actual protocol version - // is not known, this attribute SHOULD NOT be set. - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of - // the OCI image manifest. For container images specifically is the digest - // by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// Attributes used by the OpenTracing Shim layer. -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. 
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. 
It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Attributes reserved for OpenTelemetry -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch_type" semantic conventions. It represents the - // specifies whether the context switches for this data point were - // voluntary or involuntary. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and - // time the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:25:34.853Z' - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the - // "process.exit.code" semantic conventions. It represents the exit code of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the - // "process.exit.time" semantic conventions. It represents the date and - // time the process exited, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2023-11-21T09:26:12.315Z' - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID - // of the process's group leader. This is also the process group ID (PGID) - // of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. It represents the whether - // the process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessPagingFaultTypeKey is the attribute Key conforming to the - // "process.paging.fault_type" semantic conventions. 
It represents the type - // of page fault for this data point. Type `major` is for major/hard page - // faults, and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user - // ID (RUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the - // username of the real user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved - // user ID (SUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the - // username of the saved user. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") - - // ProcessSessionLeaderPIDKey is the attribute Key conforming to the - // "process.session_leader.pid" semantic conventions. It represents the PID - // of the process's session leader. This is also the session ID (SID) of - // the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 14 - ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") - - // ProcessUserIDKey is the attribute Key conforming to the - // "process.user.id" semantic conventions. It represents the effective user - // ID (EUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1001 - ProcessUserIDKey = attribute.Key("process.user.id") - - // ProcessUserNameKey is the attribute Key conforming to the - // "process.user.name" semantic conventions. It represents the username of - // the effective user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessUserNameKey = attribute.Key("process.user.name") - - // ProcessVpidKey is the attribute Key conforming to the "process.vpid" - // semantic conventions. It represents the virtual process identifier. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12 - // Note: The process ID within a PID namespace. This is not necessarily - // unique across all processes on the host but it is unique within the - // process namespace that the process exists within. - ProcessVpidKey = attribute.Key("process.vpid") -) - -var ( - // voluntary - ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") - // involuntary - ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") -) - -var ( - // major - ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") - // minor - ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. 
Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCreationTime returns an attribute KeyValue conforming to the -// "process.creation.time" semantic conventions. It represents the date and -// time the process was created, in ISO 8601 format. -func ProcessCreationTime(val string) attribute.KeyValue { - return ProcessCreationTimeKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessExitCode returns an attribute KeyValue conforming to the -// "process.exit.code" semantic conventions. It represents the exit code of the -// process. -func ProcessExitCode(val int) attribute.KeyValue { - return ProcessExitCodeKey.Int(val) -} - -// ProcessExitTime returns an attribute KeyValue conforming to the -// "process.exit.time" semantic conventions. It represents the date and time -// the process exited, in ISO 8601 format. -func ProcessExitTime(val string) attribute.KeyValue { - return ProcessExitTimeKey.String(val) -} - -// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the -// "process.group_leader.pid" semantic conventions. It represents the PID of -// the process's group leader. This is also the process group ID (PGID) of the -// process. -func ProcessGroupLeaderPID(val int) attribute.KeyValue { - return ProcessGroupLeaderPIDKey.Int(val) -} - -// ProcessInteractive returns an attribute KeyValue conforming to the -// "process.interactive" semantic conventions. It represents the whether the -// process is connected to an interactive shell. -func ProcessInteractive(val bool) attribute.KeyValue { - return ProcessInteractiveKey.Bool(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRealUserID returns an attribute KeyValue conforming to the -// "process.real_user.id" semantic conventions. It represents the real user ID -// (RUID) of the process. 
-func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user -// ID (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username -// of the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the -// "process.vpid" semantic conventions. It represents the virtual process -// identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// Attributes for process CPU -const ( - // ProcessCPUStateKey is the attribute Key conforming to the - // "process.cpu.state" semantic conventions. It represents the CPU state of - // the process. 
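// Example (sketch): how the Process* helpers defined above are typically
// attached to a span. The semconv import path, tracer name, and span name are
// illustrative assumptions; the process values come from the standard
// library os package.
package example

import (
	"context"
	"os"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func annotateProcess(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "startup")
	defer span.End()

	span.SetAttributes(
		semconv.ProcessPID(os.Getpid()),        // process.pid
		semconv.ProcessParentPID(os.Getppid()), // process.parent_pid
		semconv.ProcessCommand(os.Args[0]),     // process.command
		semconv.ProcessCommandArgs(os.Args...), // process.command_args
	)
}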
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessCPUStateKey = attribute.Key("process.cpu.state") -) - -var ( - // system - ProcessCPUStateSystem = ProcessCPUStateKey.String("system") - // user - ProcessCPUStateUser = ProcessCPUStateKey.String("user") - // wait - ProcessCPUStateWait = ProcessCPUStateKey.String("wait") -) - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the - // "rpc.message.type" semantic conventions. It represents the whether this - // is a received or sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // sent - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. -func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the -// "rpc.message.id" semantic conventions. It represents the mUST be calculated -// as two different counters starting from `1` one for sent messages and one -// for received message. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to -// the "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
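// Example (sketch): attaching the RPC attribute helpers and enum values
// defined above to a client span. The semconv import path, tracer name, and
// the service/method values (taken from the documented examples) are
// illustrative assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func traceEchoCall(ctx context.Context) {
	// gRPC convention: the span name is "$package.$service/$method", and
	// rpc.method must equal the method part of that name.
	_, span := otel.Tracer("example").Start(ctx, "myservice.EchoService/exampleMethod")
	defer span.End()

	span.SetAttributes(
		semconv.RPCSystemGRPC,                       // rpc.system
		semconv.RPCService("myservice.EchoService"), // rpc.service
		semconv.RPCMethod("exampleMethod"),          // rpc.method
		semconv.RPCGRPCStatusCodeOk,                 // rpc.grpc.status_code
	)
}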
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to - // distinguish instances of the same service that exist at the same time - // (e.g. instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random - // Version 1 or Version 4 [RFC - // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an - // inherent unique ID as the source of - // this value if stability is desirable. In that case, the ID SHOULD be - // used as source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the - // purposes of identifying a service instance is - // needed. 
Similar to what can be seen in the man page for the - // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) - // file, the underlying - // data, such as pod name and namespace should be treated as confidential, - // being the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we - // do not recommend using one identifier - // for all processes participating in the application. Instead, it's - // recommended each division (e.g. a worker - // thread in unicorn) to have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it - // can't unambiguously determine the - // service instance that is generating that telemetry. For instance, - // creating an UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container - // within that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, - // as they know the target address and - // port. - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If - // `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. 
-func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. 
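// Example (sketch): the Service* helpers above are resource attributes and are
// normally set on the SDK resource rather than on individual spans. The
// semconv import path and the attribute values (taken from the documented
// examples) are illustrative assumptions.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func newServiceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
	)
}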
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Describes System attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process attributes -const ( - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. It represents the process - // state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -var ( - // running - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// Attributes for telemetry SDK. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. 
- // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. 
-func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client. 
This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. 
It represents the - // date/Time indicating when client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the also - // called an SNI, this tells the server which hostname to which the client - // is attempting to connect to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions. It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. 
It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/Time indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. 
It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the also called -// an SNI, this tells the server which hostname to which the client is -// attempting to connect to. 
-func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. 
-func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" - // semantic conventions. It represents the domain extracted from the - // `url.full`, such as "opentelemetry.io". 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', - // '[1080:0:0:0:8:800:200C:417A]' - // Note: In some cases a URL may refer to an IP and/or port directly, - // without a domain name. In this case, the IP address would go to the - // domain field. If the URL contains a [literal IPv6 - // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by - // `[` and `]`, the `[` and `]` characters should also be captured in the - // domain field. - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from - // the `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: The file extension is only set if it exists, as not every url has - // a file extension. When the file name has multiple extensions - // `example.tar.gz`, only the last one should be captured `gz`, not - // `tar.gz`. - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed). Sensitive content provided in `url.full` SHOULD be - // scrubbed when instrumentations can identify it. - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" - // semantic conventions. It represents the unmodified original URL as seen - // in the event source. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // 'search?q=OpenTelemetry' - // Note: In network monitoring, the observed URL may be a full URL, whereas - // in access logs, the URL is often just represented as a path. This field - // is meant to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case password and - // username SHOULD NOT be redacted and attribute's value SHOULD remain the - // same. 
- URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full` - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered url domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.com', 'foo.co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). For example, the registered domain for - // `foo.example.com` is `example.com`. Trying to approximate this by simply - // taking the last two labels will not work well for TLDs such as `co.uk`. - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name includes all of the names except the host name - // under the registered_domain. In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain - // contains all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'east', 'sub2.sub1' - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If - // the domain has multiple levels of subdomain, such as - // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, - // with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" - // semantic conventions. It represents the low-cardinality template of an - // [absolute path - // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/users/{id}', '/users/:id', '/users?id={id}' - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective - // top level domain (eTLD), also known as the domain suffix, is the last - // part of the domain name. For example, the top level domain for - // example.com is `com`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com', 'co.uk' - // Note: This value can be determined precisely with the [public suffix - // list](http://publicsuffix.org). - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the -// `url.full`, such as "opentelemetry.io". -func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the -// "url.extension" semantic conventions. It represents the file extension -// extracted from the `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the -// "url.original" semantic conventions. It represents the unmodified original -// URL as seen in the event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" -// semantic conventions. It represents the port extracted from the `url.full` -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered url domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. 
It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the -// "url.subdomain" semantic conventions. It represents the subdomain portion of -// a fully qualified domain name includes all of the names except the host name -// under the registered_domain. In a partially qualified domain, or if the -// qualification level of the full name cannot be determined, subdomain -// contains all of the names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the -// "url.template" semantic conventions. It represents the low-cardinality -// template of an [absolute path -// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, is the last part of -// the domain name. For example, the top level domain for example.com is `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentNameKey is the attribute Key conforming to the - // "user_agent.name" semantic conventions. It represents the name of the - // user-agent extracted from original. Usually refers to the browser's - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Safari', 'YourApp' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's name - // from original string. In the case of using a user-agent for non-browser - // products, such as microservices with multiple names/versions inside the - // `user_agent.original`, the most significant name SHOULD be selected. In - // such a scenario it should align with `user_agent.version` - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 - // grpc-java-okhttp/1.27.2' - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of - // the user-agent extracted from original. Usually refers to the browser's - // version - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.1.2', '1.0.0' - // Note: [Example](https://www.whatsmyua.info) of extracting browser's - // version from original string. 
In the case of using a user-agent for - // non-browser products, such as microservices with multiple names/versions - // inside the `user_agent.original`, the most significant version SHOULD be - // selected. In such a scenario it should align with `user_agent.name` - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// The attributes used to describe the packaged software running the -// application code. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. 
-func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go deleted file mode 100644 index bfaee0d5..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go deleted file mode 100644 index fcdb9f48..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go +++ /dev/null @@ -1,1307 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - - // ContainerCPUTime is the metric conforming to the "container.cpu.time" - // semantic conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: Experimental - ContainerCPUTimeName = "container.cpu.time" - ContainerCPUTimeUnit = "s" - ContainerCPUTimeDescription = "Total CPU time consumed" - - // ContainerMemoryUsage is the metric conforming to the - // "container.memory.usage" semantic conventions. It represents the memory - // usage of the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerMemoryUsageName = "container.memory.usage" - ContainerMemoryUsageUnit = "By" - ContainerMemoryUsageDescription = "Memory usage of the container." - - // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic - // conventions. It represents the disk bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerDiskIoName = "container.disk.io" - ContainerDiskIoUnit = "By" - ContainerDiskIoDescription = "Disk bytes for the container." - - // ContainerNetworkIo is the metric conforming to the "container.network.io" - // semantic conventions. It represents the network bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerNetworkIoName = "container.network.io" - ContainerNetworkIoUnit = "By" - ContainerNetworkIoDescription = "Network bytes for the container." - - // DBClientOperationDuration is the metric conforming to the - // "db.client.operation.duration" semantic conventions. It represents the - // duration of database client operations. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientOperationDurationName = "db.client.operation.duration" - DBClientOperationDurationUnit = "s" - DBClientOperationDurationDescription = "Duration of database client operations." - - // DBClientConnectionCount is the metric conforming to the - // "db.client.connection.count" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. 
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionCountName = "db.client.connection.count" - DBClientConnectionCountUnit = "{connection}" - DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionIdleMax is the metric conforming to the - // "db.client.connection.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMaxName = "db.client.connection.idle.max" - DBClientConnectionIdleMaxUnit = "{connection}" - DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionIdleMin is the metric conforming to the - // "db.client.connection.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMinName = "db.client.connection.idle.min" - DBClientConnectionIdleMinUnit = "{connection}" - DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionMax is the metric conforming to the - // "db.client.connection.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionMaxName = "db.client.connection.max" - DBClientConnectionMaxUnit = "{connection}" - DBClientConnectionMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionPendingRequests is the metric conforming to the - // "db.client.connection.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" - DBClientConnectionPendingRequestsUnit = "{request}" - DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionTimeouts is the metric conforming to the - // "db.client.connection.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionTimeoutsName = "db.client.connection.timeouts" - DBClientConnectionTimeoutsUnit = "{timeout}" - DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - - // DBClientConnectionCreateTime is the metric conforming to the - // "db.client.connection.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionCreateTimeName = "db.client.connection.create_time" - DBClientConnectionCreateTimeUnit = "s" - DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" - - // DBClientConnectionWaitTime is the metric conforming to the - // "db.client.connection.wait_time" semantic conventions. 
It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionWaitTimeName = "db.client.connection.wait_time" - DBClientConnectionWaitTimeUnit = "s" - DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" - - // DBClientConnectionUseTime is the metric conforming to the - // "db.client.connection.use_time" semantic conventions. It represents the time - // between borrowing a connection and returning it to the pool. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientConnectionUseTimeName = "db.client.connection.use_time" - DBClientConnectionUseTimeUnit = "s" - DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the - // deprecated, use `db.client.connection.count` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." - - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It represents the - // deprecated, use `db.client.connection.idle.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." - - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It represents the - // deprecated, use `db.client.connection.idle.min` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." - - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the - // deprecated, use `db.client.connection.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." - - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. It represents - // the deprecated, use `db.client.connection.pending_requests` instead. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." - - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. 
It represents the - // deprecated, use `db.client.connection.timeouts` instead. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." - - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.create_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.wait_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." - - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.use_time` instead. Note: the unit also - // changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." - - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." - - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Stable - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. 
- // Instrument: counter - // Unit: {exception} - // Stability: Stable - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." - - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" - AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" - AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingRequests is the metric conforming to the - // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the - // number of requests that tried to acquire a rate limiting lease. - // Instrument: counter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" - AspnetcoreRateLimitingRequestsUnit = "{request}" - AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. 
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: Stable - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." - - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. It represents the - // number of connections that are currently upgraded (WebSockets). . - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" - KestrelUpgradedConnectionsUnit = "{connection}" - KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." - - // KestrelTLSHandshakeDuration is the metric conforming to the - // "kestrel.tls_handshake.duration" semantic conventions. It represents the - // duration of TLS handshakes on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" - KestrelTLSHandshakeDurationUnit = "s" - KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." - - // KestrelActiveTLSHandshakes is the metric conforming to the - // "kestrel.active_tls_handshakes" semantic conventions. It represents the - // number of TLS handshakes that are currently in progress on the server. 
- // Instrument: updowncounter - // Unit: {handshake} - // Stability: Stable - KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" - KestrelActiveTLSHandshakesUnit = "{handshake}" - KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." - - // SignalrServerConnectionDuration is the metric conforming to the - // "signalr.server.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - SignalrServerConnectionDurationName = "signalr.server.connection.duration" - SignalrServerConnectionDurationUnit = "s" - SignalrServerConnectionDurationDescription = "The duration of connections on the server." - - // SignalrServerActiveConnections is the metric conforming to the - // "signalr.server.active_connections" semantic conventions. It represents the - // number of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - SignalrServerActiveConnectionsName = "signalr.server.active_connections" - SignalrServerActiveConnectionsUnit = "{connection}" - SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" - // semantic conventions. It represents the measures the duration of the - // function's logic execution. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInvokeDurationName = "faas.invoke_duration" - FaaSInvokeDurationUnit = "s" - FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" - - // FaaSInitDuration is the metric conforming to the "faas.init_duration" - // semantic conventions. It represents the measures the duration of the - // function's initialization, such as a cold start. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInitDurationName = "faas.init_duration" - FaaSInitDurationUnit = "s" - FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" - - // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic - // conventions. It represents the number of invocation cold starts. - // Instrument: counter - // Unit: {coldstart} - // Stability: Experimental - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: Experimental - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. 
- // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." 
- - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. 
- // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." - - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryLimitName = "jvm.memory.limit" - JvmMemoryLimitUnit = "By" - JvmMemoryLimitDescription = "Measure of max obtainable memory." - - // JvmMemoryUsedAfterLastGc is the metric conforming to the - // "jvm.memory.used_after_last_gc" semantic conventions. It represents the - // measure of memory used, as measured after the most recent garbage collection - // event on this pool. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" - JvmMemoryUsedAfterLastGcUnit = "By" - JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." - - // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic - // conventions. It represents the duration of JVM garbage collection actions. - // Instrument: histogram - // Unit: s - // Stability: Stable - JvmGcDurationName = "jvm.gc.duration" - JvmGcDurationUnit = "s" - JvmGcDurationDescription = "Duration of JVM garbage collection actions." 
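(Aside, not part of the patch: for the updowncounter-style metrics in this block, such as `jvm.memory.used`, the usual pattern is an asynchronous instrument with a callback rather than synchronous recording. A minimal sketch under the same assumptions as above; `readMemoryUsed` and the scope name "example/jvm" are hypothetical.)

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// observeMemoryUsed registers a callback that reports the current reading
// each time the SDK collects metrics.
func observeMemoryUsed(readMemoryUsed func() int64) error {
	meter := otel.Meter("example/jvm")
	_, err := meter.Int64ObservableUpDownCounter(
		semconv.JvmMemoryUsedName,
		metric.WithUnit(semconv.JvmMemoryUsedUnit),
		metric.WithDescription(semconv.JvmMemoryUsedDescription),
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(readMemoryUsed())
			return nil
		}),
	)
	return err
}
```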
- - // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic - // conventions. It represents the number of executing platform threads. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Stable - JvmThreadCountName = "jvm.thread.count" - JvmThreadCountUnit = "{thread}" - JvmThreadCountDescription = "Number of executing platform threads." - - // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic - // conventions. It represents the number of classes loaded since JVM start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassLoadedName = "jvm.class.loaded" - JvmClassLoadedUnit = "{class}" - JvmClassLoadedDescription = "Number of classes loaded since JVM start." - - // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" - // semantic conventions. It represents the number of classes unloaded since JVM - // start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassUnloadedName = "jvm.class.unloaded" - JvmClassUnloadedUnit = "{class}" - JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." - - // JvmClassCount is the metric conforming to the "jvm.class.count" semantic - // conventions. It represents the number of classes currently loaded. - // Instrument: updowncounter - // Unit: {class} - // Stability: Stable - JvmClassCountName = "jvm.class.count" - JvmClassCountUnit = "{class}" - JvmClassCountDescription = "Number of classes currently loaded." - - // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic - // conventions. It represents the number of processors available to the Java - // virtual machine. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Stable - JvmCPUCountName = "jvm.cpu.count" - JvmCPUCountUnit = "{cpu}" - JvmCPUCountDescription = "Number of processors available to the Java virtual machine." - - // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic - // conventions. It represents the cPU time used by the process as reported by - // the JVM. - // Instrument: counter - // Unit: s - // Stability: Stable - JvmCPUTimeName = "jvm.cpu.time" - JvmCPUTimeUnit = "s" - JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." - - // JvmCPURecentUtilization is the metric conforming to the - // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent - // CPU utilization for the process as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Stable - JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" - JvmCPURecentUtilizationUnit = "1" - JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." - - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It represents the - // measures the duration of publish operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Measures the duration of publish operation." - - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It represents the - // measures the duration of receive operation. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Measures the duration of receive operation." - - // MessagingProcessDuration is the metric conforming to the - // "messaging.process.duration" semantic conventions. It represents the - // measures the duration of process operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingProcessDurationName = "messaging.process.duration" - MessagingProcessDurationUnit = "s" - MessagingProcessDurationDescription = "Measures the duration of process operation." - - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It represents the - // measures the number of published messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Measures the number of published messages." - - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It represents the - // measures the number of received messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Measures the number of received messages." - - // MessagingProcessMessages is the metric conforming to the - // "messaging.process.messages" semantic conventions. It represents the - // measures the number of processed messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingProcessMessagesName = "messaging.process.messages" - MessagingProcessMessagesUnit = "{message}" - MessagingProcessMessagesDescription = "Measures the number of processed messages." - - // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic - // conventions. It represents the total CPU seconds broken down by different - // states. - // Instrument: counter - // Unit: s - // Stability: Experimental - ProcessCPUTimeName = "process.cpu.time" - ProcessCPUTimeUnit = "s" - ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." - - // ProcessCPUUtilization is the metric conforming to the - // "process.cpu.utilization" semantic conventions. It represents the difference - // in process.cpu.time since the last measurement, divided by the elapsed time - // and number of CPUs available to the process. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - ProcessCPUUtilizationName = "process.cpu.utilization" - ProcessCPUUtilizationUnit = "1" - ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." - - // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" - // semantic conventions. It represents the amount of physical memory in use. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryUsageName = "process.memory.usage" - ProcessMemoryUsageUnit = "By" - ProcessMemoryUsageDescription = "The amount of physical memory in use." - - // ProcessMemoryVirtual is the metric conforming to the - // "process.memory.virtual" semantic conventions. 
It represents the amount of - // committed virtual memory. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - ProcessMemoryVirtualName = "process.memory.virtual" - ProcessMemoryVirtualUnit = "By" - ProcessMemoryVirtualDescription = "The amount of committed virtual memory." - - // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic - // conventions. It represents the disk bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessDiskIoName = "process.disk.io" - ProcessDiskIoUnit = "By" - ProcessDiskIoDescription = "Disk bytes transferred." - - // ProcessNetworkIo is the metric conforming to the "process.network.io" - // semantic conventions. It represents the network bytes transferred. - // Instrument: counter - // Unit: By - // Stability: Experimental - ProcessNetworkIoName = "process.network.io" - ProcessNetworkIoUnit = "By" - ProcessNetworkIoDescription = "Network bytes transferred." - - // ProcessThreadCount is the metric conforming to the "process.thread.count" - // semantic conventions. It represents the process threads count. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Experimental - ProcessThreadCountName = "process.thread.count" - ProcessThreadCountUnit = "{thread}" - ProcessThreadCountDescription = "Process threads count." - - // ProcessOpenFileDescriptorCount is the metric conforming to the - // "process.open_file_descriptor.count" semantic conventions. It represents the - // number of file descriptors in use by the process. - // Instrument: updowncounter - // Unit: {count} - // Stability: Experimental - ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" - ProcessOpenFileDescriptorCountUnit = "{count}" - ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." - - // ProcessContextSwitches is the metric conforming to the - // "process.context_switches" semantic conventions. It represents the number of - // times the process has been context switched. - // Instrument: counter - // Unit: {count} - // Stability: Experimental - ProcessContextSwitchesName = "process.context_switches" - ProcessContextSwitchesUnit = "{count}" - ProcessContextSwitchesDescription = "Number of times the process has been context switched." - - // ProcessPagingFaults is the metric conforming to the "process.paging.faults" - // semantic conventions. It represents the number of page faults the process - // has made. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - ProcessPagingFaultsName = "process.paging.faults" - ProcessPagingFaultsUnit = "{fault}" - ProcessPagingFaultsDescription = "Number of page faults the process has made." - - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the measures the duration of inbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the measures the duration of outbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. 
It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - - // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the reports the current frequency of the - // CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: Experimental - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the reports - // the number of actual physical processor cores on the hardware. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the reports - // the number of logical (virtual) processor cores created by the operating - // system to manage multitasking. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the reports memory in use by state. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." - - // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." 
- - // SystemMemoryShared is the metric conforming to the "system.memory.shared" - // semantic conventions. It represents the shared memory used (mostly by - // tmpfs). - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemorySharedName = "system.memory.shared" - SystemMemorySharedUnit = "By" - SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." - - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. 
It represents the sum of - // the time each operation took to complete. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
-	SystemNetworkConnectionsName = "system.network.connections"
-	SystemNetworkConnectionsUnit = "{connection}"
-
-	// SystemProcessCount is the metric conforming to the "system.process.count"
-	// semantic conventions. It represents the total number of processes in each
-	// state.
-	// Instrument: updowncounter
-	// Unit: {process}
-	// Stability: Experimental
-	SystemProcessCountName = "system.process.count"
-	SystemProcessCountUnit = "{process}"
-	SystemProcessCountDescription = "Total number of processes in each state"
-
-	// SystemProcessCreated is the metric conforming to the
-	// "system.process.created" semantic conventions. It represents the total
-	// number of processes created over uptime of the host.
-	// Instrument: counter
-	// Unit: {process}
-	// Stability: Experimental
-	SystemProcessCreatedName = "system.process.created"
-	SystemProcessCreatedUnit = "{process}"
-	SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
-
-	// SystemLinuxMemoryAvailable is the metric conforming to the
-	// "system.linux.memory.available" semantic conventions. It represents an
-	// estimate of how much memory is available for starting new applications,
-	// without causing swapping.
-	// Instrument: updowncounter
-	// Unit: By
-	// Stability: Experimental
-	SystemLinuxMemoryAvailableName = "system.linux.memory.available"
-	SystemLinuxMemoryAvailableUnit = "By"
-	SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
deleted file mode 100644
index 4c87c7ad..00000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/
-const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
new file mode 100644
index 00000000..24805478
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
@@ -0,0 +1,41 @@
+
+# Migration from v1.36.0 to v1.37.0
+
+The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions.
+
+## Removed
+
+The following declarations have been removed.
+Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions.
+
+If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use.
+If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case.
+
+- `ContainerRuntime`
+- `ContainerRuntimeKey`
+- `GenAIOpenAIRequestServiceTierAuto`
+- `GenAIOpenAIRequestServiceTierDefault`
+- `GenAIOpenAIRequestServiceTierKey`
+- `GenAIOpenAIResponseServiceTier`
+- `GenAIOpenAIResponseServiceTierKey`
+- `GenAIOpenAIResponseSystemFingerprint`
+- `GenAIOpenAIResponseSystemFingerprintKey`
+- `GenAISystemAWSBedrock`
+- `GenAISystemAnthropic`
+- `GenAISystemAzureAIInference`
+- `GenAISystemAzureAIOpenAI`
+- `GenAISystemCohere`
+- `GenAISystemDeepseek`
+- `GenAISystemGCPGemini`
+- `GenAISystemGCPGenAI`
+- `GenAISystemGCPVertexAI`
+- `GenAISystemGroq`
+- `GenAISystemIBMWatsonxAI`
+- `GenAISystemKey`
+- `GenAISystemMistralAI`
+- `GenAISystemOpenAI`
+- `GenAISystemPerplexity`
+- `GenAISystemXai`
+
+[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
+[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
new file mode 100644
index 00000000..d795247f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.37.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
new file mode 100644
index 00000000..b6b27498
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
@@ -0,0 +1,15193 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Namespace: android
+const (
+	// AndroidAppStateKey is the attribute Key conforming to the "android.app.state"
+	// semantic conventions. It represents the this attribute represents the state
+	// of the application.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "created"
+	// Note: The Android lifecycle states are defined in
+	// [Activity lifecycle callbacks], and from which the `OS identifiers` are
+	// derived.
+	//
+	// [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc
+	AndroidAppStateKey = attribute.Key("android.app.state")
+
+	// AndroidOSAPILevelKey is the attribute Key conforming to the
+	// "android.os.api_level" semantic conventions. It represents the uniquely
+	// identifies the framework API revision offered by a version (`os.version`) of
+	// the android operating system. More information can be found in the
+	// [Android API levels documentation].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "33", "32"
+	//
+	// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system.
More information can be found in the +// [Android API levels documentation]. +// +// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time. + // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. + // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0", + // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123" + AppBuildIDKey = attribute.Key("app.build_id") + + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. 
+ // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppJankFrameCountKey is the attribute Key conforming to the + // "app.jank.frame_count" semantic conventions. It represents a number of frame + // renders that experienced jank. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 9, 42 + // Note: Depending on platform limitations, the value provided MAY be + // approximation. + AppJankFrameCountKey = attribute.Key("app.jank.frame_count") + + // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period" + // semantic conventions. It represents the time period, in seconds, for which + // this jank is being reported. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 5.0, 10.24 + AppJankPeriodKey = attribute.Key("app.jank.period") + + // AppJankThresholdKey is the attribute Key conforming to the + // "app.jank.threshold" semantic conventions. It represents the minimum + // rendering threshold for this jank, in seconds. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.016, 0.7, 1.024 + AppJankThresholdKey = attribute.Key("app.jank.threshold") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. 
+ AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppBuildID returns an attribute KeyValue conforming to the "app.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the application. +func AppBuildID(val string) attribute.KeyValue { + return AppBuildIDKey.String(val) +} + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppJankFrameCount returns an attribute KeyValue conforming to the +// "app.jank.frame_count" semantic conventions. It represents a number of frame +// renders that experienced jank. +func AppJankFrameCount(val int) attribute.KeyValue { + return AppJankFrameCountKey.Int(val) +} + +// AppJankPeriod returns an attribute KeyValue conforming to the +// "app.jank.period" semantic conventions. It represents the time period, in +// seconds, for which this jank is being reported. +func AppJankPeriod(val float64) attribute.KeyValue { + return AppJankPeriodKey.Float64(val) +} + +// AppJankThreshold returns an attribute KeyValue conforming to the +// "app.jank.threshold" semantic conventions. It represents the minimum rendering +// threshold for this jank, in seconds. +func AppJankThreshold(val float64) attribute.KeyValue { + return AppJankThresholdKey.Float64(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. +func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. 
The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact] provides a standard way to identify and locate the + // packaged artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity. 
+// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact] provides a standard way to identify and locate the packaged +// artifact. +// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the value + // of the `ConsistentRead` request parameter. 
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. 
It represents the value of + // the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" + AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") + + // AWSKinesisStreamNameKey is the attribute Key conforming to the + // "aws.kinesis.stream_name" semantic conventions. It represents the name of the + // AWS Kinesis [stream] the request refers to. Corresponds to the + // `--stream-name` parameter of the Kinesis [describe-stream] operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-stream-name" + // + // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html + // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html + AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") + + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked + // ARN as provided on the `Context` passed to the function ( + // `Lambda-Runtime-Invoked-Function-Arn` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" + // Note: This may be different from `cloud.resource_id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") + + // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the + // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID + // of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda + // function. It's contents are read by Lambda and used to trigger a function. + // This isn't available in the lambda execution context or the lambda runtime + // environtment. This is going to be populated by the AWS SDK for each language + // when that UUID is present. Some of these operations are + // Create/Delete/Get/List/Update EventSourceMapping. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" + // + // [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html + AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource + // Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" + // Note: See the [log group ARN format documentation]. + // + // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of the + // AWS log group(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API]. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+ // This applies in particular to the following operations: + // + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the + // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN + // of the Secret stored in the Secrets Mangger. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters" + AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn") + + // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn" + // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon + // SNS [topic] is a logical access point that acts as a communication channel. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" + // + // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html + AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") + + // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" + // semantic conventions. It represents the URL of the AWS SQS Queue. It's a + // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is + // used to access the queue and perform actions on it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" + AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") + + // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the + // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN + // of the AWS Step Functions Activity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" + AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") + + // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the + // "aws.step_functions.state_machine.arn" semantic conventions. It represents + // the ARN of the AWS Step Functions State Machine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" + AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") +) + +// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the +// "aws.bedrock.guardrail.id" semantic conventions. 
It represents the unique +// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
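+//
+// A minimal usage sketch (illustrative only; it assumes go.opentelemetry.io/otel
+// and go.opentelemetry.io/otel/trace are imported, this package is imported as
+// semconv, and the tracer name, span name, and values are placeholders):
+//
+//	_, span := otel.Tracer("dynamodb-client").Start(ctx, "DynamoDB.UpdateTable",
+//		trace.WithAttributes(
+//			semconv.AWSDynamoDBTableNames("Users"),
+//			semconv.AWSDynamoDBGlobalSecondaryIndexUpdates(`{ "Create": { "IndexName": "name_to_group" } }`),
+//		))
+//	defer span.End()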
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. 
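+//
+// For example, the response counts of a Scan call could be recorded on the
+// current span (a sketch; it assumes go.opentelemetry.io/otel/trace is imported
+// and this package is imported as semconv):
+//
+//	span := trace.SpanFromContext(ctx)
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBScannedCount(50),
+//		semconv.AWSDynamoDBCount(10),
+//	)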
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. +// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. 
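+//
+// Illustrative usage (a sketch; it assumes go.opentelemetry.io/otel and
+// go.opentelemetry.io/otel/trace are imported, this package is imported as
+// semconv, and the tracer name, span name, and values are placeholders):
+//
+//	_, span := otel.Tracer("ecs-worker").Start(ctx, "process-task",
+//		trace.WithAttributes(
+//			semconv.AWSECSTaskID("10838bed-421f-43ef-870a-f43feacbbb5b"),
+//			semconv.AWSECSTaskFamily("opentelemetry-family"),
+//			semconv.AWSECSTaskRevision("8"),
+//		))
+//	defer span.End()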
+func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// AWSExtendedRequestID returns an attribute KeyValue conforming to the +// "aws.extended_request_id" semantic conventions. It represents the AWS extended +// request ID as returned in the response header `x-amz-id-2`. +func AWSExtendedRequestID(val string) attribute.KeyValue { + return AWSExtendedRequestIDKey.String(val) +} + +// AWSKinesisStreamName returns an attribute KeyValue conforming to the +// "aws.kinesis.stream_name" semantic conventions. It represents the name of the +// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name` +// parameter of the Kinesis [describe-stream] operation. +// +// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html +// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html +func AWSKinesisStreamName(val string) attribute.KeyValue { + return AWSKinesisStreamNameKey.String(val) +} + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked +// ARN as provided on the `Context` passed to the function ( +// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` +// applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the +// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID +// of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda +// function. It's contents are read by Lambda and used to trigger a function. +// This isn't available in the lambda execution context or the lambda runtime +// environtment. This is going to be populated by the AWS SDK for each language +// when that UUID is present. Some of these operations are +// Create/Delete/Get/List/Update EventSourceMapping. +// +// [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html +func AWSLambdaResourceMappingID(val string) attribute.KeyValue { + return AWSLambdaResourceMappingIDKey.String(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. 
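+//
+// Log group and log stream attributes are typically attached as resource
+// attributes rather than per-span attributes, for example (a sketch; it assumes
+// go.opentelemetry.io/otel/sdk/resource is imported and this package is imported
+// as semconv; the values are placeholders):
+//
+//	res := resource.NewSchemaless(
+//		semconv.AWSLogGroupNames("/aws/lambda/my-function"),
+//		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	)
+//	// res can then be passed to the tracer provider, e.g. via sdktrace.WithResource(res).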
+func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of the +// AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" +// semantic conventions. It represents the AWS request ID as returned in the +// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id` +// . +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" +// semantic conventions. It represents the S3 bucket name the request refers to. +// Corresponds to the `--bucket` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object (in +// the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" +// semantic conventions. It represents the delete request container that +// specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic +// conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the +// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of +// the Secret stored in the Secrets Mangger. +func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue { + return AWSSecretsmanagerSecretARNKey.String(val) +} + +// AWSSNSTopicARN returns an attribute KeyValue conforming to the +// "aws.sns.topic.arn" semantic conventions. 
It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel. +// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // Amazon EC2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // Amazon Fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") + + // AzureResourceProviderNamespaceKey is the attribute Key conforming to the + // "azure.resource_provider.namespace" semantic conventions. It represents the + // [Azure Resource Provider Namespace] as recognized by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") + + // AzureServiceRequestIDKey is the attribute Key conforming to the + // "azure.service.request.id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. 
+// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. +func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the +// "azure.resource_provider.namespace" semantic conventions. It represents the +// [Azure Resource Provider Namespace] as recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzureResourceProviderNamespace(val string) attribute.KeyValue { + return AzureResourceProviderNamespaceKey.String(val) +} + +// AzureServiceRequestID returns an attribute KeyValue conforming to the +// "azure.service.request.id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. +func AzureServiceRequestID(val string) attribute.KeyValue { + return AzureServiceRequestIDKey.String(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. + // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // Strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // Bounded Staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // Session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // Eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // Consistent Prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. 
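+//
+// Illustrative usage (a sketch; it assumes go.opentelemetry.io/otel/trace is
+// imported and this package is imported as semconv; the values are placeholders
+// taken from the examples above):
+//
+//	span := trace.SpanFromContext(ctx)
+//	span.SetAttributes(
+//		semconv.BrowserLanguage("en-US"),
+//		semconv.BrowserMobile(true),
+//		semconv.BrowserPlatform("Android"),
+//	)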
+func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running. +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Namespace: cassandra +const ( + // CassandraConsistencyLevelKey is the attribute Key conforming to the + // "cassandra.consistency.level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from [CQL]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html + CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") + + // CassandraCoordinatorDCKey is the attribute Key conforming to the + // "cassandra.coordinator.dc" semantic conventions. It represents the data + // center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: us-west-2 + CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") + + // CassandraCoordinatorIDKey is the attribute Key conforming to the + // "cassandra.coordinator.id" semantic conventions. It represents the ID of the + // coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af + CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") + + // CassandraPageSizeKey is the attribute Key conforming to the + // "cassandra.page.size" semantic conventions. It represents the fetch size used + // for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 5000 + CassandraPageSizeKey = attribute.Key("cassandra.page.size") + + // CassandraQueryIdempotentKey is the attribute Key conforming to the + // "cassandra.query.idempotent" semantic conventions. It represents the whether + // or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") + + // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the + // "cassandra.speculative_execution.count" semantic conventions. It represents + // the number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 2 + CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") +) + +// CassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "cassandra.coordinator.dc" semantic conventions. It represents the data center +// of the coordinating node for a query. +func CassandraCoordinatorDC(val string) attribute.KeyValue { + return CassandraCoordinatorDCKey.String(val) +} + +// CassandraCoordinatorID returns an attribute KeyValue conforming to the +// "cassandra.coordinator.id" semantic conventions. It represents the ID of the +// coordinating node for a query. 
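+//
+// A minimal usage sketch, with the coordinator ID taken from the documented
+// example above and span assumed to be a trace.Span obtained from a tracer
+// elsewhere:
+//
+//	span.SetAttributes(CassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"))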
+func CassandraCoordinatorID(val string) attribute.KeyValue { + return CassandraCoordinatorIDKey.String(val) +} + +// CassandraPageSize returns an attribute KeyValue conforming to the +// "cassandra.page.size" semantic conventions. It represents the fetch size used +// for paging, i.e. how many rows will be returned at once. +func CassandraPageSize(val int) attribute.KeyValue { + return CassandraPageSizeKey.Int(val) +} + +// CassandraQueryIdempotent returns an attribute KeyValue conforming to the +// "cassandra.query.idempotent" semantic conventions. It represents the whether +// or not the query is idempotent. +func CassandraQueryIdempotent(val bool) attribute.KeyValue { + return CassandraQueryIdempotentKey.Bool(val) +} + +// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to +// the "cassandra.speculative_execution.count" semantic conventions. It +// represents the number of times a query was speculatively executed. Not set or +// `0` if the query was not executed speculatively. +func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return CassandraSpeculativeExecutionCountKey.Int(val) +} + +// Enum values for cassandra.consistency.level +var ( + // All + // Stability: development + CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") + // Each Quorum + // Stability: development + CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") + // Quorum + // Stability: development + CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") + // Local Quorum + // Stability: development + CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") + // One + // Stability: development + CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") + // Two + // Stability: development + CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") + // Three + // Stability: development + CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") + // Local One + // Stability: development + CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") + // Any + // Stability: development + CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") + // Serial + // Stability: development + CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") + // Local Serial + // Stability: development + CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") +) + +// Namespace: cicd +const ( + // CICDPipelineActionNameKey is the attribute Key conforming to the + // "cicd.pipeline.action.name" semantic conventions. It represents the kind of + // action a pipeline run is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BUILD", "RUN", "SYNC" + CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name") + + // CICDPipelineNameKey is the attribute Key conforming to the + // "cicd.pipeline.name" semantic conventions. It represents the human readable + // name of the pipeline within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Build and Test", "Lint", "Deploy Go Project", + // "deploy_to_environment" + CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") + + // CICDPipelineResultKey is the attribute Key conforming to the + // "cicd.pipeline.result" semantic conventions. 
It represents the result of a + // pipeline run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") + + // CICDPipelineRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.run.id" semantic conventions. It represents the unique + // identifier of a pipeline run within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "120912" + CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") + + // CICDPipelineRunStateKey is the attribute Key conforming to the + // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline + // run goes through these states during its lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pending", "executing", "finalizing" + CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") + + // CICDPipelineRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of + // the pipeline run, providing the complete address in order to locate and + // identify the pipeline run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") + + // CICDPipelineTaskNameKey is the attribute Key conforming to the + // "cicd.pipeline.task.name" semantic conventions. It represents the human + // readable name of a task within a pipeline. Task here most closely aligns with + // a [computing process] in a pipeline. Other terms for tasks include commands, + // steps, and procedures. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" + // + // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) + CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") + + // CICDPipelineTaskRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique + // identifier of a task run within a pipeline. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "12097" + CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") + + // CICDPipelineTaskRunResultKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.result" semantic conventions. It represents the + // result of a task run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") + + // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the + // [URL] of the pipeline task run, providing the complete address in order to + // locate and identify the pipeline task run. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. +func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, eg. 
due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the pipeline run. + // Stability: development + CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") + // The pipeline run failed due to an error in the CICD system, eg. due to the + // worker being killed. + // Stability: development + CICDPipelineResultError = CICDPipelineResultKey.String("error") + // A timeout caused the pipeline run to be interrupted. + // Stability: development + CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") + // The pipeline run was cancelled, eg. by a user manually cancelling the + // pipeline run. + // Stability: development + CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") + // The pipeline run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") +) + +// Enum values for cicd.pipeline.run.state +var ( + // The run pending state spans from the event triggering the pipeline run until + // the execution of the run starts (eg. time spent in a queue, provisioning + // agents, creating run resources). + // + // Stability: development + CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") + // The executing state spans the execution of any run tasks (eg. build, test). + // Stability: development + CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") + // The finalizing state spans from when the run has finished executing (eg. + // cleanup of run resources). + // Stability: development + CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") +) + +// Enum values for cicd.pipeline.task.run.result +var ( + // The task run finished successfully. + // Stability: development + CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") + // The task run did not finish successfully, eg. due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the task run. + // Stability: development + CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") + // The task run failed due to an error in the CICD system, eg. due to the worker + // being killed. + // Stability: development + CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") + // A timeout caused the task run to be interrupted. + // Stability: development + CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") + // The task run was cancelled, eg. by a user manually cancelling the task run. + // Stability: development + CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") + // The task run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") +) + +// Enum values for cicd.pipeline.task.type +var ( + // build + // Stability: development + CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") + // test + // Stability: development + CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") + // deploy + // Stability: development + CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") +) + +// Enum values for cicd.worker.state +var ( + // The worker is not performing work for the CICD system. 
It is available to the + // CICD system to perform work on (online / idle). + // Stability: development + CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") + // The worker is performing work for the CICD system. + // Stability: development + CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") + // The worker is not available to the CICD system (disconnected / down). + // Stability: development + CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") +) + +// Namespace: client +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.address` SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" semantic + // conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.port` SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the "client.address" +// semantic conventions. It represents the client address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// Namespace: cloud +const ( + // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" + // semantic conventions. It represents the cloud account ID the resource is + // assigned to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "111111111111", "opentelemetry" + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to increase + // availability. Availability zone represents the zone where the resource is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-east-1c" + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic + // conventions. It represents the geographical region within a cloud provider. + // When associated with a resource, this attribute specifies the region where + // the resource operates. When calling services or APIs deployed on a cloud, + // this attribute identifies the region where the called destination is + // deployed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1", "us-east-1" + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions], [AWS regions], [Azure regions], + // [Google Cloud regions], or [Tencent Cloud regions]. + // + // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm + // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ + // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ + // [Google Cloud regions]: https://cloud.google.com/about/locations + // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" + // semantic conventions. It represents the cloud provider-specific native + // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a + // [fully qualified resource ID] on Azure, a [full resource name] on GCP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", + // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", + // "/subscriptions//resourceGroups/ + // /providers/Microsoft.Web/sites//functions/" + // Note: On some cloud providers, it may not be possible to determine the full + // ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute + // and they apply: + // + // - **AWS Lambda:** The function [ARN]. + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix] + // with the resolved function version, as the same runtime instance may be + // invocable with + // multiple different aliases. + // - **GCP:** The [URI of the resource] + // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, + // *not* the function app, having the form + // + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` + // . + // This means that a span attribute MUST be used, as an Azure function app + // can host multiple functions that would usually share + // a TracerProvider. 
+ // + // + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + // [full resource name]: https://google.aip.dev/122#full-resource-names + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html + // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names + // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" +// semantic conventions. It represents the geographical region within a cloud +// provider. When associated with a resource, this attribute specifies the region +// where the resource operates. When calling services or APIs deployed on a +// cloud, this attribute identifies the region where the called destination is +// deployed. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] +// on GCP). 
+// +// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id +// [full resource name]: https://google.aip.dev/122#full-resource-names +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Enum values for cloud.platform +var ( + // Alibaba Cloud Elastic Compute Service + // Stability: development + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + // Stability: development + CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + // Stability: development + CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + // Stability: development + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + // Stability: development + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + // Stability: development + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + // Stability: development + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + // Stability: development + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + // Stability: development + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + // Stability: development + CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + // Stability: development + CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") + // Azure Container Apps + // Stability: development + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") + // Azure Container Instances + // Stability: development + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") + // Azure Kubernetes Service + // Stability: development + CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") + // Azure Functions + // Stability: development + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") + // Azure App Service + // Stability: development + CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") + // Azure Red Hat OpenShift + // Stability: development + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") + // Google Bare Metal Solution (BMS) + // Stability: development + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + // Stability: development + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + // Stability: development + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + // Stability: development + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + // Stability: development + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + // Stability: development + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + // Stability: 
development + CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + // Stability: development + CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") + // Compute on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") + // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") + // Tencent Cloud Cloud Virtual Machine (CVM) + // Stability: development + CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + // Stability: development + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + // Stability: development + CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Enum values for cloud.provider +var ( + // Alibaba Cloud + // Stability: development + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + // Stability: development + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + // Stability: development + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + // Stability: development + CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") + // Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") + // Tencent Cloud + // Stability: development + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// Namespace: cloudevents +const ( + // CloudEventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the [event_id] + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" + // + // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id + CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudEventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the [source] + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", + // "my-service" + // + // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 + CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudEventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents specification] which the event uses. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + // + // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion + CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudEventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the [subject] + // of the event in the context of the event producer (identified by source). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: mynewfile.jpg + // + // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject + CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudEventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the [event_type] + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" + // + // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type + CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudEventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the [event_id] +// uniquely identifies the event. +// +// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id +func CloudEventsEventID(val string) attribute.KeyValue { + return CloudEventsEventIDKey.String(val) +} + +// CloudEventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the [source] +// identifies the context in which an event happened. +// +// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 +func CloudEventsEventSource(val string) attribute.KeyValue { + return CloudEventsEventSourceKey.String(val) +} + +// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the +// "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents specification] which the event uses. +// +// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion +func CloudEventsEventSpecVersion(val string) attribute.KeyValue { + return CloudEventsEventSpecVersionKey.String(val) +} + +// CloudEventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the [subject] +// of the event in the context of the event producer (identified by source). +// +// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject +func CloudEventsEventSubject(val string) attribute.KeyValue { + return CloudEventsEventSubjectKey.String(val) +} + +// CloudEventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the [event_type] +// contains a value describing the type of event related to the originating +// occurrence. 
+// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. It represents the UID + // identifying the process. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
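+//
+// A minimal usage sketch using the documented example value; the attrs slice
+// here is illustrative and can be passed to any API that accepts
+// attribute.KeyValue values:
+//
+//	attrs := []attribute.KeyValue{CloudFoundrySpaceName("my-space-name")}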
+func CloudFoundrySpaceName(val string) attribute.KeyValue { + return CloudFoundrySpaceNameKey.String(val) +} + +// CloudFoundrySystemID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.id" semantic conventions. It represents a guid or another +// name describing the event source. +func CloudFoundrySystemID(val string) attribute.KeyValue { + return CloudFoundrySystemIDKey.String(val) +} + +// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid +// describing the concrete instance of the event source. +func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { + return CloudFoundrySystemInstanceIDKey.String(val) +} + +// Namespace: code +const ( + // CodeColumnNumberKey is the attribute Key conforming to the + // "code.column.number" semantic conventions. It represents the column number in + // `code.file.path` best representing the operation. It SHOULD point within the + // code unit named in `code.function.name`. This attribute MUST NOT be used on + // the Profile signal since the data is already captured in 'message Line'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeColumnNumberKey = attribute.Key("code.column.number") + + // CodeFilePathKey is the attribute Key conforming to the "code.file.path" + // semantic conventions. It represents the source code file name that identifies + // the code unit as uniquely as possible (preferably an absolute file path). + // This attribute MUST NOT be used on the Profile signal since the data is + // already captured in 'message Function'. This constraint is imposed to prevent + // redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: /usr/local/MyApplication/content_root/app/index.php + CodeFilePathKey = attribute.Key("code.file.path") + + // CodeFunctionNameKey is the attribute Key conforming to the + // "code.function.name" semantic conventions. It represents the method or + // function fully-qualified name without arguments. The value should fit the + // natural representation of the language runtime, which is also likely the same + // used within `code.stacktrace` attribute value. This attribute MUST NOT be + // used on the Profile signal since the data is already captured in 'message + // Function'. This constraint is imposed to prevent redundancy and maintain data + // integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "com.example.MyHttpService.serveRequest", + // "GuzzleHttp\Client::transfer", "fopen" + // Note: Values and format depends on each language runtime, thus it is + // impossible to provide an exhaustive list of examples. + // The values are usually the same (or prefixes of) the ones found in native + // stack trace representation stored in + // `code.stacktrace` without information on arguments. 
+ // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. The value should fit the +// natural representation of the language runtime, which is also likely the same +// used within `code.stacktrace` attribute value. 
This attribute MUST NOT be used +// on the Profile signal since the data is already captured in 'message +// Function'. This constraint is imposed to prevent redundancy and maintain data +// integrity. +func CodeFunctionName(val string) attribute.KeyValue { + return CodeFunctionNameKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the +// "code.line.number" semantic conventions. It represents the line number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a string +// in the natural representation for the language runtime. The representation is +// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the +// Profile signal since the data is already captured in 'message Location'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +// +// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// Namespace: container +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used to + // run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol" + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol", "--config", "config.yaml" + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full command + // run by the container as a single string representing the full command. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol --config config.yaml" + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCSIPluginNameKey is the attribute Key conforming to the + // "container.csi.plugin.name" semantic conventions. It represents the name of + // the CSI ([Container Storage Interface]) plugin used by the volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pd.csi.storage.gke.io" + // Note: This can sometimes be referred to as a "driver" in CSI implementations. + // This should represent the `name` field of the GetPluginInfo RPC. 
+ // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") + + // ContainerCSIVolumeIDKey is the attribute Key conforming to the + // "container.csi.volume.id" semantic conventions. It represents the unique + // volume ID returned by the CSI ([Container Storage Interface]) plugin. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" + // Note: This can sometimes be referred to as a "volume handle" in CSI + // implementations. This should represent the `Volume.volume_id` field in CSI + // spec. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") + + // ContainerIDKey is the attribute Key conforming to the "container.id" semantic + // conventions. It represents the container ID. Usually a UUID, as for example + // used to [identify Docker containers]. The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a3bf90e006b2" + // + // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime specific + // image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect [API] + // endpoint. + // K8s defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` + // . + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + // + // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of the + // image the container was built on. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gcr.io/opentelemetry/operator" + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the repo + // digests of the container image as provided by the container runtime. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + // Note: [Docker] and [CRI] report those under the `RepoDigests` field. + // + // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image Inspect]. Should be only + // the `` section of the full name for example from + // `registry.example.com/my-org/my-image:`. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v1.27.1", "3.5.7-0" + // + // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-autoconf" + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeDescriptionKey is the attribute Key conforming to the + // "container.runtime.description" semantic conventions. It represents a + // description about the runtime which could include, for example details about + // the CRI/API version being used or other customisations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker://19.3.1 - CRI: 1.22.0" + ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description") + + // ContainerRuntimeNameKey is the attribute Key conforming to the + // "container.runtime.name" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker", "containerd", "rkt" + ContainerRuntimeNameKey = attribute.Key("container.runtime.name") + + // ContainerRuntimeVersionKey is the attribute Key conforming to the + // "container.runtime.version" semantic conventions. It represents the version + // of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0.0 + ContainerRuntimeVersionKey = attribute.Key("container.runtime.version") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. 
It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string representing the full command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions.
It represents the container labels,
+// `<key>` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("container.label."+key, val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntimeDescription returns an attribute KeyValue conforming to the
+// "container.runtime.description" semantic conventions. It represents a
+// description about the runtime which could include, for example details about
+// the CRI/API version being used or other customisations.
+func ContainerRuntimeDescription(val string) attribute.KeyValue {
+ return ContainerRuntimeDescriptionKey.String(val)
+}
+
+// ContainerRuntimeName returns an attribute KeyValue conforming to the
+// "container.runtime.name" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntimeName(val string) attribute.KeyValue {
+ return ContainerRuntimeNameKey.String(val)
+}
+
+// ContainerRuntimeVersion returns an attribute KeyValue conforming to the
+// "container.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ContainerRuntimeVersion(val string) attribute.KeyValue {
+ return ContainerRuntimeVersionKey.String(val)
+}
+
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ CPULogicalNumberKey = attribute.Key("cpu.logical_number")
+
+ // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic
+ // conventions. It represents the mode of the CPU.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "user", "system"
+ CPUModeKey = attribute.Key("cpu.mode")
+)
+
+// CPULogicalNumber returns an attribute KeyValue conforming to the
+// "cpu.logical_number" semantic conventions. It represents the logical CPU
+// number [0..n-1].
+func CPULogicalNumber(val int) attribute.KeyValue {
+ return CPULogicalNumberKey.Int(val)
+}
+
+// Enum values for cpu.mode
+var (
+ // User
+ // Stability: development
+ CPUModeUser = CPUModeKey.String("user")
+ // System
+ // Stability: development
+ CPUModeSystem = CPUModeKey.String("system")
+ // Nice
+ // Stability: development
+ CPUModeNice = CPUModeKey.String("nice")
+ // Idle
+ // Stability: development
+ CPUModeIdle = CPUModeKey.String("idle")
+ // IO Wait
+ // Stability: development
+ CPUModeIOWait = CPUModeKey.String("iowait")
+ // Interrupt
+ // Stability: development
+ CPUModeInterrupt = CPUModeKey.String("interrupt")
+ // Steal
+ // Stability: development
+ CPUModeSteal = CPUModeKey.String("steal")
+ // Kernel
+ // Stability: development
+ CPUModeKernel = CPUModeKey.String("kernel")
+)
+
+// Namespace: db
+const (
+ // DBClientConnectionPoolNameKey is the attribute Key conforming to the
+ // "db.client.connection.pool.name" semantic conventions. It represents the name
+ // of the connection pool; unique within the instrumented application.
In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. + // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. + // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. + DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. 
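+//
+// Illustrative usage sketch (not part of the convention definitions above):
+// building attributes for a database client span against a PostgreSQL server
+// hosting a "customers" database (the value from the documented examples); the
+// surrounding span handling is assumed, not shown:
+//
+//	attrs := []attribute.KeyValue{
+//		DBSystemNamePostgreSQL,
+//		DBNamespace("customers"),
+//	}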
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationBatchSize returns an attribute KeyValue conforming to the
+// "db.operation.batch.size" semantic conventions. It represents the number of
+// queries included in a batch operation.
+func DBOperationBatchSize(val int) attribute.KeyValue {
+ return DBOperationBatchSizeKey.Int(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBOperationParameter returns an attribute KeyValue conforming to the
+// "db.operation.parameter" semantic conventions. It represents a database
+// operation parameter, with `<key>` being the parameter name, and the attribute
+// value being a string representation of the parameter value.
+func DBOperationParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.operation.parameter."+key, val)
+}
+
+// DBQueryParameter returns an attribute KeyValue conforming to the
+// "db.query.parameter" semantic conventions. It represents a database query
+// parameter, with `<key>` being the parameter name, and the attribute value
+// being a string representation of the parameter value.
+func DBQueryParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.query.parameter."+key, val)
+}
+
+// DBQuerySummary returns an attribute KeyValue conforming to the
+// "db.query.summary" semantic conventions. It represents the low cardinality
+// summary of a database query.
+func DBQuerySummary(val string) attribute.KeyValue {
+ return DBQuerySummaryKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the "db.query.text"
+// semantic conventions. It represents the database query being executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
+
+// DBResponseReturnedRows returns an attribute KeyValue conforming to the
+// "db.response.returned_rows" semantic conventions. It represents the number of
+// rows returned by the operation.
+func DBResponseReturnedRows(val int) attribute.KeyValue {
+ return DBResponseReturnedRowsKey.Int(val)
+}
+
+// DBResponseStatusCode returns an attribute KeyValue conforming to the
+// "db.response.status_code" semantic conventions. It represents the database
+// response status code.
+func DBResponseStatusCode(val string) attribute.KeyValue {
+ return DBResponseStatusCodeKey.String(val)
+}
+
+// DBStoredProcedureName returns an attribute KeyValue conforming to the
+// "db.stored_procedure.name" semantic conventions. It represents the name of a
+// stored procedure within the database.
+func DBStoredProcedureName(val string) attribute.KeyValue {
+ return DBStoredProcedureNameKey.String(val)
+}
+
+// Enum values for db.client.connection.state
+var (
+ // idle
+ // Stability: development
+ DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle")
+ // used
+ // Stability: development
+ DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used")
+)
+
+// Enum values for db.system.name
+var (
+ // Some other SQL database. Fallback only.
+ // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // + // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = 
DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // [SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. +func DeploymentName(val string) attribute.KeyValue { + return DeploymentNameKey.String(val) +} + +// Enum values for deployment.status +var ( + // failed + // Stability: development + DeploymentStatusFailed = DeploymentStatusKey.String("failed") + // succeeded + // Stability: development + DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") +) + +// Namespace: destination +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the destination + // address - domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the source side, and when communicating through an + // intermediary, `destination.address` SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the "destination.port" + // semantic conventions. It represents the destination port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Namespace: device +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123456789012345", "01:23:45:67:89:AB" + // Note: Its value SHOULD be identical for all apps on a device and it SHOULD + // NOT change if an app is uninstalled and re-installed. + // However, it might be resettable by the user for all apps on a device. + // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be + // used as values. + // + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. + // + // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution + // > should be taken when storing personal data or anything which can identify a + // > user. GDPR and data protection laws may apply, + // > ensure you do your own due diligence.> Due to these reasons, this + // > identifier is not recommended for consumer applications and will likely + // > result in rejection from both Google Play and App Store. + // > However, it may be appropriate for specific enterprise scenarios, such as + // > kiosk devices or enterprise-managed devices, with appropriate compliance + // > clearance. + // > Any instrumentation providing this identifier MUST implement it as an + // > opt-in feature.> See [`app.installation.id`]> for a more + // > privacy-preserving alternative. + // + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids + // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of the + // device manufacturer. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apple", "Samsung" + // Note: The Android OS provides this field via [Build]. iOS apps SHOULD + // hardcode the value `Apple`. + // + // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone3,4", "SM-G920F" + // Note: It's recommended this value represents a machine-readable version of + // the model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" + // semantic conventions. It represents the marketing name for the device model. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic +// conventions. It represents a unique identifier representing the device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name for +// the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// Namespace: disk +const ( + // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" + // semantic conventions. It represents the disk IO operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "read" + DiskIODirectionKey = attribute.Key("disk.io.direction") +) + +// Enum values for disk.io.direction +var ( + // read + // Stability: development + DiskIODirectionRead = DiskIODirectionKey.String("read") + // write + // Stability: development + DiskIODirectionWrite = DiskIODirectionKey.String("write") +) + +// Namespace: dns +const ( + // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic + // conventions. It represents the list of IPv4 or IPv6 addresses resolved during + // DNS lookup. 
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ DNSAnswersKey = attribute.Key("dns.answers")
+
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers"
+// semantic conventions. It represents the list of IPv4 or IPv6 addresses
+// resolved during DNS lookup.
+func DNSAnswers(val ...string) attribute.KeyValue {
+ return DNSAnswersKey.StringSlice(val)
+}
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents
+ // the human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents
+// the human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions.
It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It describes a class of error the operation ended
+ // with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query time
+ // when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
+ // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? 
* + // + // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name of + // the source on which the triggering operation was performed. For example, in + // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the + // database name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myBucketName", "myDbName" + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or S3 is + // the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myFile.txt", "myTableName" + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the describes + // the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string containing + // the time when the data was accessed in the [ISO 8601] format expressed in + // [UTC]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a string, + // that will be potentially reused for other invocations to the same + // function/function version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" + // Note: - **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation ID of + // the current function invocation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" + // semantic conventions. It represents the name of the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: my-function + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. 
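+ //
+ // A minimal usage sketch, assuming an active span obtained from
+ // go.opentelemetry.io/otel/trace and illustrative values, combining the
+ // faas.invoked_* attributes defined in this block:
+ //
+ //	span.SetAttributes(
+ //		FaaSInvokedName("my-function"),
+ //		FaaSInvokedProviderAWS,
+ //		FaaSInvokedRegion("eu-central-1"),
+ //	)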
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud region of + // the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: eu-central-1 + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" + // semantic conventions. It represents the amount of memory available to the + // serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information (which must be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this runtime + // instance executes. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-function", "myazurefunctionapp/some-function-name" + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function.name`] + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + // + // - **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + // + // + // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation time + // in the [ISO 8601] format expressed in [UTC]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. 
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. 
+ // Stability: development + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Enum values for faas.invoked_provider +var ( + // Alibaba Cloud + // Stability: development + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + // Stability: development + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + // Stability: development + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// Enum values for faas.trigger +var ( + // A response to some data source operation such as a database or filesystem + // read/write + // Stability: development + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + // Stability: development + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + // Stability: development + FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + // Stability: development + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + // Stability: development + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Namespace: feature_flag +const ( + // FeatureFlagContextIDKey is the attribute Key conforming to the + // "feature_flag.context.id" semantic conventions. It represents the unique + // identifier for the flag evaluation context. For example, the targeting key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" + FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") + + // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" + // semantic conventions. It represents the lookup key of the feature flag. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "logo-color" + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider.name" semantic conventions. It represents the + // identifies the feature flag provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "Flag Manager" + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") + + // FeatureFlagResultReasonKey is the attribute Key conforming to the + // "feature_flag.result.reason" semantic conventions. It represents the reason + // code which shows how a feature flag value was determined. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "static", "targeting_match", "error", "default" + FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") + + // FeatureFlagResultValueKey is the attribute Key conforming to the + // "feature_flag.result.value" semantic conventions. It represents the evaluated + // value of the feature flag. 
+ // + // Type: any + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "#ff0000", true, 3 + // Note: With some feature flag providers, feature flag results can be quite + // large or contain private or sensitive details. + // Because of this, `feature_flag.result.variant` is often the preferred + // attribute if it is available. + // + // It may be desirable to redact or otherwise limit the size and scope of + // `feature_flag.result.value` if possible. + // Because the evaluated flag value is unstructured and may be any type, it is + // left to the instrumentation author to determine how best to achieve this. + FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value") + + // FeatureFlagResultVariantKey is the attribute Key conforming to the + // "feature_flag.result.variant" semantic conventions. It represents a semantic + // identifier for an evaluated flag value. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "red", "true", "on" + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant") + + // FeatureFlagSetIDKey is the attribute Key conforming to the + // "feature_flag.set.id" semantic conventions. It represents the identifier of + // the [flag set] to which the feature flag belongs. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "proj-1", "ab98sgs", "service1/dev" + // + // [flag set]: https://openfeature.dev/specification/glossary/#flag-set + FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") + + // FeatureFlagVersionKey is the attribute Key conforming to the + // "feature_flag.version" semantic conventions. It represents the version of the + // ruleset used during the evaluation. This may be any stable value which + // uniquely identifies the ruleset. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "1", "01ABCDEF" + FeatureFlagVersionKey = attribute.Key("feature_flag.version") +) + +// FeatureFlagContextID returns an attribute KeyValue conforming to the +// "feature_flag.context.id" semantic conventions. It represents the unique +// identifier for the flag evaluation context. For example, the targeting key. +func FeatureFlagContextID(val string) attribute.KeyValue { + return FeatureFlagContextIDKey.String(val) +} + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the lookup key of the +// feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider.name" semantic conventions. It represents the +// identifies the feature flag provider. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagResultVariant returns an attribute KeyValue conforming to the +// "feature_flag.result.variant" semantic conventions. It represents a semantic +// identifier for an evaluated flag value. 
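+//
+// A minimal sketch of recording a flag evaluation with the helpers in this
+// namespace, assuming an active span named "span"; the key, provider, and
+// variant values are illustrative:
+//
+//	span.SetAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagResultVariant("red"),
+//		FeatureFlagResultReasonTargetingMatch,
+//	)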
+func FeatureFlagResultVariant(val string) attribute.KeyValue { + return FeatureFlagResultVariantKey.String(val) +} + +// FeatureFlagSetID returns an attribute KeyValue conforming to the +// "feature_flag.set.id" semantic conventions. It represents the identifier of +// the [flag set] to which the feature flag belongs. +// +// [flag set]: https://openfeature.dev/specification/glossary/#flag-set +func FeatureFlagSetID(val string) attribute.KeyValue { + return FeatureFlagSetIDKey.String(val) +} + +// FeatureFlagVersion returns an attribute KeyValue conforming to the +// "feature_flag.version" semantic conventions. It represents the version of the +// ruleset used during the evaluation. This may be any stable value which +// uniquely identifies the ruleset. +func FeatureFlagVersion(val string) attribute.KeyValue { + return FeatureFlagVersionKey.String(val) +} + +// Enum values for feature_flag.result.reason +var ( + // The resolved value is static (no dynamic evaluation). + // Stability: release_candidate + FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") + // The resolved value fell back to a pre-configured value (no dynamic evaluation + // occurred or dynamic evaluation yielded no result). + // Stability: release_candidate + FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") + // The resolved value was the result of a dynamic evaluation, such as a rule or + // specific user-targeting. + // Stability: release_candidate + FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") + // The resolved value was the result of pseudorandom assignment. + // Stability: release_candidate + FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") + // The resolved value was retrieved from cache. + // Stability: release_candidate + FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") + // The resolved value was the result of the flag being disabled in the + // management system. + // Stability: release_candidate + FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") + // The reason for the resolved value could not be determined. + // Stability: release_candidate + FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") + // The resolved value is non-authoritative or possibly out of date + // Stability: release_candidate + FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") + // The resolved value was the result of an error. + // Stability: release_candidate + FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") +) + +// Namespace: file +const ( + // FileAccessedKey is the attribute Key conforming to the "file.accessed" + // semantic conventions. It represents the time when the file was last accessed, + // in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileAccessedKey = attribute.Key("file.accessed") + + // FileAttributesKey is the attribute Key conforming to the "file.attributes" + // semantic conventions. It represents the array of file attributes. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "readonly", "hidden" + // Note: Attributes names depend on the OS or file system. 
Here’s a + // non-exhaustive list of values expected for this attribute: `archive`, + // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, + // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, + // `write`. + FileAttributesKey = attribute.Key("file.attributes") + + // FileChangedKey is the attribute Key conforming to the "file.changed" semantic + // conventions. It represents the time when the file attributes or metadata was + // last changed, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: `file.changed` captures the time when any of the file's properties or + // attributes (including the content) are changed, while `file.modified` + // captures the timestamp when the file content is modified. + FileChangedKey = attribute.Key("file.changed") + + // FileCreatedKey is the attribute Key conforming to the "file.created" semantic + // conventions. It represents the time when the file was created, in ISO 8601 + // format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileCreatedKey = attribute.Key("file.created") + + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is located. + // It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/user", "C:\Program Files\MyApp" + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the leading + // dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") + + // FileForkNameKey is the attribute Key conforming to the "file.fork_name" + // semantic conventions. It represents the name of the fork. A fork is + // additional data associated with a filesystem object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Zone.Identifier" + // Note: On Linux, a resource fork is used to store additional data with a + // filesystem object. A file always has at least one fork for the data portion, + // and additional forks may exist. + // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default + // data stream for a file is just called $DATA. Zone.Identifier is commonly used + // by Windows to track contents downloaded from the Internet. An ADS is + // typically of the form: C:\path\to\filename.extension:some_fork_name, and + // some_fork_name is the value that should populate `fork_name`. + // `filename.extension` should populate `file.name`, and `extension` should + // populate `file.extension`. The full path, `file.path`, will include the fork + // name. + FileForkNameKey = attribute.Key("file.fork_name") + + // FileGroupIDKey is the attribute Key conforming to the "file.group.id" + // semantic conventions. 
It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. +func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. +func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. 
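+//
+// A minimal sketch combining several file.* helpers on a span, assuming an
+// active span named "span"; the path, name, extension, and size values are
+// illustrative:
+//
+//	span.SetAttributes(
+//		FilePath("/home/alice/example.png"),
+//		FileName("example.png"),
+//		FileExtension("png"),
+//		FileSize(30727),
+//	)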
+func FileModified(val string) attribute.KeyValue { + return FileModifiedKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" semantic +// conventions. It represents the name of the file including the extension, +// without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" +// semantic conventions. It represents the user ID (UID) or security identifier +// (SID) of the file owner. +func FileOwnerID(val string) attribute.KeyValue { + return FileOwnerIDKey.String(val) +} + +// FileOwnerName returns an attribute KeyValue conforming to the +// "file.owner.name" semantic conventions. It represents the username of the file +// owner. +func FileOwnerName(val string) attribute.KeyValue { + return FileOwnerNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" semantic +// conventions. It represents the full path to the file, including the file name. +// It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" semantic +// conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the +// "file.symbolic_link.target_path" semantic conventions. It represents the path +// to the target of a symbolic link. +func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { + return FileSymbolicLinkTargetPathKey.String(val) +} + +// Namespace: gcp +const ( + // GCPAppHubApplicationContainerKey is the attribute Key conforming to the + // "gcp.apphub.application.container" semantic conventions. It represents the + // container within GCP where the AppHub application is defined. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-container-project" + GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container") + + // GCPAppHubApplicationIDKey is the attribute Key conforming to the + // "gcp.apphub.application.id" semantic conventions. It represents the name of + // the application as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-application" + GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id") + + // GCPAppHubApplicationLocationKey is the attribute Key conforming to the + // "gcp.apphub.application.location" semantic conventions. It represents the GCP + // zone or region where the application is defined. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1" + GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location") + + // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.criticality_type" semantic conventions. It represents the + // criticality of a service indicates its importance to the business. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type") + + // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.environment_type" semantic conventions. It represents the + // environment of a service is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type") + + // GCPAppHubServiceIDKey is the attribute Key conforming to the + // "gcp.apphub.service.id" semantic conventions. It represents the name of the + // service as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-service" + GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id") + + // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.criticality_type" semantic conventions. It represents + // the criticality of a workload indicates its importance to the business. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type") + + // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.environment_type" semantic conventions. It represents + // the environment of a workload is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type") + + // GCPAppHubWorkloadIDKey is the attribute Key conforming to the + // "gcp.apphub.workload.id" semantic conventions. It represents the name of the + // workload as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-workload" + GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id") + + // GCPClientServiceKey is the attribute Key conforming to the + // "gcp.client.service" semantic conventions. It represents the identifies the + // Google Cloud service for which the official client library is intended. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "appengine", "run", "firestore", "alloydb", "spanner" + // Note: Intended to be a stable identifier for Google Cloud client libraries + // that is uniform across implementation languages. The value should be derived + // from the canonical service domain for the service; for example, + // 'foo.googleapis.com' should result in a value of 'foo'. 
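+ //
+ // Following the derivation rule above, a client library for
+ // spanner.googleapis.com would record, for example:
+ //
+ //	GCPClientService("spanner")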
+ GCPClientServiceKey = attribute.Key("gcp.client.service") + + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of + // the Cloud Run [execution] being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`] environment variable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "job-name-xxxx", "sample-job-mdw84" + // + // [execution]: https://cloud.google.com/run/docs/managing/job-executions + // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index + // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] + // environment variable. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1 + // + // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") + + // GCPGCEInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname + // of a GCE instance. This is the full value of the default or [custom hostname] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-host1234.example.com", + // "sample-vm.us-west1-b.c.my-project.internal" + // + // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm + GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGCEInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance name + // of a GCE instance. This is the value provided by `host.name`, the visible + // name of the instance in the Cloud Console UI, and the prefix for the default + // hostname of the instance as defined by the [default internal DNS name]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-1", "my-vm-name" + // + // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names + GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the +// "gcp.apphub.application.container" semantic conventions. It represents the +// container within GCP where the AppHub application is defined. +func GCPAppHubApplicationContainer(val string) attribute.KeyValue { + return GCPAppHubApplicationContainerKey.String(val) +} + +// GCPAppHubApplicationID returns an attribute KeyValue conforming to the +// "gcp.apphub.application.id" semantic conventions. It represents the name of +// the application as configured in AppHub. +func GCPAppHubApplicationID(val string) attribute.KeyValue { + return GCPAppHubApplicationIDKey.String(val) +} + +// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the +// "gcp.apphub.application.location" semantic conventions. It represents the GCP +// zone or region where the application is defined. 
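+//
+// A minimal sketch combining the AppHub application helpers, assuming an
+// active span named "span"; the container, application, and location values
+// are illustrative:
+//
+//	span.SetAttributes(
+//		GCPAppHubApplicationContainer("projects/my-container-project"),
+//		GCPAppHubApplicationID("my-application"),
+//		GCPAppHubApplicationLocation("us-central1"),
+//	)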
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue { + return GCPAppHubApplicationLocationKey.String(val) +} + +// GCPAppHubServiceID returns an attribute KeyValue conforming to the +// "gcp.apphub.service.id" semantic conventions. It represents the name of the +// service as configured in AppHub. +func GCPAppHubServiceID(val string) attribute.KeyValue { + return GCPAppHubServiceIDKey.String(val) +} + +// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the +// "gcp.apphub.workload.id" semantic conventions. It represents the name of the +// workload as configured in AppHub. +func GCPAppHubWorkloadID(val string) attribute.KeyValue { + return GCPAppHubWorkloadIDKey.String(val) +} + +// GCPClientService returns an attribute KeyValue conforming to the +// "gcp.client.service" semantic conventions. It represents the identifies the +// Google Cloud service for which the official client library is intended. +func GCPClientService(val string) attribute.KeyValue { + return GCPClientServiceKey.String(val) +} + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of +// the Cloud Run [execution] being run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`] environment variable. +// +// [execution]: https://cloud.google.com/run/docs/managing/job-executions +// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] +// environment variable. +// +// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom hostname] +// . +// +// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm +func GCPGCEInstanceHostname(val string) attribute.KeyValue { + return GCPGCEInstanceHostnameKey.String(val) +} + +// GCPGCEInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance name +// of a GCE instance. This is the value provided by `host.name`, the visible name +// of the instance in the Cloud Console UI, and the prefix for the default +// hostname of the instance as defined by the [default internal DNS name]. +// +// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names +func GCPGCEInstanceName(val string) attribute.KeyValue { + return GCPGCEInstanceNameKey.String(val) +} + +// Enum values for gcp.apphub.service.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH") + // Medium impact. 
+ // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Math Tutor", "Fiction Writer" + GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") + + // GenAIConversationIDKey is the attribute Key conforming to the + // "gen_ai.conversation.id" semantic conventions. It represents the unique + // identifier for a conversation (session, thread), used to store and correlate + // messages within this conversation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" + GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") + + // GenAIDataSourceIDKey is the attribute Key conforming to the + // "gen_ai.data_source.id" semantic conventions. It represents the data source + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "H7STPQYOND" + // Note: Data sources are used by AI agents and RAG applications to store + // grounding data. A data source may be an external database, object store, + // document collection, website, or any other storage system used by the GenAI + // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier + // used by the GenAI system rather than a name specific to the external storage, + // such as a database or object store. Semantic conventions referencing + // `gen_ai.data_source.id` MAY also leverage additional attributes, such as + // `db.*`, to further identify and describe the data source. + GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") + + // GenAIInputMessagesKey is the attribute Key conforming to the + // "gen_ai.input.messages" semantic conventions. It represents the chat history + // provided to the model as an input. + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n + // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n + // "parts": [\n {\n "type": "tool_call",\n "id": + // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n + // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n + // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n + // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" + // Note: Instrumentations MUST follow [Input messages JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Messages MUST be provided in the order they were sent to the model. + // Instrumentations MAY provide a way for users to filter or truncate + // input messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") + + // GenAIOperationNameKey is the attribute Key conforming to the + // "gen_ai.operation.name" semantic conventions. It represents the name of the + // operation being performed. 
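+ //
+ // A minimal sketch of how a chat-completion instrumentation might combine
+ // keys from this namespace, assuming an active span named "span"; the
+ // operation, model, and conversation values are illustrative:
+ //
+ //	span.SetAttributes(
+ //		GenAIOperationNameKey.String("chat"),
+ //		GenAIRequestModelKey.String("gpt-4"),
+ //		GenAIConversationIDKey.String("conv_5j66UpCpwteGg4YSxUnt7lPY"),
+ //	)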
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If one of the predefined values applies, but specific system uses a + // different name it's RECOMMENDED to document it in the semantic conventions + // for specific GenAI system and use system-specific name in the + // instrumentation. If a different name is not documented, instrumentation + // libraries SHOULD use applicable predefined value. + GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") + + // GenAIOutputMessagesKey is the attribute Key conforming to the + // "gen_ai.output.messages" semantic conventions. It represents the messages + // returned by the model where each message represents a specific model response + // (choice, candidate). + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n + // "content": "The weather in Paris is currently rainy with a temperature of + // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n" + // Note: Instrumentations MUST follow [Output messages JSON schema] + // + // Each message represents a single output choice/candidate generated by + // the model. Each message corresponds to exactly one generation + // (choice/candidate) and vice versa - one choice cannot be split across + // multiple messages or one message cannot contain parts from multiple choices. + // + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // output messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages") + + // GenAIOutputTypeKey is the attribute Key conforming to the + // "gen_ai.output.type" semantic conventions. It represents the represents the + // content type requested by the client. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute SHOULD be used when the client requests output of a + // specific type. The model may return zero or more outputs of this type. + // This attribute specifies the output modality and not the actual output + // format. For example, if an image is requested, the actual output could be a + // URL pointing to an image file. + // Additional output format details may be recorded in the future in the + // `gen_ai.output.{type}.*` attributes. + GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") + + // GenAIProviderNameKey is the attribute Key conforming to the + // "gen_ai.provider.name" semantic conventions. It represents the Generative AI + // provider as identified by the client or server instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The attribute SHOULD be set based on the instrumentation's best + // knowledge and may differ from the actual model provider. 
+ // + // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms + // are accessible using the OpenAI REST API and corresponding client libraries, + // but may proxy or host models from different providers. + // + // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` + // attributes may help identify the actual system in use. + // + // The `gen_ai.provider.name` attribute acts as a discriminator that + // identifies the GenAI telemetry format flavor specific to that provider + // within GenAI semantic conventions. + // It SHOULD be set consistently with provider-specific attributes and signals. + // For example, GenAI spans, metrics, and events related to AWS Bedrock + // should have the `gen_ai.provider.name` set to `aws.bedrock` and include + // applicable `aws.bedrock.*` attributes and are not expected to include + // `openai.*` attributes. + GenAIProviderNameKey = attribute.Key("gen_ai.provider.name") + + // GenAIRequestChoiceCountKey is the attribute Key conforming to the + // "gen_ai.request.choice.count" semantic conventions. It represents the target + // number of candidate completions to return. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3 + GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count") + + // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the + // "gen_ai.request.encoding_formats" semantic conventions. It represents the + // encoding formats requested in an embeddings operation, if specified. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "base64"], ["float", "binary" + // Note: In some GenAI systems the encoding formats are called embedding types. + // Also, some GenAI systems only accept a single format per request. + GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats") + + // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the + // "gen_ai.request.frequency_penalty" semantic conventions. It represents the + // frequency penalty setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty") + + // GenAIRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum + // number of tokens the model generates for a request. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAIRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of the + // GenAI model a request is being made to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: gpt-4 + GenAIRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the + // "gen_ai.request.presence_penalty" semantic conventions. It represents the + // presence penalty setting for the GenAI request. 
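+ //
+ // A minimal sketch (editorial, assuming a span for the model call is in
+ // scope) of how this and related request parameters might be recorded:
+ //
+ //	span.SetAttributes(
+ //		GenAIRequestPresencePenalty(0.1),
+ //		GenAIRequestTemperature(0.0),
+ //		GenAIRequestMaxTokens(100),
+ //	)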
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty") + + // GenAIRequestSeedKey is the attribute Key conforming to the + // "gen_ai.request.seed" semantic conventions. It represents the requests with + // same seed value more likely to return same result. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed") + + // GenAIRequestStopSequencesKey is the attribute Key conforming to the + // "gen_ai.request.stop_sequences" semantic conventions. It represents the list + // of sequences that the model will use to stop generating further tokens. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "forest", "lived" + GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences") + + // GenAIRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.0 + GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAIRequestTopKKey is the attribute Key conforming to the + // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling + // setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k") + + // GenAIRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling + // setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAIResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to each + // generation received. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "stop"], ["stop", "length" + GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAIResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "chatcmpl-123" + GenAIResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAIResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of the + // model that generated the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gpt-4-0613" + GenAIResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAISystemInstructionsKey is the attribute Key conforming to the + // "gen_ai.system_instructions" semantic conventions. It represents the system + // message or instructions provided to the GenAI model separately from the chat + // history. 
+ // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet + // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type": + // "text",\n "content": "You are a language translator."\n },\n {\n "type": + // "text",\n "content": "Your mission is to translate text in English to + // French."\n }\n]\n" + // Note: This attribute SHOULD be used when the corresponding provider or API + // allows to provide system instructions or messages separately from the + // chat history. + // + // Instructions that are part of the chat history SHOULD be recorded in + // `gen_ai.input.messages` attribute instead. + // + // Instrumentations MUST follow [System instructions JSON schema]. + // + // When recorded on spans, it MAY be recorded as a JSON string if structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // system instructions. + // + // > [!Warning] + // > This attribute may contain sensitive information. + // + // See [Recording content on attributes] + // section for more details. + // + // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions") + + // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" + // semantic conventions. It represents the type of token being counted. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "input", "output" + GenAITokenTypeKey = attribute.Key("gen_ai.token.type") + + // GenAIToolCallIDKey is the attribute Key conforming to the + // "gen_ai.tool.call.id" semantic conventions. It represents the tool call + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4" + GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id") + + // GenAIToolDescriptionKey is the attribute Key conforming to the + // "gen_ai.tool.description" semantic conventions. It represents the tool + // description. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Multiply two numbers" + GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description") + + // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name" + // semantic conventions. It represents the name of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Flights" + GenAIToolNameKey = attribute.Key("gen_ai.tool.name") + + // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type" + // semantic conventions. It represents the type of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "function", "extension", "datastore" + // Note: Extension: A tool executed on the agent-side to directly call external + // APIs, bridging the gap between the agent and real-world systems. + // Agent-side operations involve actions that are performed by the agent on the + // server or within the agent's controlled environment. 
+ // Function: A tool executed on the client-side, where the agent generates + // parameters for a predefined function, and the client executes the logic. + // Client-side operations are actions taken on the user's end or within the + // client application. + // Datastore: A tool used by the agent to access and query structured or + // unstructured external data for retrieval-augmented tasks or knowledge + // updates. + GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") + + // GenAIUsageInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. 
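+//
+// Illustrative sketch (editorial, assuming a span for the embeddings call is
+// in scope):
+//
+//	span.SetAttributes(GenAIRequestEncodingFormats("float", "base64"))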
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { + return GenAIRequestEncodingFormatsKey.StringSlice(val) +} + +// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.frequency_penalty" semantic conventions. It represents the +// frequency penalty setting for the GenAI request. +func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { + return GenAIRequestFrequencyPenaltyKey.Float64(val) +} + +// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the model generates for a request. +func GenAIRequestMaxTokens(val int) attribute.KeyValue { + return GenAIRequestMaxTokensKey.Int(val) +} + +// GenAIRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// GenAI model a request is being made to. +func GenAIRequestModel(val string) attribute.KeyValue { + return GenAIRequestModelKey.String(val) +} + +// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.presence_penalty" semantic conventions. It represents the +// presence penalty setting for the GenAI request. +func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { + return GenAIRequestPresencePenaltyKey.Float64(val) +} + +// GenAIRequestSeed returns an attribute KeyValue conforming to the +// "gen_ai.request.seed" semantic conventions. It represents the requests with +// same seed value more likely to return same result. +func GenAIRequestSeed(val int) attribute.KeyValue { + return GenAIRequestSeedKey.Int(val) +} + +// GenAIRequestStopSequences returns an attribute KeyValue conforming to the +// "gen_ai.request.stop_sequences" semantic conventions. It represents the list +// of sequences that the model will use to stop generating further tokens. +func GenAIRequestStopSequences(val ...string) attribute.KeyValue { + return GenAIRequestStopSequencesKey.StringSlice(val) +} + +// GenAIRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the GenAI request. +func GenAIRequestTemperature(val float64) attribute.KeyValue { + return GenAIRequestTemperatureKey.Float64(val) +} + +// GenAIRequestTopK returns an attribute KeyValue conforming to the +// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling +// setting for the GenAI request. +func GenAIRequestTopK(val float64) attribute.KeyValue { + return GenAIRequestTopKKey.Float64(val) +} + +// GenAIRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling +// setting for the GenAI request. +func GenAIRequestTopP(val float64) attribute.KeyValue { + return GenAIRequestTopPKey.Float64(val) +} + +// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the +// "gen_ai.response.finish_reasons" semantic conventions. It represents the array +// of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAIResponseFinishReasonsKey.StringSlice(val) +} + +// GenAIResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique identifier +// for the completion. 
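+//
+// Illustrative sketch (editorial, assuming a span for the completion call is
+// in scope); response attributes are typically recorded together:
+//
+//	span.SetAttributes(
+//		GenAIResponseID("chatcmpl-123"),
+//		GenAIResponseModel("gpt-4-0613"),
+//		GenAIResponseFinishReasons("stop"),
+//	)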
+func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). 
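+//
+// Illustrative sketch (editorial, assuming a span for the call is in scope):
+// input and output token counts are usually recorded as a pair, e.g.
+//
+//	span.SetAttributes(GenAIUsageInputTokens(100), GenAIUsageOutputTokens(180))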
+func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.provider.name +var ( + // [OpenAI] + // Stability: development + // + // [OpenAI]: https://openai.com/ + GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") + // [Vertex AI] + // Stability: development + // + // [Vertex AI]: https://cloud.google.com/vertex-ai + GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") + // [Gemini] + // Stability: development + // + // [Gemini]: https://cloud.google.com/products/gemini + GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") + // [Anthropic] + // Stability: development + // + // [Anthropic]: https://www.anthropic.com/ + GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") + // [Cohere] + // Stability: development + // + // [Cohere]: https://cohere.com/ + GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") + // [Azure OpenAI] + // Stability: development + // + // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ + GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") + // [IBM Watsonx AI] + // Stability: development + // + // [IBM Watsonx AI]: 
https://www.ibm.com/products/watsonx-ai + GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai") + // [AWS Bedrock] + // Stability: development + // + // [AWS Bedrock]: https://aws.amazon.com/bedrock + GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock") + // [Perplexity] + // Stability: development + // + // [Perplexity]: https://www.perplexity.ai/ + GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity") + // [xAI] + // Stability: development + // + // [xAI]: https://x.ai/ + GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai") + // [DeepSeek] + // Stability: development + // + // [DeepSeek]: https://www.deepseek.com/ + GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek") + // [Groq] + // Stability: development + // + // [Groq]: https://groq.com/ + GenAIProviderNameGroq = GenAIProviderNameKey.String("groq") + // [Mistral AI] + // Stability: development + // + // [Mistral AI]: https://mistral.ai/ + GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Output tokens (completion, response, etc.) + // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. 
It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). 
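+//
+// Illustrative sketch (editorial): geo attributes are commonly recorded as a
+// group, e.g.
+//
+//	attrs := []attribute.KeyValue{GeoCountryISOCode("CA"), GeoRegionISOCode("CA-QC")}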
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
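+//
+// Illustrative sketch (editorial, assuming a server span for the GraphQL
+// request is in scope):
+//
+//	span.SetAttributes(
+//		GraphQLOperationTypeQuery,
+//		GraphQLOperationName("findBookById"),
+//		GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
+//	)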
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. 
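+//
+// Illustrative sketch (editorial): host attributes are normally attached to a
+// resource via go.opentelemetry.io/otel/sdk/resource; the schema URL constant
+// is assumed to be the one exported by this package:
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		HostName("opentelemetry-test"),
+//		HostArchAMD64,
+//		HostType("n1-standard-1"),
+//	)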
+func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110] + // and the PATCH method defined in [RFC5789]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. 
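+ //
+ // Illustrative sketch (editorial): an instrumentation that sees a method it
+ // does not know records `_OTHER` and preserves the original value (the
+ // method name below is just a placeholder), e.g.
+ //
+ //	span.SetAttributes(HTTPRequestMethodOther, HTTPRequestMethodOriginal("QUERY"))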
+ // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. 
It represents the matched route, that is, the path template in + // the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/users/:userID?", "{controller}/{action}/{id?}" + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the [application root] if there is one. + // + // [application root]: /docs/http/http-spans.md#http-server-definitions + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestHeader returns an attribute KeyValue conforming to the +// "http.request.header" semantic conventions. It represents the HTTP request +// headers, `` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.request.header."+key, val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of the +// request in bytes. This should be the total number of bytes sent over the wire, +// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, +// and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of the +// response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseHeader returns an attribute KeyValue conforming to the +// "http.response.header" semantic conventions. 
It represents the HTTP response +// headers, `` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.response.header."+key, val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. It represents the total size of the +// response in bytes. This should be the total number of bytes sent over the +// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. + // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwBatteryCapacityKey is the attribute Key conforming to the + // "hw.battery.capacity" semantic conventions. It represents the design capacity + // in Watts-hours or Amper-hours. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9.3Ah", "50Wh" + HwBatteryCapacityKey = attribute.Key("hw.battery.capacity") + + // HwBatteryChemistryKey is the attribute Key conforming to the + // "hw.battery.chemistry" semantic conventions. It represents the battery + // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Li-ion", "NiMH" + // + // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html + HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") + + // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" + // semantic conventions. It represents the current state of the battery. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwBatteryStateKey = attribute.Key("hw.battery.state") + + // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" + // semantic conventions. It represents the BIOS version of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + HwBiosVersionKey = attribute.Key("hw.bios_version") + + // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" + // semantic conventions. It represents the driver version for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.2.1-3" + HwDriverVersionKey = attribute.Key("hw.driver_version") + + // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" + // semantic conventions. It represents the type of the enclosure (useful for + // modular systems). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Computer", "Storage", "Switch" + HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") + + // HwFirmwareVersionKey is the attribute Key conforming to the + // "hw.firmware_version" semantic conventions. It represents the firmware + // version of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0.1" + HwFirmwareVersionKey = attribute.Key("hw.firmware_version") + + // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic + // conventions. It represents the type of task the GPU is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwGpuTaskKey = attribute.Key("hw.gpu.task") + + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" + // semantic conventions. It represents the type of limit for hardware + // components. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLimitTypeKey = attribute.Key("hw.limit_type") + + // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the + // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID + // Level of the logical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "RAID0+1", "RAID5", "RAID10" + HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") + + // HwLogicalDiskStateKey is the attribute Key conforming to the + // "hw.logical_disk.state" semantic conventions. 
It represents the state of the + // logical disk space usage. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") + + // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" + // semantic conventions. It represents the type of the memory module. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "DDR4", "DDR5", "LPDDR5" + HwMemoryTypeKey = attribute.Key("hw.memory.type") + + // HwModelKey is the attribute Key conforming to the "hw.model" semantic + // conventions. It represents the descriptive model name of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" + HwModelKey = attribute.Key("hw.model") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwNetworkLogicalAddressesKey is the attribute Key conforming to the + // "hw.network.logical_addresses" semantic conventions. It represents the + // logical addresses of the adapter (e.g. IP address, or WWPN). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "172.16.8.21", "57.11.193.42" + HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") + + // HwNetworkPhysicalAddressKey is the attribute Key conforming to the + // "hw.network.physical_address" semantic conventions. It represents the + // physical address of the adapter (e.g. MAC address, or WWNN). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00-90-F5-E9-7B-36" + HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the + // "hw.physical_disk.smart_attribute" semantic conventions. It represents the + // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute + // of the physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" + // + // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. + HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") + + // HwPhysicalDiskStateKey is the attribute Key conforming to the + // "hw.physical_disk.state" semantic conventions. It represents the state of the + // physical disk endurance utilization. 
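+	//
+	// Illustrative, non-normative sketch: a hardware monitor might describe a
+	// physical disk with a set of the attributes in this namespace (the
+	// identifier values below are hypothetical or taken from the examples):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		HwID("disk-0"),
+	//		HwParent("dellStorage_perc_0"),
+	//		HwPhysicalDiskType("SSD"),
+	//		HwVendor("Dell"),
+	//	}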
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") + + // HwPhysicalDiskTypeKey is the attribute Key conforming to the + // "hw.physical_disk.type" semantic conventions. It represents the type of the + // physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "HDD", "SSD", "10K" + HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") + + // HwSensorLocationKey is the attribute Key conforming to the + // "hw.sensor_location" semantic conventions. It represents the location of the + // sensor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 + // V3_3", "MAIN_12V", "CPU_VCORE" + HwSensorLocationKey = attribute.Key("hw.sensor_location") + + // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" + // semantic conventions. It represents the serial number of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CNFCP0123456789" + HwSerialNumberKey = attribute.Key("hw.serial_number") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTapeDriveOperationTypeKey is the attribute Key conforming to the + // "hw.tape_drive.operation_type" semantic conventions. It represents the type + // of tape drive operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: Describes the category of the hardware component for which `hw.state` + // is being reported. For example, `hw.type=temperature` along with + // `hw.state=degraded` would indicate that the temperature of the hardware + // component has been reported as `degraded`. + HwTypeKey = attribute.Key("hw.type") + + // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic + // conventions. It represents the vendor name of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" + HwVendorKey = attribute.Key("hw.vendor") +) + +// HwBatteryCapacity returns an attribute KeyValue conforming to the +// "hw.battery.capacity" semantic conventions. It represents the design capacity +// in Watts-hours or Amper-hours. +func HwBatteryCapacity(val string) attribute.KeyValue { + return HwBatteryCapacityKey.String(val) +} + +// HwBatteryChemistry returns an attribute KeyValue conforming to the +// "hw.battery.chemistry" semantic conventions. It represents the battery +// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. 
+// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func HwBatteryChemistry(val string) attribute.KeyValue { + return HwBatteryChemistryKey.String(val) +} + +// HwBiosVersion returns an attribute KeyValue conforming to the +// "hw.bios_version" semantic conventions. It represents the BIOS version of the +// hardware component. +func HwBiosVersion(val string) attribute.KeyValue { + return HwBiosVersionKey.String(val) +} + +// HwDriverVersion returns an attribute KeyValue conforming to the +// "hw.driver_version" semantic conventions. It represents the driver version for +// the hardware component. +func HwDriverVersion(val string) attribute.KeyValue { + return HwDriverVersionKey.String(val) +} + +// HwEnclosureType returns an attribute KeyValue conforming to the +// "hw.enclosure.type" semantic conventions. It represents the type of the +// enclosure (useful for modular systems). +func HwEnclosureType(val string) attribute.KeyValue { + return HwEnclosureTypeKey.String(val) +} + +// HwFirmwareVersion returns an attribute KeyValue conforming to the +// "hw.firmware_version" semantic conventions. It represents the firmware version +// of the hardware component. +func HwFirmwareVersion(val string) attribute.KeyValue { + return HwFirmwareVersionKey.String(val) +} + +// HwID returns an attribute KeyValue conforming to the "hw.id" semantic +// conventions. It represents an identifier for the hardware component, unique +// within the monitored host. +func HwID(val string) attribute.KeyValue { + return HwIDKey.String(val) +} + +// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the +// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID +// Level of the logical disk. +func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { + return HwLogicalDiskRaidLevelKey.String(val) +} + +// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" +// semantic conventions. It represents the type of the memory module. +func HwMemoryType(val string) attribute.KeyValue { + return HwMemoryTypeKey.String(val) +} + +// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic +// conventions. It represents the descriptive model name of the hardware +// component. +func HwModel(val string) attribute.KeyValue { + return HwModelKey.String(val) +} + +// HwName returns an attribute KeyValue conforming to the "hw.name" semantic +// conventions. It represents an easily-recognizable name for the hardware +// component. +func HwName(val string) attribute.KeyValue { + return HwNameKey.String(val) +} + +// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the +// "hw.network.logical_addresses" semantic conventions. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return HwNetworkLogicalAddressesKey.StringSlice(val) +} + +// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the +// "hw.network.physical_address" semantic conventions. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func HwNetworkPhysicalAddress(val string) attribute.KeyValue { + return HwNetworkPhysicalAddressKey.String(val) +} + +// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic +// conventions. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). 
+func HwParent(val string) attribute.KeyValue { + return HwParentKey.String(val) +} + +// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the +// "hw.physical_disk.smart_attribute" semantic conventions. It represents the +// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute +// of the physical disk. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { + return HwPhysicalDiskSmartAttributeKey.String(val) +} + +// HwPhysicalDiskType returns an attribute KeyValue conforming to the +// "hw.physical_disk.type" semantic conventions. It represents the type of the +// physical disk. +func HwPhysicalDiskType(val string) attribute.KeyValue { + return HwPhysicalDiskTypeKey.String(val) +} + +// HwSensorLocation returns an attribute KeyValue conforming to the +// "hw.sensor_location" semantic conventions. It represents the location of the +// sensor. +func HwSensorLocation(val string) attribute.KeyValue { + return HwSensorLocationKey.String(val) +} + +// HwSerialNumber returns an attribute KeyValue conforming to the +// "hw.serial_number" semantic conventions. It represents the serial number of +// the hardware component. +func HwSerialNumber(val string) attribute.KeyValue { + return HwSerialNumberKey.String(val) +} + +// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic +// conventions. It represents the vendor name of the hardware component. +func HwVendor(val string) attribute.KeyValue { + return HwVendorKey.String(val) +} + +// Enum values for hw.battery.state +var ( + // Charging + // Stability: development + HwBatteryStateCharging = HwBatteryStateKey.String("charging") + // Discharging + // Stability: development + HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") +) + +// Enum values for hw.gpu.task +var ( + // Decoder + // Stability: development + HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") + // Encoder + // Stability: development + HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") + // General + // Stability: development + HwGpuTaskGeneral = HwGpuTaskKey.String("general") +) + +// Enum values for hw.limit_type +var ( + // Critical + // Stability: development + HwLimitTypeCritical = HwLimitTypeKey.String("critical") + // Degraded + // Stability: development + HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") + // High Critical + // Stability: development + HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") + // High Degraded + // Stability: development + HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") + // Low Critical + // Stability: development + HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") + // Low Degraded + // Stability: development + HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") + // Maximum + // Stability: development + HwLimitTypeMax = HwLimitTypeKey.String("max") + // Throttled + // Stability: development + HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") + // Turbo + // Stability: development + HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") +) + +// Enum values for hw.logical_disk.state +var ( + // Used + // Stability: development + HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") + // Free + // Stability: development + HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") +) + +// Enum values for hw.physical_disk.state +var ( + // Remaining + // Stability: development + HwPhysicalDiskStateRemaining = 
HwPhysicalDiskStateKey.String("remaining") +) + +// Enum values for hw.state +var ( + // Degraded + // Stability: development + HwStateDegraded = HwStateKey.String("degraded") + // Failed + // Stability: development + HwStateFailed = HwStateKey.String("failed") + // Needs Cleaning + // Stability: development + HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") + // OK + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Predicted Failure + // Stability: development + HwStatePredictedFailure = HwStateKey.String("predicted_failure") +) + +// Enum values for hw.tape_drive.operation_type +var ( + // Mount + // Stability: development + HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") + // Unmount + // Stability: development + HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") + // Clean + // Stability: development + HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") +) + +// Enum values for hw.type +var ( + // Battery + // Stability: development + HwTypeBattery = HwTypeKey.String("battery") + // CPU + // Stability: development + HwTypeCPU = HwTypeKey.String("cpu") + // Disk controller + // Stability: development + HwTypeDiskController = HwTypeKey.String("disk_controller") + // Enclosure + // Stability: development + HwTypeEnclosure = HwTypeKey.String("enclosure") + // Fan + // Stability: development + HwTypeFan = HwTypeKey.String("fan") + // GPU + // Stability: development + HwTypeGpu = HwTypeKey.String("gpu") + // Logical disk + // Stability: development + HwTypeLogicalDisk = HwTypeKey.String("logical_disk") + // Memory + // Stability: development + HwTypeMemory = HwTypeKey.String("memory") + // Network + // Stability: development + HwTypeNetwork = HwTypeKey.String("network") + // Physical disk + // Stability: development + HwTypePhysicalDisk = HwTypeKey.String("physical_disk") + // Power supply + // Stability: development + HwTypePowerSupply = HwTypeKey.String("power_supply") + // Tape drive + // Stability: development + HwTypeTapeDrive = HwTypeKey.String("tape_drive") + // Temperature + // Stability: development + HwTypeTemperature = HwTypeKey.String("temperature") + // Voltage + // Stability: development + HwTypeVoltage = HwTypeKey.String("voltage") +) + +// Namespace: ios +const ( + // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" + // semantic conventions. It represents the this attribute represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The iOS lifecycle states are defined in the + // [UIApplicationDelegate documentation], and from which the `OS terminology` + // column values are derived. + // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`. 
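+	//
+	// Illustrative, non-normative sketch: an instrumentation might record a
+	// lifecycle transition as a span event carrying one of these enum values
+	// (the event name is illustrative; assumes a span and the trace package
+	// are in scope):
+	//
+	//	span.AddEvent("app.lifecycle",
+	//		trace.WithAttributes(IOSAppStateBackground))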
+ // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. + // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. 
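+	//
+	// Illustrative, non-normative sketch: telemetry describing a container
+	// might combine this with the pod and namespace attributes, using the
+	// helper functions defined later in this file:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		K8SNamespaceName("default"),
+	//		K8SPodName("opentelemetry-pod-autoconf"),
+	//		K8SContainerName("redis"),
+	//		K8SContainerRestartCount(2),
+	//	}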
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SContainerStatusReasonKey is the attribute Key conforming to the + // "k8s.container.status.reason" semantic conventions. It represents the reason + // for the container state. Corresponds to the `reason` field of the: + // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ContainerCreating", "CrashLoopBackOff", + // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", + // "OOMKilled", "Completed", "Error", "ContainerCannotRun" + // + // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core + // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core + K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") + + // K8SContainerStatusStateKey is the attribute Key conforming to the + // "k8s.container.status.state" semantic conventions. It represents the state of + // the container. [K8s ContainerState]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "terminated", "running", "waiting" + // + // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core + K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. 
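+	//
+	// Illustrative, non-normative sketch: workload identity might be expressed
+	// as a name/UID pair, e.g. for a DaemonSet (values taken from the examples
+	// above):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		K8SClusterName("opentelemetry-cluster"),
+	//		K8SDaemonSetName("opentelemetry"),
+	//		K8SDaemonSetUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+	//	}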
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPAMetricTypeKey is the attribute Key conforming to the + // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric + // source for the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Resource", "ContainerResource" + // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. + K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the + // API version of the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "apps/v1", "autoscaling/v2" + // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA + // spec. + K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") + + // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Deployment", "StatefulSet" + // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") + + // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-deployment", "my-statefulset" + // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. 
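+	//
+	// Illustrative, non-normative sketch: an HPA might be described by its
+	// name together with the scale target reference attributes:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		K8SHPAName("opentelemetry"),
+	//		K8SHPAScaletargetrefAPIVersion("apps/v1"),
+	//		K8SHPAScaletargetrefKind("Deployment"),
+	//		K8SHPAScaletargetrefName("my-deployment"),
+	//	}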
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" + // semantic conventions. It represents the size (identifier) of the K8s huge + // page. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2Mi" + K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeConditionStatusKey is the attribute Key conforming to the + // "k8s.node.condition.status" semantic conventions. It represents the status of + // the condition, one of True, False, Unknown. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "true", "false", "unknown" + // Note: This attribute aligns with the `status` field of the + // [NodeCondition] + // + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") + + // K8SNodeConditionTypeKey is the attribute Key conforming to the + // "k8s.node.condition.type" semantic conventions. It represents the condition + // type of a K8s Node. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Ready", "DiskPressure" + // Note: K8s Node conditions as described + // by [K8s documentation]. + // + // This attribute aligns with the `type` field of the + // [NodeCondition] + // + // The set of possible values is not limited to those listed here. Managed + // Kubernetes environments, + // or custom controllers MAY introduce additional node condition types. + // When this occurs, the exact value as reported by the Kubernetes API SHOULD be + // used. 
+ // + // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. 
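+	//
+	// Illustrative, non-normative sketch: quota-related telemetry might pair
+	// the quota name with the resource it constrains:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		K8SResourceQuotaName("opentelemetry"),
+	//		K8SResourceQuotaResourceName("count/pods"),
+	//	}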
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "opentelemetry"
+	K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name")
+
+	// K8SResourceQuotaResourceNameKey is the attribute Key conforming to the
+	// "k8s.resourcequota.resource_name" semantic conventions. It represents the
+	// name of the K8s resource a resource quota defines.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "count/replicationcontrollers"
+	// Note: The value for this attribute can be either the full
+	// `count/<resource>[.<group>]` string (e.g., count/deployments.apps,
+	// count/pods), or, for certain core Kubernetes resources, just the resource
+	// name (e.g., pods, services, configmaps). Both forms are supported by
+	// Kubernetes for object count quotas. See
+	// [Kubernetes Resource Quotas documentation] for more details.
+	//
+	// [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota
+	K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name")
+
+	// K8SResourceQuotaUIDKey is the attribute Key conforming to the
+	// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+	// resource quota.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+	K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid")
+
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "opentelemetry"
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+	// K8SStorageclassNameKey is the attribute Key conforming to the
+	// "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+	// [StorageClass] object.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "gold.storageclass.storage.k8s.io"
+	//
+	// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+	K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name")
+
+	// K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name"
+	// semantic conventions. It represents the name of the K8s volume.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "volume0"
+	K8SVolumeNameKey = attribute.Key("k8s.volume.name")
+
+	// K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type"
+	// semantic conventions. It represents the type of the K8s volume.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "emptyDir", "persistentVolumeClaim"
+	K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. 
It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+	return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+	return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob
+// annotation placed on the CronJob, the `<key>` being the annotation name, the
+// value being the annotation value.
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.cronjob.annotation."+key, val)
+}
+
+// K8SCronJobLabel returns an attribute KeyValue conforming to the
+// "k8s.cronjob.label" semantic conventions. It represents the label placed on
+// the CronJob, the `<key>` being the label name, the value being the label
+// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.cronjob.label."+key, val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.daemonset.annotation" semantic conventions. It represents the annotation
+// placed on the DaemonSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.daemonset.annotation."+key, val)
+}
+
+// K8SDaemonSetLabel returns an attribute KeyValue conforming to the
+// "k8s.daemonset.label" semantic conventions. 
It represents the label placed on
+// the DaemonSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDaemonSetLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.daemonset.label."+key, val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+	return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+	return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the
+// "k8s.deployment.annotation" semantic conventions. It represents the annotation
+// placed on the Deployment, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.deployment.annotation."+key, val)
+}
+
+// K8SDeploymentLabel returns an attribute KeyValue conforming to the
+// "k8s.deployment.label" semantic conventions. It represents the label placed on
+// the Deployment, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDeploymentLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.deployment.label."+key, val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+	return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+	return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SHPAMetricType returns an attribute KeyValue conforming to the
+// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+// source for the horizontal pod autoscaler.
+func K8SHPAMetricType(val string) attribute.KeyValue {
+	return K8SHPAMetricTypeKey.String(val)
+}
+
+// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name"
+// semantic conventions. It represents the name of the horizontal pod autoscaler.
+func K8SHPAName(val string) attribute.KeyValue {
+	return K8SHPANameKey.String(val)
+}
+
+// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+// API version of the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue {
+	return K8SHPAScaletargetrefAPIVersionKey.String(val)
+}
+
+// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+// the target resource to scale for the HorizontalPodAutoscaler. 
+func K8SHPAScaletargetrefKind(val string) attribute.KeyValue {
+	return K8SHPAScaletargetrefKindKey.String(val)
+}
+
+// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefName(val string) attribute.KeyValue {
+	return K8SHPAScaletargetrefNameKey.String(val)
+}
+
+// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid"
+// semantic conventions. It represents the UID of the horizontal pod autoscaler.
+func K8SHPAUID(val string) attribute.KeyValue {
+	return K8SHPAUIDKey.String(val)
+}
+
+// K8SHugepageSize returns an attribute KeyValue conforming to the
+// "k8s.hugepage.size" semantic conventions. It represents the size (identifier)
+// of the K8s huge page.
+func K8SHugepageSize(val string) attribute.KeyValue {
+	return K8SHugepageSizeKey.String(val)
+}
+
+// K8SJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.job.annotation" semantic conventions. It represents the annotation placed
+// on the Job, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SJobAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.job.annotation."+key, val)
+}
+
+// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label"
+// semantic conventions. It represents the label placed on the Job, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SJobLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.job.label."+key, val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+	return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+	return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the
+// "k8s.namespace.annotation" semantic conventions. It represents the annotation
+// placed on the Namespace, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.namespace.annotation."+key, val)
+}
+
+// K8SNamespaceLabel returns an attribute KeyValue conforming to the
+// "k8s.namespace.label" semantic conventions. It represents the label placed on
+// the Namespace, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SNamespaceLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.namespace.label."+key, val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeAnnotation returns an attribute KeyValue conforming to the
+// "k8s.node.annotation" semantic conventions. 
It represents the annotation
+// placed on the Node, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.node.annotation."+key, val)
+}
+
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.node.label."+key, val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
+// semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `<key>` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the `<key>`
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.pod.label."+key, val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// placed on the ReplicaSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label placed on
+// the ReplicaSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.replicaset.label."+key, val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. 
It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicationControllerName returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.name" semantic conventions. It represents the name
+// of the replication controller.
+func K8SReplicationControllerName(val string) attribute.KeyValue {
+	return K8SReplicationControllerNameKey.String(val)
+}
+
+// K8SReplicationControllerUID returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of
+// the replication controller.
+func K8SReplicationControllerUID(val string) attribute.KeyValue {
+	return K8SReplicationControllerUIDKey.String(val)
+}
+
+// K8SResourceQuotaName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.name" semantic conventions. It represents the name of the
+// resource quota.
+func K8SResourceQuotaName(val string) attribute.KeyValue {
+	return K8SResourceQuotaNameKey.String(val)
+}
+
+// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.resource_name" semantic conventions. It represents the name
+// of the K8s resource a resource quota defines.
+func K8SResourceQuotaResourceName(val string) attribute.KeyValue {
+	return K8SResourceQuotaResourceNameKey.String(val)
+}
+
+// K8SResourceQuotaUID returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+// resource quota.
+func K8SResourceQuotaUID(val string) attribute.KeyValue {
+	return K8SResourceQuotaUIDKey.String(val)
+}
+
+// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.statefulset.annotation" semantic conventions. It represents the
+// annotation placed on the StatefulSet, the `<key>` being the annotation name,
+// the value being the annotation value, even if the value is empty.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.statefulset.annotation."+key, val)
+}
+
+// K8SStatefulSetLabel returns an attribute KeyValue conforming to the
+// "k8s.statefulset.label" semantic conventions. It represents the label placed
+// on the StatefulSet, the `<key>` being the label name, the value being the
+// label value, even if the value is empty.
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue {
+	return attribute.String("k8s.statefulset.label."+key, val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStorageclassName returns an attribute KeyValue conforming to the
+// "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+// [StorageClass] object. 
+// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func K8SStorageclassName(val string) attribute.KeyValue { + return K8SStorageclassNameKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.container.status.reason +var ( + // The container is being created. + // Stability: development + K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating") + // The container is in a crash loop back off state. + // Stability: development + K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff") + // There was an error creating the container configuration. + // Stability: development + K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError") + // There was an error pulling the container image. + // Stability: development + K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull") + // The container image pull is in back off state. + // Stability: development + K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff") + // The container was killed due to out of memory. + // Stability: development + K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled") + // The container has completed execution. + // Stability: development + K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed") + // There was an error with the container. + // Stability: development + K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error") + // The container cannot run. + // Stability: development + K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun") +) + +// Enum values for k8s.container.status.state +var ( + // The container has terminated. + // Stability: development + K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated") + // The container is running. + // Stability: development + K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running") + // The container is waiting. 
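Editorial aside, not part of this patch: the helpers above return plain attribute.KeyValue values, so they drop into any OpenTelemetry API that accepts attributes. Below is a minimal sketch of describing a workload with them as resource attributes; the import path and version of this generated package are not visible in this hunk, so the semconv alias is an assumption, as are the example values.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0" // assumed path/version; use the module this file actually ships in
)

// podResource sketches how the generated K8s helpers are consumed: each call
// yields an attribute.KeyValue carrying the semantic-convention key name.
func podResource(node, pod, podUID string) *resource.Resource {
	return resource.NewSchemaless(
		semconv.K8SNodeName(node),
		semconv.K8SPodName(pod),
		semconv.K8SPodUID(podUID),
		semconv.K8SPodLabel("app", "checkout"), // hypothetical label, purely for illustration
	)
}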
+ // Stability: development + K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") +) + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.node.condition.status +var ( + // condition_true + // Stability: development + K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") + // condition_false + // Stability: development + K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") + // condition_unknown + // Stability: development + K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") +) + +// Enum values for k8s.node.condition.type +var ( + // The node is healthy and ready to accept pods + // Stability: development + K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") + // Pressure exists on the disk size—that is, if the disk capacity is low + // Stability: development + K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") + // Pressure exists on the node memory—that is, if the node memory is low + // Stability: development + K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") + // Pressure exists on the processes—that is, if there are too many processes + // on the node + // Stability: development + K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") + // The network for the node is not correctly configured + // Stability: development + K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // 
"linux.memory.slab.state" semantic conventions. It represents the Linux Slab + // memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an + // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other + // identifiers (e.g. UUID) may be used as needed. + // + // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogFileName returns an attribute KeyValue conforming to the "log.file.name" +// semantic conventions. It represents the basename of the file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" +// semantic conventions. It represents the full path to the file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path to +// the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// LogRecordOriginal returns an attribute KeyValue conforming to the +// "log.record.original" semantic conventions. It represents the complete +// original Log Record. +func LogRecordOriginal(val string) attribute.KeyValue { + return LogRecordOriginalKey.String(val) +} + +// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" +// semantic conventions. It represents a unique identifier for the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Enum values for log.iostream +var ( + // Logs from stdout stream + // Stability: development + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + // Stability: development + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Namespace: mainframe +const ( + // MainframeLparNameKey is the attribute Key conforming to the + // "mainframe.lpar.name" semantic conventions. It represents the name of the + // logical partition that hosts a systems with a mainframe operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "LPAR01" + MainframeLparNameKey = attribute.Key("mainframe.lpar.name") +) + +// MainframeLparName returns an attribute KeyValue conforming to the +// "mainframe.lpar.name" semantic conventions. It represents the name of the +// logical partition that hosts a systems with a mainframe operating system. +func MainframeLparName(val string) attribute.KeyValue { + return MainframeLparNameKey.String(val) +} + +// Namespace: messaging +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. 
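A hedged sketch of the log.* helpers in use (editorial, not part of this patch): here they annotate a span event describing where a record was read from. The tracer name, file names, and the semconv import path are illustrative assumptions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0" // assumed path/version
)

// recordLogLine attaches log-file provenance to a span event.
func recordLogLine(ctx context.Context, line string) {
	_, span := otel.Tracer("example/tailer").Start(ctx, "tail")
	defer span.End()

	span.AddEvent("log.record", trace.WithAttributes(
		semconv.LogFileName("audit.log"),
		semconv.LogFilePath("/var/log/mysql/audit.log"),
		semconv.LogIostreamStdout,
		semconv.LogRecordOriginal(line),
	))
}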
It represents the + // number of messages sent, received, or processed in the scope of the batching + // operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client library + // supports both batch and single-message API for the same operation, + // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs + // and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique identifier + // for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") + + // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It + // represents the ack deadline in seconds set for the modify ack deadline + // request. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the + // ack id for a given message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ack_id + MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. + // It represents the delivery attempt for a given message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. 
It + // represents the ordering key for a given message. If the attribute is not + // present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ordering_key + MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the message + // keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message.id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents a + // boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") + + // MessagingKafkaOffsetKey is the attribute Key conforming to the + // "messaging.kafka.offset" semantic conventions. It represents the offset of a + // record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the size of + // the message body in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed body size. If + // both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents the + // conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: MyConversationId + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents the + // size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed size. If both + // sizes are known, the uncompressed + // size should be used. 
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used by + // the messaging system as an identifier for the message, represented as a + // string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 452a7c7c7c7048c2f887f61572b18fc2 + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ack", "nack", "send" + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to + // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It + // represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the + // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents + // the rabbitMQ message delivery tag. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") + + // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the + // "messaging.rocketmq.consumption_model" semantic conventions. It represents + // the model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to + // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It + // represents the delay time level for delay message, which determines the + // message delay time. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming + // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. + // It represents the timestamp in milliseconds that the delay message is + // expected to be delivered to consumer. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents the it + // is essential for FIFO message. Messages that belong to the same message group + // are always processed one by one within the same consumer group. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myMessageGroup + MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents the + // key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "keyA", "keyB" + MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketMQMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: tagA + MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents the + // type of message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketMQNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myNamespace + MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to + // the "messaging.servicebus.disposition_status" semantic conventions. It + // represents the describes the [settlement type]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock + MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to + // the "messaging.servicebus.message.delivery_count" semantic conventions. It + // represents the number of deliveries that have been attempted for this + // message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.servicebus.message.enqueued_time" semantic conventions. 
It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") + + // MessagingSystemKey is the attribute Key conforming to the "messaging.system" + // semantic conventions. It represents the messaging system as identified by the + // client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate with + // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the + // instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. 
+func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It +// represents the UTC epoch seconds at which the message has been accepted and +// stored in the entity. +func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) +} + +// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It +// represents the ack deadline in seconds set for the modify ack deadline +// request. +func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the +// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the +// ack id for a given message. +func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageAckIDKey.String(val) +} + +// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It +// represents the ordering key for a given message. If the attribute is not +// present, the message does not have an ordering key. +func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the message +// keys in Kafka are used for grouping alike messages to ensure they're processed +// on the same partition. They differ from `messaging.message.id` in that they're +// not unique. If the key is `null`, the attribute MUST NOT be set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the +// "messaging.kafka.message.tombstone" semantic conventions. 
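To show how the messaging helpers defined so far compose (a sketch under assumptions, not this patch's code): a producer instrumentation would typically start a span like the one below. The topic, client id, and semconv import path are illustrative, and the MessagingSystem* enum constants defined later in this file could replace the raw key usage.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0" // assumed path/version
)

// startPublishSpan opens a producer span annotated with messaging attributes.
func startPublishSpan(ctx context.Context, topic, key string) (context.Context, trace.Span) {
	return otel.Tracer("example/producer").Start(ctx, "send "+topic,
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKey.String("kafka"),
			semconv.MessagingOperationNameKey.String("send"),
			semconv.MessagingDestinationName(topic),
			semconv.MessagingDestinationPartitionID("1"),
			semconv.MessagingKafkaMessageKey(key),
			semconv.MessagingClientID("client-5"),
		),
	)
}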
It represents a +// boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingKafkaOffset returns an attribute KeyValue conforming to the +// "messaging.kafka.offset" semantic conventions. It represents the offset of a +// record in the corresponding Kafka partition. +func MessagingKafkaOffset(val int) attribute.KeyValue { + return MessagingKafkaOffsetKey.Int(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size of +// the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming to the +// "messaging.message.conversation_id" semantic conventions. It represents the +// conversation ID identifying the conversation to which the message belongs, +// represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the +// "messaging.message.envelope.size" semantic conventions. It represents the size +// of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by the +// messaging system as an identifier for the message, represented as a string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitMQDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming +// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It +// represents the rabbitMQ message delivery tag. +func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitMQMessageDeliveryTagKey.Int(val) +} + +// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketMQMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. 
It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketMQMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.group" semantic conventions. It represents the it +// is essential for FIFO message. Messages that belong to the same message group +// are always processed one by one within the same consumer group. +func MessagingRocketMQMessageGroup(val string) attribute.KeyValue { + return MessagingRocketMQMessageGroupKey.String(val) +} + +// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.keys" semantic conventions. It represents the +// key(s) of message, another way to mark message besides message id. +func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketMQMessageKeysKey.StringSlice(val) +} + +// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketMQMessageTag(val string) attribute.KeyValue { + return MessagingRocketMQMessageTagKey.String(val) +} + +// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the +// "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketMQNamespace(val string) attribute.KeyValue { + return MessagingRocketMQNamespaceKey.String(val) +} + +// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. +func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServiceBusMessageDeliveryCountKey.Int(val) +} + +// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has been +// accepted and stored in the entity. +func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) +} + +// Enum values for messaging.operation.type +var ( + // A message is created. "Create" spans always refer to a single message and are + // used to provide a unique creation context for messages in batch sending + // scenarios. + // + // Stability: development + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are provided for sending to an intermediary. If a single + // message is sent, the context of the "Send" span can be used as the creation + // context and no "Create" span needs to be created. + // + // Stability: development + MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") + // One or more messages are requested by a consumer. This operation refers to + // pull-based scenarios, where consumers explicitly call methods of messaging + // SDKs to receive messages. 
+ // + // Stability: development + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are processed by a consumer. + // + // Stability: development + MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") + // One or more messages are settled. + // + // Stability: development + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Notification Service (SNS) + // Stability: development + MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + 
// Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: DE + NetworkCarrierICCKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMCCKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile carrier + // country code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 310 + NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMNCKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile carrier + // network code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 001 + NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of the + // mobile carrier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: sprint + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionStateKey is the attribute Key conforming to the + // "network.connection.state" semantic conventions. It represents the state of + // network connection. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "close_wait" + // Note: Connection states are defined as part of the [rfc9293] + // + // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2 + NetworkConnectionStateKey = attribute.Key("network.connection.state") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the this + // describes more details regarding the connection.type. It may be the type of + // cell technology connection, but it could be used for describing details about + // a wifi connection. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: LTE + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the internet + // connection type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: wifi + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkInterfaceNameKey is the attribute Key conforming to the + // "network.interface.name" semantic conventions. It represents the network + // interface name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lo", "eth0" + NetworkInterfaceNameKey = attribute.Key("network.interface.name") + + // NetworkIODirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network IO + // operation direction. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "transmit" + NetworkIODirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. + // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. 
+ // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. 
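One last hedged sketch (editorial): the network.* helpers are usually emitted together, and, per the note on network.transport above, a port should be reported alongside its transport because the number alone is ambiguous. The semconv import path and the example values are assumptions.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0" // assumed path/version
)

// connAttrs describes one side of a TCP/HTTP connection; the transport is set
// next to the peer port so the port number is unambiguous.
func connAttrs(peerAddr string, peerPort int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.NetworkPeerAddress(peerAddr),
		semconv.NetworkPeerPort(peerPort),
		semconv.NetworkTransportKey.String("tcp"), // the NetworkTransportTCP enum value defined later is equivalent
		semconv.NetworkTypeKey.String("ipv4"),
		semconv.NetworkProtocolNameKey.String("http"),
		semconv.NetworkProtocolVersionKey.String("1.1"),
	}
}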
+// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + // Stability: development + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + // Stability: development + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + // Stability: development + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + // Stability: development + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + // Stability: development + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + // Stability: development + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + // Stability: development + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + // Stability: development + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + // Stability: development + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + // Stability: development + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// Enum values for network.connection.type +var ( + // wifi + // Stability: development + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + // Stability: development + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + // Stability: development + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + // Stability: development + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + // Stability: development + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +// Enum values for network.io.direction +var ( + // transmit + // Stability: development + NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") + // receive + // Stability: development + NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") +) + +// Enum values for network.transport +var ( + // TCP + // Stability: stable + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + // Stability: stable + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. + // Stability: stable + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + // Stability: stable + NetworkTransportUnix = NetworkTransportKey.String("unix") + // QUIC + // Stability: stable + NetworkTransportQUIC = NetworkTransportKey.String("quic") +) + +// Enum values for network.type +var ( + // IPv4 + // Stability: stable + NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") + // IPv6 + // Stability: stable + NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") +) + +// Namespace: oci +const ( + // OCIManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of the + // OCI image manifest. For container images specifically is the digest by which + // the container image is known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" + // Note: Follows [OCI Image Manifest Specification], and specifically the + // [Digest property]. + // An example can be found in [Example Image Manifest]. 
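+ //
+ // A minimal illustrative sketch attaching the digest as a resource
+ // attribute; resource.NewWithAttributes is from the OpenTelemetry SDK
+ // resource package, SchemaURL is the schema constant exported alongside
+ // these conventions, and the digest value is the documented example:
+ //
+ //	res := resource.NewWithAttributes(SchemaURL,
+ //		OCIManifestDigest("sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"),
+ //	)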
+ // + // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md + // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests + // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest + OCIManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OCIManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OCIManifestDigest(val string) attribute.KeyValue { + return OCIManifestDigestKey.String(val) +} + +// Namespace: openai +const ( + // OpenAIRequestServiceTierKey is the attribute Key conforming to the + // "openai.request.service_tier" semantic conventions. It represents the service + // tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") + + // OpenAIResponseServiceTierKey is the attribute Key conforming to the + // "openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") + + // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the + // "openai.response.system_fingerprint" semantic conventions. It represents a + // fingerprint to track any eventual change in the Generative AI environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") +) + +// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "openai.response.service_tier" semantic conventions. It represents the service +// tier used for the response. +func OpenAIResponseServiceTier(val string) attribute.KeyValue { + return OpenAIResponseServiceTierKey.String(val) +} + +// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to +// the "openai.response.system_fingerprint" semantic conventions. It represents a +// fingerprint to track any eventual change in the Generative AI environment. +func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return OpenAIResponseSystemFingerprintKey.String(val) +} + +// Enum values for openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. + // Stability: development + OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") +) + +// Namespace: opentracing +const ( + // OpenTracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the parent-child + // Reference type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The causal relationship between a child Span and a parent Span. 
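+ //
+ // A minimal illustrative sketch recording a child-of reference (span is
+ // assumed to be an active trace.Span and is not part of this package):
+ //
+ //	span.SetAttributes(OpenTracingRefTypeChildOf)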
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +// Enum values for opentracing.ref_type +var ( + // The parent Span depends on the child Span in some capacity + // Stability: development + OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + // Stability: development + OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") +) + +// Namespace: os +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TQ3C.230805.001.B2", "20E247", "22621" + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to be + // parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" semantic + // conventions. It represents the version string of the operating system as + // defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + OSVersionKey = attribute.Key("os.version") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the "os.description" +// semantic conventions. It represents the human readable (not intended to be +// parsed) OS version information, like e.g. reported by `ver` or +// `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating system +// as defined in [Version Attributes]. 
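+//
+// A minimal illustrative sketch combining the os.* attributes; the values are
+// the documented examples, not detected at runtime:
+//
+//	attrs := []attribute.KeyValue{
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//	}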
+// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Enum values for os.type +var ( + // Microsoft Windows + // Stability: development + OSTypeWindows = OSTypeKey.String("windows") + // Linux + // Stability: development + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + // Stability: development + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + // Stability: development + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + // Stability: development + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + // Stability: development + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + // Stability: development + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + // Stability: development + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + // Stability: development + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + // Stability: development + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + // Stability: development + OSTypeZOS = OSTypeKey.String("zos") +) + +// Namespace: otel +const ( + // OTelComponentNameKey is the attribute Key conforming to the + // "otel.component.name" semantic conventions. It represents a name uniquely + // identifying the instance of the OpenTelemetry component within its containing + // SDK instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otlp_grpc_span_exporter/0", "custom-name" + // Note: Implementations SHOULD ensure a low cardinality for this attribute, + // even across application or SDK restarts. + // E.g. implementations MUST NOT use UUIDs as values for this attribute. + // + // Implementations MAY achieve these goals by following a + // `/` pattern, e.g. + // `batching_span_processor/0`. + // Hereby `otel.component.type` refers to the corresponding attribute value of + // the component. + // + // The value of `instance-counter` MAY be automatically assigned by the + // component and uniqueness within the enclosing SDK instance MUST be + // guaranteed. + // For example, `` MAY be implemented by using a monotonically + // increasing counter (starting with `0`), which is incremented every time an + // instance of the given component type is started. + // + // With this implementation, for example the first Batching Span Processor would + // have `batching_span_processor/0` + // as `otel.component.name`, the second one `batching_span_processor/1` and so + // on. + // These values will therefore be reused in the case of an application restart. + OTelComponentNameKey = attribute.Key("otel.component.name") + + // OTelComponentTypeKey is the attribute Key conforming to the + // "otel.component.type" semantic conventions. It represents a name identifying + // the type of the OpenTelemetry component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "batching_span_processor", "com.example.MySpanExporter" + // Note: If none of the standardized values apply, implementations SHOULD use + // the language-defined name of the type. + // E.g. for Java the fully qualified classname SHOULD be used in this case. + OTelComponentTypeKey = attribute.Key("otel.component.type") + + // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" + // semantic conventions. 
It represents the name of the instrumentation scope - ( + // `InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "io.opentelemetry.contrib.mongodb" + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeSchemaURLKey is the attribute Key conforming to the + // "otel.scope.schema_url" semantic conventions. It represents the schema URL of + // the instrumentation scope. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://opentelemetry.io/schemas/1.31.0" + OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of the + // instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.0.0" + OTelScopeVersionKey = attribute.Key("otel.scope.version") + + // OTelSpanParentOriginKey is the attribute Key conforming to the + // "otel.span.parent.origin" semantic conventions. It represents the determines + // whether the span has a parent span, and if so, + // [whether it is a remote parent]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") + + // OTelSpanSamplingResultKey is the attribute Key conforming to the + // "otel.span.sampling_result" semantic conventions. It represents the result + // value of the sampler for this span. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") + + // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" + // semantic conventions. It represents the name of the code, either "OK" or + // "ERROR". MUST NOT be set if the status code is UNSET. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the description + // of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "resource not found" + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +// OTelComponentName returns an attribute KeyValue conforming to the +// "otel.component.name" semantic conventions. It represents a name uniquely +// identifying the instance of the OpenTelemetry component within its containing +// SDK instance. +func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeSchemaURL returns an attribute KeyValue conforming to the +// "otel.scope.schema_url" semantic conventions. 
It represents the schema URL of +// the instrumentation scope. +func OTelScopeSchemaURL(val string) attribute.KeyValue { + return OTelScopeSchemaURLKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // Zipkin span exporter over HTTP + // + // Stability: development + OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") + // OTLP metric exporter over HTTP with JSON serialization + // + 
// Stability: development + OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") + // Prometheus metric exporter over HTTP with the default text-based format + // + // Stability: development + OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") +) + +// Enum values for otel.span.parent.origin +var ( + // The span does not have a parent, it is a root span + // Stability: development + OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") + // The span has a parent and the parent's span context [isRemote()] is false + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") + // The span has a parent and the parent's span context [isRemote()] is true + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") +) + +// Enum values for otel.span.sampling_result +var ( + // The span is not sampled and not recording + // Stability: development + OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") + // The span is not sampled, but recording + // Stability: development + OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") + // The span is sampled and recording + // Stability: development + OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") +) + +// Enum values for otel.status_code +var ( + // The operation has been validated by an Application developer or Operator to + // have completed successfully. + // Stability: stable + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error. + // Stability: stable + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// Namespace: peer +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic + // conventions. It represents the [`service.name`] of the remote service. SHOULD + // be equal to the actual `service.name` resource attribute of the remote + // service if any. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: AuthTokenCache + // + // [`service.name`]: /docs/resource/README.md#service + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the "peer.service" +// semantic conventions. It represents the [`service.name`] of the remote +// service. SHOULD be equal to the actual `service.name` resource attribute of +// the remote service if any. +// +// [`service.name`]: /docs/resource/README.md#service +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// Namespace: process +const ( + // ProcessArgsCountKey is the attribute Key conforming to the + // "process.args_count" semantic conventions. It represents the length of the + // process.command_args array. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 4 + // Note: This field can be useful for querying or performing bucket analysis on + // how many arguments were provided to start a process. More arguments may be an + // indication of suspicious activity. 
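+ //
+ // A minimal illustrative sketch populating both process.command_args and
+ // process.args_count from os.Args (shown without the sanitization that the
+ // process.command_args description calls for):
+ //
+ //	attrs := []attribute.KeyValue{
+ //		ProcessCommandArgs(os.Args...),
+ //		ProcessArgsCount(len(os.Args)),
+ //	}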
+ ProcessArgsCountKey = attribute.Key("process.args_count") + + // ProcessCommandKey is the attribute Key conforming to the "process.command" + // semantic conventions. It represents the command used to launch the process + // (i.e. the command name). On Linux based systems, can be set to the zeroth + // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter + // extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otelcol" + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received by + // the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this + // would be the full argv vector passed to `main`. SHOULD NOT be collected by + // default unless there is sanitization that excludes sensitive data. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otecol", "--config=config.yaml" + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full command + // used to launch the process as a single string representing the full command. + // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if + // you have to assemble it just for monitoring; use `process.command_args` + // instead. SHOULD NOT be collected by default unless there is sanitization that + // excludes sensitive data. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were voluntary or + // involuntary. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and time + // the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:25:34.853Z" + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the + // "process.executable.build_id.gnu" semantic conventions. It represents the GNU + // build ID as found in the `.note.gnu.build-id` ELF section (hex string). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" + ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") + + // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the + // "process.executable.build_id.go" semantic conventions. It represents the Go + // build ID as retrieved by `go tool buildid `. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" + ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") + + // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the + // "process.executable.build_id.htlhash" semantic conventions. It represents the + // profiling specific build ID for executables. See the OTel specification for + // Profiles for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "600DCAFE4A110000F2BF38C493F5FB92" + ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name of the + // process executable. On Linux based systems, this SHOULD be set to the base + // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to + // the base name of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcol" + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full path + // to the process executable. On Linux based systems, can be set to the target + // of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/cmd/otelcol" + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" + // semantic conventions. It represents the exit code of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" + // semantic conventions. It represents the date and time the process exited, in + // ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:26:12.315Z" + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID of the + // process's group leader. This is also the process group ID (PGID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. 
It represents the whether the + // process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessLinuxCgroupKey is the attribute Key conforming to the + // "process.linux.cgroup" semantic conventions. It represents the control group + // associated with the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", + // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" + // Note: Control groups (cgroups) are a kernel feature used to organize and + // manage process resources. This attribute provides the path(s) to the + // cgroup(s) associated with the process, which should match the contents of the + // [/proc/[PID]/cgroup] file. + // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. 
It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14.0.2 + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved user ID + // (SUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the username of + // the saved user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID of + // the process's session leader. This is also the session ID (SID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessTitleKey is the attribute Key conforming to the "process.title" + // semantic conventions. It represents the process title (proctitle). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cat /etc/hostname", "xfce4-session", "bash" + // Note: In many Unix-like systems, process title (proctitle), is the string + // that represents the name or command line of a running process, displayed by + // system monitoring tools like ps, top, and htop. + ProcessTitleKey = attribute.Key("process.title") + + // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" + // semantic conventions. It represents the effective user ID (EUID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" + // semantic conventions. It represents the username of the effective user of the + // process. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic + // conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily unique + // across all processes on the host but it is unique within the process + // namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") + + // ProcessWorkingDirectoryKey is the attribute Key conforming to the + // "process.working_directory" semantic conventions. It represents the working + // directory of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/root" + ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") +) + +// ProcessArgsCount returns an attribute KeyValue conforming to the +// "process.args_count" semantic conventions. It represents the length of the +// process.command_args array. +func ProcessArgsCount(val int) attribute.KeyValue { + return ProcessArgsCountKey.Int(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be set +// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the +// first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the command +// arguments (including the command/executable itself) as received by the +// process. On Linux-based systems (and some other Unixoid systems supporting +// procfs), can be set according to the list of null-delimited strings extracted +// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full +// argv vector passed to `main`. SHOULD NOT be collected by default unless there +// is sanitization that excludes sensitive data. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if +// you have to assemble it just for monitoring; use `process.command_args` +// instead. SHOULD NOT be collected by default unless there is sanitization that +// excludes sensitive data. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and time +// the process was created, in ISO 8601 format. 
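+//
+// A minimal illustrative sketch, assuming start holds the process start time
+// as a time.Time (RFC 3339 output is a valid ISO 8601 form):
+//
+//	ProcessCreationTime(start.UTC().Format(time.RFC3339Nano))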
+func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the +// "process.environment_variable" semantic conventions. It represents the process +// environment variables, `` being the environment variable name, the value +// being the environment variable value. +func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { + return attribute.String("process.environment_variable."+key, val) +} + +// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the +// "process.executable.build_id.gnu" semantic conventions. It represents the GNU +// build ID as found in the `.note.gnu.build-id` ELF section (hex string). +func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGNUKey.String(val) +} + +// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the +// "process.executable.build_id.go" semantic conventions. It represents the Go +// build ID as retrieved by `go tool buildid `. +func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGoKey.String(val) +} + +// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to +// the "process.executable.build_id.htlhash" semantic conventions. It represents +// the profiling specific build ID for executables. See the OTel specification +// for Profiles for more information. +func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { + return ProcessExecutableBuildIDHtlhashKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of the +// process executable. On Linux based systems, this SHOULD be set to the base +// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the +// base name of `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path to +// the process executable. On Linux based systems, can be set to the target of +// `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time the +// process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of the +// process's group leader. This is also the process group ID (PGID) of the +// process. 
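+//
+// A minimal illustrative sketch for Unix-like systems, where syscall.Getpgrp
+// reports the calling process's group ID (not available on every platform):
+//
+//	ProcessGroupLeaderPID(syscall.Getpgrp())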
+func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessLinuxCgroup returns an attribute KeyValue conforming to the +// "process.linux.cgroup" semantic conventions. It represents the control group +// associated with the process. +func ProcessLinuxCgroup(val string) attribute.KeyValue { + return ProcessLinuxCgroupKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" +// semantic conventions. It represents the username of the user that owns the +// process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. +func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. 
+func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. It represents the working +// directory of the process. +func ProcessWorkingDirectory(val string) attribute.KeyValue { + return ProcessWorkingDirectoryKey.String(val) +} + +// Enum values for process.context_switch_type +var ( + // voluntary + // Stability: development + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + // Stability: development + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +// Enum values for process.paging.fault_type +var ( + // major + // Stability: development + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + // Stability: development + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// Namespace: profile +const ( + // ProfileFrameTypeKey is the attribute Key conforming to the + // "profile.frame.type" semantic conventions. It represents the describes the + // interpreter or compiler of a single frame. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpython" + ProfileFrameTypeKey = attribute.Key("profile.frame.type") +) + +// Enum values for profile.frame.type +var ( + // [.NET] + // + // Stability: development + // + // [.NET]: https://wikipedia.org/wiki/.NET + ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") + // [JVM] + // + // Stability: development + // + // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine + ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") + // [Kernel] + // + // Stability: development + // + // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) + ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") + // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a + // more precise value MUST be used. 
+ // + // Stability: development + // + // [C]: https://wikipedia.org/wiki/C_(programming_language) + // [C++]: https://wikipedia.org/wiki/C%2B%2B + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") + // [Perl] + // + // Stability: development + // + // [Perl]: https://wikipedia.org/wiki/Perl + ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") + // [PHP] + // + // Stability: development + // + // [PHP]: https://wikipedia.org/wiki/PHP + ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") + // [Python] + // + // Stability: development + // + // [Python]: https://wikipedia.org/wiki/Python_(programming_language) + ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") + // [Ruby] + // + // Stability: development + // + // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) + ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") + // [V8JS] + // + // Stability: development + // + // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) + ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") + // [Erlang] + // + // Stability: development + // + // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) + ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") + // [Go], + // + // Stability: development + // + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") + // [Rust] + // + // Stability: development + // + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") +) + +// Namespace: rpc +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes] of the Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [error codes]: https://connectrpc.com//docs/protocol/#error-codes + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the + // [numeric status code] of the gRPC request. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` + // property of response if it is an error response. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -32700, 100 + RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Parse error", "User already exists" + RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJSONRPCRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, string, + // `null` or missing (for notifications), value is expected to be cast to string + // for simplicity. Use empty string in case of `null` value. Omit entirely if + // this is a notification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10", "request-7", "" + RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJSONRPCVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0", "1.0" + RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It MUST be calculated as two different counters + // starting from `1` one for sent messages and one for received message.. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" + // semantic conventions. It represents the whether this is a received or sent + // message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic + // conventions. It represents the name of the (logical) method being called, + // must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: exampleMethod + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. 
The `code.function.name` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+	// conventions. It represents the full (logical) name of the service being
+	// called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myservice.EchoService
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing class.
+	// The `code.namespace` attribute may be used to store the latter (despite the
+	// attribute name, it may include a class name; e.g., class with method actually
+	// executing the call on the server side, RPC client stub class on the client
+	// side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+	// conventions. It represents a string identifying the remoting system. See
+	// below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the
+// connect request metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val)
+}
+
+// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the
+// connect response metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val)
+}
+
+// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC
+// request metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.grpc.request.metadata."+key, val)
+}
+
+// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC
+// response metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("rpc.grpc.response.metadata."+key, val)
+}
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
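+//
+// A minimal usage sketch (illustrative only, not part of the generated
+// definitions; it assumes this package is imported as `semconv` and that
+// `span` is an OpenTelemetry trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.RPCJSONRPCErrorCode(-32700),
+//		semconv.RPCJSONRPCErrorMessage("Parse error"),
+//	)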
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue { + return RPCJSONRPCErrorCodeKey.Int(val) +} + +// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJSONRPCErrorMessage(val string) attribute.KeyValue { + return RPCJSONRPCErrorMessageKey.String(val) +} + +// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property +// of request or response. Since protocol allows id to be int, string, `null` or +// missing (for notifications), value is expected to be cast to string for +// simplicity. Use empty string in case of `null` value. Omit entirely if this is +// a notification. +func RPCJSONRPCRequestID(val string) attribute.KeyValue { + return RPCJSONRPCRequestIDKey.String(val) +} + +// RPCJSONRPCVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version +// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't +// specify this, the value can be omitted. +func RPCJSONRPCVersion(val string) attribute.KeyValue { + return RPCJSONRPCVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" +// semantic conventions. It MUST be calculated as two different counters starting +// from `1` one for sent messages and one for received message.. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. 
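+//
+// For example, attributes for a gRPC call might be assembled roughly as
+// follows (illustrative sketch only; it assumes this package is imported as
+// `semconv` and that `span` is an OpenTelemetry trace.Span):
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.RPCSystemGRPC,
+//		semconv.RPCService("myservice.EchoService"),
+//		semconv.RPCMethod("exampleMethod"),
+//		semconv.RPCGRPCStatusCodeOk,
+//	}
+//	span.SetAttributes(attrs...)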
+func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // 
OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + // Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. +func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. 
It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). 
+ // + // Implementations, such as SDKs, are recommended to generate a random Version 1 + // or Version 4 [RFC + // 4122] UUID, but are free to use an inherent unique ID as + // the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the purposes of + // identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // [`/etc/machine-id`] file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we do + // not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it can't + // unambiguously determine the + // service instance that is generating that telemetry. For instance, creating an + // UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as + // they know the target address and + // port. + // + // [RFC + // 4122]: https://www.ietf.org/rfc/rfc4122.txt + // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" semantic + // conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "shoppingcart" + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. + // If `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + // + // [`process.executable.name`]: process.md + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). Zero-length namespace string is assumed equal to unspecified + // namespace. 
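+	//
+	// A service resource might combine these attributes roughly as follows
+	// (illustrative sketch only; it assumes this package is imported as
+	// `semconv` and go.opentelemetry.io/otel/sdk/resource as `resource`):
+	//
+	//	res := resource.NewSchemaless(
+	//		semconv.ServiceName("shoppingcart"),
+	//		semconv.ServiceNamespace("Shop"),
+	//		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+	//	)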
+ ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// Namespace: signalr +const ( + // SignalRConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the signalR + // HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "app_shutdown", "timeout" + SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalRTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the + // [SignalR transport type]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "web_sockets", "long_polling" + // + // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md + SignalRTransportKey = attribute.Key("signalr.transport") +) + +// Enum values for signalr.connection.status +var ( + // The connection was closed normally. + // Stability: stable + SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout. + // Stability: stable + SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down. + // Stability: stable + SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") +) + +// Enum values for signalr.transport +var ( + // ServerSentEvents protocol + // Stability: stable + SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") + // LongPolling protocol + // Stability: stable + SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") + // WebSockets protocol + // Stability: stable + SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") +) + +// Namespace: source +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the destination side, and when communicating through + // an intermediary, `source.address` SHOULD represent the source address behind + // any intermediaries, for example proxies, if it's available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" semantic + // conventions. It represents the source port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the "source.address" +// semantic conventions. It represents the source address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number. +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Namespace: system +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // deprecated, use `cpu.logical_number` instead. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "(identifier)" + SystemDeviceKey = attribute.Key("system.device") + + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the filesystem + // mode. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "rw, ro" + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/mnt/data" + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the filesystem + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "used" + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the filesystem + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ext4" + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") + + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free", "cached" + SystemMemoryStateKey = attribute.Key("system.memory.state") + + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "in" + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory paging + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free" + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory paging + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "minor" + SystemPagingTypeKey = attribute.Key("system.paging.type") + + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State Codes]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "running" + // + // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. 
It represents the +// deprecated, use `cpu.logical_number` instead. +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// SystemDevice returns an attribute KeyValue conforming to the "system.device" +// semantic conventions. It represents the device identifier. +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. It represents the filesystem +// mode. +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the +// "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path. +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Enum values for system.filesystem.state +var ( + // used + // Stability: development + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + // Stability: development + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + // Stability: development + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +// Enum values for system.filesystem.type +var ( + // fat32 + // Stability: development + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + // Stability: development + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + // Stability: development + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + // Stability: development + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + // Stability: development + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + // Stability: development + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// Enum values for system.memory.state +var ( + // Actual used virtual memory in bytes. 
+ // Stability: development + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + // Stability: development + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. 
It represents the fully qualified human readable name + // of the [test case]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a [test suite] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. +// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID). 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic + // conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: main + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic +// conventions. It represents the current "managed" thread ID (as opposed to OS +// thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Namespace: tls +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic + // conventions. It represents the string indicating the [cipher] used during the + // current connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` + // of the [registered TLS Cipher Suits]. + // + // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 + // [registered TLS Cipher Suits]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the client. This is usually + // mutually-exclusive of `client.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // client. This is usually mutually-exclusive of `client.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. 
It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/Time + // indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." 
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // server. This is usually mutually-exclusive of `server.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" + // semantic conventions. It represents a hash that identifies servers based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. 
It represents the numeric part of +// the version parsed from the original string of the negotiated +// [SSL/TLS protocol version]. +// +// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also exists +// in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// server. This is usually mutually-exclusive of `server.certificate` since that +// value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the server. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. 
It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI query] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "q=OpenTelemetry" + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `q=OpenTelemetry&sig=REDACTED`. + // + // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "foo.co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // For example, the registered domain for `foo.example.com` is `example.com`. + // Trying to approximate this by simply taking the last two labels will not work + // well for TLDs such as `co.uk`. + // + // [public suffix list]: https://publicsuffix.org/ + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic + // conventions. It represents the [URI scheme] component identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https", "ftp", "telnet" + // + // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name under + // the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain contains + // all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "east", "sub2.sub1" + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the + // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the + // subdomain field should contain `sub2.sub1`, with no trailing period. 
+ URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" semantic + // conventions. It represents the low-cardinality template of an + // [absolute path reference]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/users/{id}", "/users/:id", "/users?id={id}" + // + // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective top + // level domain (eTLD), also known as the domain suffix, is the last part of the + // domain name. For example, the top level domain for example.com is `com`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com", "co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // + // [public suffix list]: https://publicsuffix.org/ + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the `url.full`, +// such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the "url.extension" +// semantic conventions. It represents the file extension extracted from the +// `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the "url.fragment" +// semantic conventions. It represents the [URI fragment] component. +// +// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" semantic +// conventions. It represents the absolute URL describing a network resource +// according to [RFC3986]. +// +// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the "url.original" +// semantic conventions. It represents the unmodified original URL as seen in the +// event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" semantic +// conventions. It represents the [URI path] component. +// +// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" semantic +// conventions. It represents the port extracted from the `url.full`. +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic +// conventions. It represents the [URI query] component. 
+// +// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI scheme] component identifying the +// used protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" +// semantic conventions. It represents the subdomain portion of a fully qualified +// domain name includes all of the names except the host name under the +// registered_domain. In a partially qualified domain, or if the qualification +// level of the full name cannot be determined, subdomain contains all of the +// names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the "url.template" +// semantic conventions. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of the +// domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Namespace: user +const ( + // UserEmailKey is the attribute Key conforming to the "user.email" semantic + // conventions. It represents the user email address. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein@example.com" + UserEmailKey = attribute.Key("user.email") + + // UserFullNameKey is the attribute Key conforming to the "user.full_name" + // semantic conventions. It represents the user's full name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Albert Einstein" + UserFullNameKey = attribute.Key("user.full_name") + + // UserHashKey is the attribute Key conforming to the "user.hash" semantic + // conventions. It represents the unique user hash to correlate information for + // a user in anonymized form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" + // Note: Useful if `user.id` or `user.name` contain confidential information and + // cannot be used. + UserHashKey = attribute.Key("user.hash") + + // UserIDKey is the attribute Key conforming to the "user.id" semantic + // conventions. It represents the unique identifier of the user. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. +func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 + // grpc-java-okhttp/1.27.2" + // + // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentOSNameKey is the attribute Key conforming to the + // "user_agent.os.name" semantic conventions. It represents the human readable + // operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + // Note: For mapping user agent strings to OS names, libraries such as + // [ua-parser] can be utilized. + // + // [ua-parser]: https://github.com/ua-parser + UserAgentOSNameKey = attribute.Key("user_agent.os.name") + + // UserAgentOSVersionKey is the attribute Key conforming to the + // "user_agent.os.version" semantic conventions. It represents the version + // string of the operating system as defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // Note: For mapping user agent strings to OS versions, libraries such as + // [ua-parser] can be utilized. + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + // [ua-parser]: https://github.com/ua-parser + UserAgentOSVersionKey = attribute.Key("user_agent.os.version") + + // UserAgentSyntheticTypeKey is the attribute Key conforming to the + // "user_agent.synthetic.type" semantic conventions. It represents the specifies + // the category of synthetic traffic, such as tests or bots. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute MAY be derived from the contents of the + // `user_agent.original` attribute. Components that populate the attribute are + // responsible for determining what they consider to be synthetic bot or test + // traffic. This attribute can either be set for self-identification purposes, + // or on telemetry detected to be generated as a result of a synthetic request. + // This attribute is useful for distinguishing between genuine client traffic + // and synthetic traffic generated by bots or tests. + UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of the + // user-agent extracted from original. Usually refers to the browser's version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.1.2", "1.0.0" + // Note: [Example] of extracting browser's version from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant version SHOULD be selected. In such a scenario it should align + // with `user_agent.name` + // + // [Example]: https://www.whatsmyua.info + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. 
It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "added", "removed" + VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") + + // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" + // semantic conventions. It represents the group owner within the version + // control system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org", "myteam", "business-unit" + VCSOwnerNameKey = attribute.Key("vcs.owner.name") + + // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" + // semantic conventions. It represents the name of the version control system + // provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "github", "gitlab", "gitea", "bitbucket" + VCSProviderNameKey = attribute.Key("vcs.provider.name") + + // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") + + // VCSRefBaseRevisionKey is the attribute Key conforming to the + // "vcs.ref.base.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. The + // revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.base.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") + + // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") + + // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") + + // VCSRefHeadRevisionKey is the attribute Key conforming to the + // "vcs.ref.head.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `head` refers to where you are right now; the current reference at a + // given time.The revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.head.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") + + // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") + + // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic + // conventions. It represents the type of the [reference] in the repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. 
+// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. 
+ // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. + // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" +// semantic conventions. It represents the name of the web engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the web +// engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Namespace: zos +const ( + // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic + // conventions. It represents the System Management Facility (SMF) Identifier + // uniquely identified a z/OS system within a SYSPLEX or mainframe environment + // and is used for system and performance analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYS1" + ZOSSmfIDKey = attribute.Key("zos.smf.id") + + // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name" + // semantic conventions. It represents the name of the SYSPLEX to which the z/OS + // system belongs too. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "SYSPLEX1" + ZOSSysplexNameKey = attribute.Key("zos.sysplex.name") +) + +// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic +// conventions. It represents the System Management Facility (SMF) Identifier +// uniquely identified a z/OS system within a SYSPLEX or mainframe environment +// and is used for system and performance analysis. +func ZOSSmfID(val string) attribute.KeyValue { + return ZOSSmfIDKey.String(val) +} + +// ZOSSysplexName returns an attribute KeyValue conforming to the +// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX +// to which the z/OS system belongs too. +func ZOSSysplexName(val string) attribute.KeyValue { + return ZOSSysplexNameKey.String(val) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go similarity index 80% rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go index d031bbea..11101032 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. 
This package represents the v1.26.0 +// patterns for OpenTelemetry things. This package represents the v1.37.0 // version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go new file mode 100644 index 00000000..267979c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import ( + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +// +// If err is nil, the returned attribute has the default value +// [ErrorTypeOther]. +// +// If err's type has the method +// +// ErrorType() string +// +// then the returned attribute has the value of err.ErrorType(). Otherwise, the +// returned attribute has a value derived from the concrete type of err. +// +// The key of the returned attribute is [ErrorTypeKey]. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + + return ErrorTypeKey.String(errorType(err)) +} + +func errorType(err error) string { + var s string + if et, ok := err.(interface{ ErrorType() string }); ok { + // Prioritize the ErrorType method if available. + s = et.ErrorType() + } + if s == "" { + // Fallback to reflection if the ErrorType method is not supported or + // returns an empty value. + + t := reflect.TypeOf(err) + pkg, name := t.PkgPath(), t.Name() + if pkg != "" && name != "" { + s = pkg + "." + name + } else { + // The type has no package path or name (predeclared, not-defined, + // or alias for a not-defined type). + // + // This is not guaranteed to be unique, but is a best effort. + s = t.String() + } + } + return s +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go similarity index 76% rename from vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go index 137acc67..e67469a4 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go new file mode 100644 index 00000000..fd064530 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go @@ -0,0 +1,2264 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelconv provides types and functionality for OpenTelemetry semantic +// conventions in the "otel" namespace. 
+package otelconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ComponentTypeAttr is an attribute conforming to the otel.component.type +// semantic conventions. It represents a name identifying the type of the +// OpenTelemetry component. +type ComponentTypeAttr string + +var ( + // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span + // processor. + ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor" + // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor. + ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor" + // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record + // processor. + ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" + // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record + // processor. + ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" + // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with + // protobuf serialization. + ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" + // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with + // protobuf serialization. + ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" + // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" + // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. + ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" + // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" + // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. 
+ ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. + ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. +type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. + SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. + SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code +// semantic conventions. It represents the gRPC status code of the last gRPC +// requests performed in scope of this export call. +type RPCGRPCStatusCodeAttr int64 + +var ( + // RPCGRPCStatusCodeOk is the OK. + RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0 + // RPCGRPCStatusCodeCancelled is the CANCELLED. + RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1 + // RPCGRPCStatusCodeUnknown is the UNKNOWN. + RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2 + // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT. + RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3 + // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED. + RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4 + // RPCGRPCStatusCodeNotFound is the NOT_FOUND. + RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5 + // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS. + RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6 + // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED. + RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7 + // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED. + RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8 + // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION. 
+ RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9 + // RPCGRPCStatusCodeAborted is the ABORTED. + RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10 + // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE. + RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11 + // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED. + RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12 + // RPCGRPCStatusCodeInternal is the INTERNAL. + RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13 + // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE. + RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14 + // RPCGRPCStatusCodeDataLoss is the DATA_LOSS. + RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15 + // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED. + RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16 +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterLogExportedOpts + } else { + opt = append(opt, newSDKExporterLogExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + opt..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
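+//
+// A hypothetical usage sketch (meter and ctx are caller-supplied placeholders;
+// every other identifier is defined in this package):
+//
+//	exported, err := NewSDKExporterLogExported(meter)
+//	if err != nil {
+//		// handle instrument creation error
+//	}
+//	// Successfully exported records carry no error.type attribute.
+//	exported.Add(ctx, 10,
+//		exported.AttrComponentType(ComponentTypeOtlpGRPCLogExporter))
+//	// Rejected records count as failed and carry error.type.
+//	exported.Add(ctx, 2,
+//		exported.AttrComponentType(ComponentTypeOtlpGRPCLogExporter),
+//		exported.AttrErrorType(ErrorTypeAttr("rejected")))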
+func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). 
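+//
+// A hypothetical usage sketch (meter, ctx, batch and exporter are
+// caller-supplied placeholders): increment when records are handed to the
+// exporter and decrement once the export call returns.
+//
+//	inflight, _ := NewSDKExporterLogInflight(meter)
+//	attr := inflight.AttrComponentType(ComponentTypeOtlpHTTPLogExporter)
+//	inflight.Add(ctx, int64(len(batch)), attr)
+//	_ = exporter.Export(ctx, batch) // placeholder export call
+//	inflight.Add(ctx, -int64(len(batch)), attr)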
+type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. +func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterLogInflightOpts + } else { + opt = append(opt, newSDKExporterLogInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + opt..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointExportedOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + opt..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). 
+type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. +func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointInflightOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + opt..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. +func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterOperationDurationOpts + } else { + opt = append(opt, newSDKExporterOperationDurationOpts...) + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + opt..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. 
+// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. +// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrHTTPResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the HTTP status +// code of the last HTTP request performed in scope of this export call. +func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrRPCGRPCStatusCode returns an optional attribute for the +// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code +// of the last gRPC requests performed in scope of this export call. 
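+//
+// A hypothetical usage sketch (meter, ctx and elapsed are caller-supplied
+// placeholders), pairing the duration histogram with this attribute:
+//
+//	dur, _ := NewSDKExporterOperationDuration(meter)
+//	dur.Record(ctx, elapsed.Seconds(),
+//		dur.AttrComponentType(ComponentTypeOtlpGRPCSpanExporter),
+//		dur.AttrRPCGRPCStatusCode(RPCGRPCStatusCodeOk))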
+func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue { + return attribute.Int64("rpc.grpc.status_code", int64(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterSpanExportedOpts + } else { + opt = append(opt, newSDKExporterSpanExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + opt..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
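+//
+// A hypothetical usage sketch (meter and ctx are caller-supplied placeholders);
+// AddSet, defined further below, is the variant of this method that takes a
+// pre-built attribute.Set:
+//
+//	exported, _ := NewSDKExporterSpanExported(meter)
+//	set := attribute.NewSet(
+//		exported.AttrComponentType(ComponentTypeOtlpGRPCSpanExporter),
+//	)
+//	exported.AddSet(ctx, 128, set)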
+func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It +// represents the number of spans which were passed to the exporter, but that +// have not been exported yet (neither successful, nor failed). 
+type SDKExporterSpanInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), +} + +// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. +func NewSDKExporterSpanInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterSpanInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterSpanInflightOpts + } else { + opt = append(opt, newSDKExporterSpanInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.span.inflight", + opt..., + ) + if err != nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterSpanInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanInflight) Name() string { + return "otel.sdk.exporter.span.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanInflight) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanInflight) Description() string { + return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKLogCreated is an instrument used to record metric values conforming to the +// "otel.sdk.log.created" semantic conventions. It represents the number of logs +// submitted to enabled SDK Loggers. +type SDKLogCreated struct { + metric.Int64Counter +} + +var newSDKLogCreatedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), +} + +// NewSDKLogCreated returns a new SDKLogCreated instrument. +func NewSDKLogCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKLogCreated, error) { + // Check if the meter is nil. + if m == nil { + return SDKLogCreated{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKLogCreatedOpts + } else { + opt = append(opt, newSDKLogCreatedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.log.created", + opt..., + ) + if err != nil { + return SDKLogCreated{noop.Int64Counter{}}, err + } + return SDKLogCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKLogCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKLogCreated) Name() string { + return "otel.sdk.log.created" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKLogCreated) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKLogCreated) Description() string { + return "The number of logs submitted to enabled SDK Loggers." +} + +// Add adds incr to the existing count for attrs. +func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SDKMetricReaderCollectionDuration is an instrument used to record metric +// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic +// conventions. It represents the duration of the collect operation of the metric +// reader. 
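+//
+// A hypothetical usage sketch (meter and ctx are caller-supplied placeholders;
+// assumes the caller imports "time"):
+//
+//	collectDur, _ := NewSDKMetricReaderCollectionDuration(meter)
+//	start := time.Now()
+//	// ... run the metric reader's collect operation ...
+//	collectDur.Record(ctx, time.Since(start).Seconds(),
+//		collectDur.AttrComponentType(ComponentTypePeriodicMetricReader))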
+type SDKMetricReaderCollectionDuration struct { + metric.Float64Histogram +} + +var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), +} + +// NewSDKMetricReaderCollectionDuration returns a new +// SDKMetricReaderCollectionDuration instrument. +func NewSDKMetricReaderCollectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKMetricReaderCollectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newSDKMetricReaderCollectionDurationOpts + } else { + opt = append(opt, newSDKMetricReaderCollectionDurationOpts...) + } + + i, err := m.Float64Histogram( + "otel.sdk.metric_reader.collection.duration", + opt..., + ) + if err != nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + } + return SDKMetricReaderCollectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKMetricReaderCollectionDuration) Name() string { + return "otel.sdk.metric_reader.collection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKMetricReaderCollectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKMetricReaderCollectionDuration) Description() string { + return "The duration of the collect operation of the metric reader." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. 
It represents the describes a class of error the operation ended +// with. +func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. +func NewSDKProcessorLogProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorLogProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogProcessedOpts + } else { + opt = append(opt, newSDKProcessorLogProcessedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.processor.log.processed", + opt..., + ) + if err != nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorLogProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogProcessed) Name() string { + return "otel.sdk.processor.log.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogProcessed) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogProcessed) Description() string { + return "The number of log records for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. 
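For illustration only (not part of the vendored file): a short sketch of how a log record processor might report both successfully submitted and dropped records through this counter, assuming the generated package is otelconv; the component name and the counts are placeholders, while the "queue_full" error type follows the attribute note below.

package sketch

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func reportProcessed(ctx context.Context, m metric.Meter, submitted, dropped int64) error {
	processed, err := otelconv.NewSDKProcessorLogProcessed(m)
	if err != nil {
		return err
	}

	name := processed.AttrComponentName("batching_log_processor/0") // illustrative instance name

	// Successfully submitted to the exporter: no error.type attribute is set.
	processed.Add(ctx, submitted, name)

	// Dropped because the queue was full: error.type carries the failure cause.
	processed.Add(ctx, dropped, name, processed.AttrErrorType(otelconv.ErrorTypeAttr("queue_full")))
	return nil
}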
+func (m SDKProcessorLogProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Log Record Processors MUST use `queue_full` for log records +// dropped due to a full queue. +func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.capacity" semantic +// conventions. It represents the maximum number of log records the queue of a +// given instance of an SDK Log Record processor can hold. +type SDKProcessorLogQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity +// instrument. +func NewSDKProcessorLogQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorLogQueueCapacityOpts...) 
+ } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.capacity", + opt..., + ) + if err != nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueCapacity) Name() string { + return "otel.sdk.processor.log.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueCapacity) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueCapacity) Description() string { + return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It +// represents the number of log records in the queue of a given instance of an +// SDK log processor. +type SDKProcessorLogQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. +func NewSDKProcessorLogQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorLogQueueSizeOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.size", + opt..., + ) + if err != nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKProcessorLogQueueSize) Name() string { + return "otel.sdk.processor.log.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueSize) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueSize) Description() string { + return "The number of log records in the queue of a given instance of an SDK log processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It +// represents the number of spans for which the processing has finished, either +// successful or failed. +type SDKProcessorSpanProcessed struct { + metric.Int64Counter +} + +var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed +// instrument. +func NewSDKProcessorSpanProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorSpanProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanProcessedOpts + } else { + opt = append(opt, newSDKProcessorSpanProcessedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.processor.span.processed", + opt..., + ) + if err != nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorSpanProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanProcessed) Name() string { + return "otel.sdk.processor.span.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanProcessed) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanProcessed) Description() string { + return "The number of spans for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. 
+// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a +// full queue. +func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.capacity" semantic +// conventions. It represents the maximum number of spans the queue of a given +// instance of an SDK span processor can hold. +type SDKProcessorSpanQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity +// instrument. +func NewSDKProcessorSpanQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueCapacity, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.capacity", + opt..., + ) + if err != nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueCapacity) Name() string { + return "otel.sdk.processor.span.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueCapacity) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueCapacity) Description() string { + return "The maximum number of spans the queue of a given instance of an SDK span processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. +// It represents the number of spans in the queue of a given instance of an SDK +// span processor. +type SDKProcessorSpanQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize +// instrument. +func NewSDKProcessorSpanQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueSizeOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.size", + opt..., + ) + if err != nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKProcessorSpanQueueSize) Name() string { + return "otel.sdk.processor.span.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueSize) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueSize) Description() string { + return "The number of spans in the queue of a given instance of an SDK span processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKSpanLive is an instrument used to record metric values conforming to the +// "otel.sdk.span.live" semantic conventions. It represents the number of created +// spans with `recording=true` for which the end operation has not been called +// yet. +type SDKSpanLive struct { + metric.Int64UpDownCounter +} + +var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), +} + +// NewSDKSpanLive returns a new SDKSpanLive instrument. +func NewSDKSpanLive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKSpanLive, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKSpanLiveOpts + } else { + opt = append(opt, newSDKSpanLiveOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.span.live", + opt..., + ) + if err != nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, err + } + return SDKSpanLive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanLive) Name() string { + return "otel.sdk.span.live" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanLive) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanLive) Description() string { + return "The number of created spans with `recording=true` for which the end operation has not been called yet." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m SDKSpanLive) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
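For illustration only (not part of the vendored file): because this is an up/down counter, a tracer would typically add 1 when a recording span starts and subtract 1 when it ends, using the same attributes for both calls so the increments cancel. A sketch, assuming the generated package is otelconv; the sampling-result literal is illustrative.

package sketch

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func trackLiveSpan(ctx context.Context, m metric.Meter, doWork func(context.Context)) error {
	live, err := otelconv.NewSDKSpanLive(m)
	if err != nil {
		return err
	}
	sampled := live.AttrSpanSamplingResult(otelconv.SpanSamplingResultAttr("RECORD_AND_SAMPLE"))

	live.Add(ctx, 1, sampled)        // span started with recording=true
	defer live.Add(ctx, -1, sampled) // span ended

	doWork(ctx)
	return nil
}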
+func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} + +// SDKSpanStarted is an instrument used to record metric values conforming to the +// "otel.sdk.span.started" semantic conventions. It represents the number of +// created spans. +type SDKSpanStarted struct { + metric.Int64Counter +} + +var newSDKSpanStartedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), +} + +// NewSDKSpanStarted returns a new SDKSpanStarted instrument. +func NewSDKSpanStarted( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKSpanStarted, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanStarted{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKSpanStartedOpts + } else { + opt = append(opt, newSDKSpanStartedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.span.started", + opt..., + ) + if err != nil { + return SDKSpanStarted{noop.Int64Counter{}}, err + } + return SDKSpanStarted{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanStarted) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. 
It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue { + return attribute.String("otel.span.parent.origin", string(val)) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go new file mode 100644 index 00000000..089b0c45 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go @@ -0,0 +1,1010 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package rpcconv provides types and functionality for OpenTelemetry semantic +// conventions in the "rpc" namespace. +package rpcconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ClientDuration is an instrument used to record metric values conforming to the +// "rpc.client.duration" semantic conventions. It represents the measures the +// duration of outbound RPC. +type ClientDuration struct { + metric.Float64Histogram +} + +var newClientDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of outbound RPC."), + metric.WithUnit("ms"), +} + +// NewClientDuration returns a new ClientDuration instrument. +func NewClientDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientDurationOpts + } else { + opt = append(opt, newClientDurationOpts...) + } + + i, err := m.Float64Histogram( + "rpc.client.duration", + opt..., + ) + if err != nil { + return ClientDuration{noop.Float64Histogram{}}, err + } + return ClientDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientDuration) Name() string { + return "rpc.client.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientDuration) Unit() string { + return "ms" +} + +// Description returns the semantic convention description of the instrument +func (ClientDuration) Description() string { + return "Measures the duration of outbound RPC." +} + +// Record records val to the current distribution for attrs. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. 
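For illustration only (not part of the vendored file): a minimal sketch of recording an outbound RPC duration with this instrument. The instrument's unit is milliseconds; the attribute keys and values below are placeholders for whatever RPC attributes the caller already has.

package sketch

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv"
)

func timeOutboundRPC(ctx context.Context, m metric.Meter, call func(context.Context) error) error {
	dur, err := rpcconv.NewClientDuration(m)
	if err != nil {
		return err
	}

	start := time.Now()
	callErr := call(ctx)

	// The instrument's unit is "ms", so convert the elapsed time accordingly.
	dur.Record(ctx, float64(time.Since(start))/float64(time.Millisecond),
		attribute.String("rpc.system", "grpc"),                       // placeholder
		attribute.String("rpc.service", "example.v1.WidgetService"), // placeholder
		attribute.String("rpc.method", "GetWidget"),                  // placeholder
	)
	return callErr
}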
+func (m ClientDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ClientDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientRequestSize is an instrument used to record metric values conforming to +// the "rpc.client.request.size" semantic conventions. It represents the measures +// the size of RPC request messages (uncompressed). +type ClientRequestSize struct { + metric.Int64Histogram +} + +var newClientRequestSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), +} + +// NewClientRequestSize returns a new ClientRequestSize instrument. +func NewClientRequestSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestSize, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestSize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientRequestSizeOpts + } else { + opt = append(opt, newClientRequestSizeOpts...) + } + + i, err := m.Int64Histogram( + "rpc.client.request.size", + opt..., + ) + if err != nil { + return ClientRequestSize{noop.Int64Histogram{}}, err + } + return ClientRequestSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestSize) Name() string { + return "rpc.client.request.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestSize) Description() string { + return "Measures the size of RPC request messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per message in a streaming batch +func (m ClientRequestSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// **Streaming**: Recorded per message in a streaming batch +func (m ClientRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientRequestsPerRPC is an instrument used to record metric values conforming +// to the "rpc.client.requests_per_rpc" semantic conventions. It represents the +// measures the number of messages received per RPC. +type ClientRequestsPerRPC struct { + metric.Int64Histogram +} + +var newClientRequestsPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), +} + +// NewClientRequestsPerRPC returns a new ClientRequestsPerRPC instrument. +func NewClientRequestsPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestsPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestsPerRPC{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientRequestsPerRPCOpts + } else { + opt = append(opt, newClientRequestsPerRPCOpts...) + } + + i, err := m.Int64Histogram( + "rpc.client.requests_per_rpc", + opt..., + ) + if err != nil { + return ClientRequestsPerRPC{noop.Int64Histogram{}}, err + } + return ClientRequestsPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestsPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestsPerRPC) Name() string { + return "rpc.client.requests_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestsPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestsPerRPC) Description() string { + return "Measures the number of messages received per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientResponseSize is an instrument used to record metric values conforming to +// the "rpc.client.response.size" semantic conventions. It represents the +// measures the size of RPC response messages (uncompressed). 
+type ClientResponseSize struct { + metric.Int64Histogram +} + +var newClientResponseSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), +} + +// NewClientResponseSize returns a new ClientResponseSize instrument. +func NewClientResponseSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseSize, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponseSize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientResponseSizeOpts + } else { + opt = append(opt, newClientResponseSizeOpts...) + } + + i, err := m.Int64Histogram( + "rpc.client.response.size", + opt..., + ) + if err != nil { + return ClientResponseSize{noop.Int64Histogram{}}, err + } + return ClientResponseSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponseSize) Name() string { + return "rpc.client.response.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponseSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponseSize) Description() string { + return "Measures the size of RPC response messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ClientResponseSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ClientResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientResponsesPerRPC is an instrument used to record metric values conforming +// to the "rpc.client.responses_per_rpc" semantic conventions. It represents the +// measures the number of messages sent per RPC. +type ClientResponsesPerRPC struct { + metric.Int64Histogram +} + +var newClientResponsesPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), +} + +// NewClientResponsesPerRPC returns a new ClientResponsesPerRPC instrument. +func NewClientResponsesPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponsesPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponsesPerRPC{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientResponsesPerRPCOpts + } else { + opt = append(opt, newClientResponsesPerRPCOpts...) 
+ } + + i, err := m.Int64Histogram( + "rpc.client.responses_per_rpc", + opt..., + ) + if err != nil { + return ClientResponsesPerRPC{noop.Int64Histogram{}}, err + } + return ClientResponsesPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponsesPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponsesPerRPC) Name() string { + return "rpc.client.responses_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponsesPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponsesPerRPC) Description() string { + return "Measures the number of messages sent per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerDuration is an instrument used to record metric values conforming to the +// "rpc.server.duration" semantic conventions. It represents the measures the +// duration of inbound RPC. +type ServerDuration struct { + metric.Float64Histogram +} + +var newServerDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of inbound RPC."), + metric.WithUnit("ms"), +} + +// NewServerDuration returns a new ServerDuration instrument. +func NewServerDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerDurationOpts + } else { + opt = append(opt, newServerDurationOpts...) + } + + i, err := m.Float64Histogram( + "rpc.server.duration", + opt..., + ) + if err != nil { + return ServerDuration{noop.Float64Histogram{}}, err + } + return ServerDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerDuration) Name() string { + return "rpc.server.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerDuration) Unit() string { + return "ms" +} + +// Description returns the semantic convention description of the instrument +func (ServerDuration) Description() string { + return "Measures the duration of inbound RPC." 
+} + +// Record records val to the current distribution for attrs. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ServerDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ServerDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ServerRequestSize is an instrument used to record metric values conforming to +// the "rpc.server.request.size" semantic conventions. It represents the measures +// the size of RPC request messages (uncompressed). +type ServerRequestSize struct { + metric.Int64Histogram +} + +var newServerRequestSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), +} + +// NewServerRequestSize returns a new ServerRequestSize instrument. +func NewServerRequestSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestSize, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestSize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerRequestSizeOpts + } else { + opt = append(opt, newServerRequestSizeOpts...) + } + + i, err := m.Int64Histogram( + "rpc.server.request.size", + opt..., + ) + if err != nil { + return ServerRequestSize{noop.Int64Histogram{}}, err + } + return ServerRequestSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestSize) Name() string { + return "rpc.server.request.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestSize) Description() string { + return "Measures the size of RPC request messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per message in a streaming batch +func (m ServerRequestSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// **Streaming**: Recorded per message in a streaming batch +func (m ServerRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerRequestsPerRPC is an instrument used to record metric values conforming +// to the "rpc.server.requests_per_rpc" semantic conventions. It represents the +// measures the number of messages received per RPC. +type ServerRequestsPerRPC struct { + metric.Int64Histogram +} + +var newServerRequestsPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), +} + +// NewServerRequestsPerRPC returns a new ServerRequestsPerRPC instrument. +func NewServerRequestsPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestsPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestsPerRPC{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerRequestsPerRPCOpts + } else { + opt = append(opt, newServerRequestsPerRPCOpts...) + } + + i, err := m.Int64Histogram( + "rpc.server.requests_per_rpc", + opt..., + ) + if err != nil { + return ServerRequestsPerRPC{noop.Int64Histogram{}}, err + } + return ServerRequestsPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestsPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestsPerRPC) Name() string { + return "rpc.server.requests_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestsPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestsPerRPC) Description() string { + return "Measures the number of messages received per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming** : This metric is required for server and client streaming RPCs +func (m ServerRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming** : This metric is required for server and client streaming RPCs +func (m ServerRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerResponseSize is an instrument used to record metric values conforming to +// the "rpc.server.response.size" semantic conventions. It represents the +// measures the size of RPC response messages (uncompressed). 
+type ServerResponseSize struct { + metric.Int64Histogram +} + +var newServerResponseSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), +} + +// NewServerResponseSize returns a new ServerResponseSize instrument. +func NewServerResponseSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponseSize, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponseSize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerResponseSizeOpts + } else { + opt = append(opt, newServerResponseSizeOpts...) + } + + i, err := m.Int64Histogram( + "rpc.server.response.size", + opt..., + ) + if err != nil { + return ServerResponseSize{noop.Int64Histogram{}}, err + } + return ServerResponseSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponseSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerResponseSize) Name() string { + return "rpc.server.response.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponseSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponseSize) Description() string { + return "Measures the size of RPC response messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ServerResponseSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ServerResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerResponsesPerRPC is an instrument used to record metric values conforming +// to the "rpc.server.responses_per_rpc" semantic conventions. It represents the +// measures the number of messages sent per RPC. +type ServerResponsesPerRPC struct { + metric.Int64Histogram +} + +var newServerResponsesPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), +} + +// NewServerResponsesPerRPC returns a new ServerResponsesPerRPC instrument. +func NewServerResponsesPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponsesPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponsesPerRPC{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerResponsesPerRPCOpts + } else { + opt = append(opt, newServerResponsesPerRPCOpts...) 
+ } + + i, err := m.Int64Histogram( + "rpc.server.responses_per_rpc", + opt..., + ) + if err != nil { + return ServerResponsesPerRPC{noop.Int64Histogram{}}, err + } + return ServerResponsesPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponsesPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerResponsesPerRPC) Name() string { + return "rpc.server.responses_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponsesPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponsesPerRPC) Description() string { + return "Measures the number of messages sent per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ServerResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ServerResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go similarity index 72% rename from vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go index 634a1dce..f8a0b704 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
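A minimal sketch of how the generated rpc.server.* instruments above are meant to be used, assuming the package is importable as go.opentelemetry.io/otel/semconv/v1.37.0/otelconv (the import path is an assumption; the constructor and the Record/RecordSet methods are the ones defined in this diff):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"

	// Assumed import path for the generated instruments shown in this diff.
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func main() {
	ctx := context.Background()
	meter := otel.Meter("example/rpc")

	// NewServerRequestsPerRPC falls back to a no-op histogram when the meter is nil.
	reqs, err := otelconv.NewServerRequestsPerRPC(meter)
	if err != nil {
		panic(err)
	}

	// Record with loose attributes; the instrument pools its RecordOption slice internally.
	reqs.Record(ctx, 1, attribute.String("rpc.system", "grpc"))

	// Or pass a pre-built attribute.Set via RecordSet to skip per-call set construction.
	set := attribute.NewSet(attribute.String("rpc.system", "grpc"))
	reqs.RecordSet(ctx, 1, set)
}

The same Record/RecordSet pair exists on ServerRequestSize, ServerResponseSize, and ServerResponsesPerRPC.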
+ +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go index 7e291002..8763936a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) @@ -39,7 +39,7 @@ type autoTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = autoTracerProvider{} -func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { +func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { cfg := NewTracerConfig(opts...) return autoTracer{ name: name, @@ -57,14 +57,15 @@ type autoTracer struct { var _ Tracer = autoTracer{} func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { - var psc SpanContext + var psc, sc SpanContext sampled := true span := new(autoSpan) // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = ContextWithSpan(ctx, span) @@ -80,7 +81,7 @@ func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOpt // Expected to be implemented in eBPF. 
// //go:noinline -func (t *autoTracer) start( +func (*autoTracer) start( ctx context.Context, spanPtr *autoSpan, psc *SpanContext, diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 9c0b720a..d9ecef1c 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -4,6 +4,7 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( + "slices" "time" "go.opentelemetry.io/otel/attribute" @@ -73,7 +74,7 @@ func (cfg *SpanConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *SpanConfig) StackTrace() bool { return cfg.stackTrace } @@ -154,7 +155,7 @@ func (cfg *EventConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *EventConfig) StackTrace() bool { return cfg.stackTrace } @@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + +// WithInstrumentationAttributes adds the instrumentation attributes. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) TracerOption { + if set.Len() == 0 { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + return config + }) + } + return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) 
+ if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go new file mode 100644 index 00000000..1cbef1d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/hex.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +const ( + // hexLU is a hex lookup table of the 16 lowercase hex digits. + // The character values of the string are indexed at the equivalent + // hexadecimal value they represent. This table efficiently encodes byte data + // into a string representation of hexadecimal. + hexLU = "0123456789abcdef" + + // hexRev is a reverse hex lookup table for lowercase hex digits. + // The table is efficiently decodes a hexadecimal string into bytes. + // Valid hexadecimal characters are indexed at their respective values. All + // other invalid ASCII characters are represented with '\xff'. + // + // The '\xff' character is used as invalid because no valid character has + // the upper 4 bits set. Meaning, an efficient validation can be performed + // over multiple character parsing by checking these bits remain zero. + hexRev = "" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +) diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go index f663547b..ff0f6eac 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go @@ -52,7 +52,7 @@ func Map(key string, value ...Attr) Attr { return Attr{key, MapValue(value...)} } -// Equal returns if a is equal to b. +// Equal reports whether a is equal to b. func (a Attr) Equal(b Attr) bool { return a.Key == b.Key && a.Value.Equal(b.Value) } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go index 7b1ae3c4..bea56f2e 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go @@ -22,7 +22,7 @@ func (tid TraceID) String() string { return hex.EncodeToString(tid[:]) } -// IsEmpty returns false if id contains at least one non-zero byte. 
+// IsEmpty reports whether the TraceID contains only zero bytes. func (tid TraceID) IsEmpty() bool { return tid == [traceIDSize]byte{} } @@ -50,7 +50,7 @@ func (sid SpanID) String() string { return hex.EncodeToString(sid[:]) } -// IsEmpty returns true if the span ID contains at least one non-zero byte. +// IsEmpty reports whether the SpanID contains only zero bytes. func (sid SpanID) IsEmpty() bool { return sid == [spanIDSize]byte{} } @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go index 3c5e1cdb..e7ca62c6 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go @@ -251,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -266,27 +273,31 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. 
Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied -// text description and key-value pairs. +// SpanEvent is a time-stamped annotation of the span, consisting of +// user-supplied text description and key-value pairs. type SpanEvent struct { // time_unix_nano is the time the event occurred. Time time.Time `json:"timeUnixNano,omitempty"` @@ -369,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go index 1d013a8f..1039bf40 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" +// StatusCode is the status of a Span. +// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) @@ -30,7 +32,7 @@ func (s StatusCode) String() string { return "" } -// The Status type defines a logical error model that is suitable for different +// Status defines a logical error model that is suitable for different // programming environments, including REST APIs and RPC APIs. type Status struct { // A developer-facing human readable error message. 
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go index b0394070..e5f10767 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. // Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go index 7251492d..cb7927b8 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -257,10 +257,10 @@ func (v Value) Kind() ValueKind { } } -// Empty returns if v does not hold any value. +// Empty reports whether v does not hold any value. func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } -// Equal returns if v is equal to w. +// Equal reports whether v is equal to w. func (v Value) Equal(w Value) bool { k1 := v.Kind() k2 := w.Kind() @@ -316,7 +316,7 @@ func (v Value) String() string { case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index c8b1ae5d..400fab12 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} // Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { +func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } @@ -37,7 +37,7 @@ var _ Tracer = noopTracer{} // Start carries forward a non-recording Span, if one is present in the context, otherwise it // creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { +func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) { span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure @@ -95,6 +95,8 @@ var autoInstEnabled = new(bool) // tracerProvider return a noopTracerProvider if autoEnabled is false, // otherwise it will return a TracerProvider from the sdk package used in // auto-instrumentation. 
+// +//go:noinline func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider { if *autoEnabled { return newAutoTracerProvider() diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go index 64a4f1b3..689d220d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer } // If ctx contains a span context, the returned span will also contain that // span context. If the span context in ctx is for a non-recording span, that // span instance will be returned directly. -func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { +func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { span := trace.SpanFromContext(ctx) // If the parent context contains a non-zero span context, that span diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go index d3aa476e..d01e7936 100644 --- a/vendor/go.opentelemetry.io/otel/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -66,6 +66,10 @@ type Span interface { // SetAttributes sets kv as attributes of the Span. If a key from kv // already exists for an attribute of the Span it will be overwritten with // the value contained in kv. + // + // Note that adding attributes at span creation using [WithAttributes] is preferred + // to calling SetAttribute later, as samplers can only consider information + // already present during span creation. SetAttributes(kv ...attribute.KeyValue) // TracerProvider returns a TracerProvider that can be used to generate diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index d49adf67..ee6f4bcb 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -4,8 +4,6 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( - "bytes" - "encoding/hex" "encoding/json" ) @@ -38,21 +36,47 @@ var ( _ json.Marshaler = nilTraceID ) -// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// IsValid reports whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) + return t != nilTraceID } // MarshalJSON implements a custom marshal function to encode TraceID // as a hex string. func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) + b := [32 + 2]byte{0: '"', 33: '"'} + h := t.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a TraceID. func (t TraceID) String() string { - return hex.EncodeToString(t[:]) + h := t.hexBytes() + return string(h[:]) +} + +// hexBytes returns the hex string representation form of a TraceID. 
+func (t TraceID) hexBytes() [32]byte { + return [32]byte{ + hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf], + hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf], + hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf], + hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf], + hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf], + hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf], + hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf], + hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf], + hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf], + hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf], + hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf], + hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf], + hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf], + hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf], + hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf], + hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf], + } } // SpanID is a unique identity of a span in a trace. @@ -63,21 +87,38 @@ var ( _ json.Marshaler = nilSpanID ) -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// IsValid reports whether the SpanID is valid. A valid SpanID does not consist // of zeros only. func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) + return s != nilSpanID } // MarshalJSON implements a custom marshal function to encode SpanID // as a hex string. func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) + b := [16 + 2]byte{0: '"', 17: '"'} + h := s.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a SpanID. func (s SpanID) String() string { - return hex.EncodeToString(s[:]) + b := s.hexBytes() + return string(b[:]) +} + +func (s SpanID) hexBytes() [16]byte { + return [16]byte{ + hexLU[s[0]>>4], hexLU[s[0]&0xf], + hexLU[s[1]>>4], hexLU[s[1]&0xf], + hexLU[s[2]>>4], hexLU[s[2]&0xf], + hexLU[s[3]>>4], hexLU[s[3]&0xf], + hexLU[s[4]>>4], hexLU[s[4]&0xf], + hexLU[s[5]>>4], hexLU[s[5]&0xf], + hexLU[s[6]>>4], hexLU[s[6]&0xf], + hexLU[s[7]>>4], hexLU[s[7]&0xf], + } } // TraceIDFromHex returns a TraceID from a hex string if it is compliant with @@ -85,65 +126,58 @@ func (s SpanID) String() string { // https://www.w3.org/TR/trace-context/#trace-id // nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} if len(h) != 32 { - return t, errInvalidTraceIDLength + return [16]byte{}, errInvalidTraceIDLength } - - if err := decodeHex(h, t[:]); err != nil { - return t, err + var b [16]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - - if !t.IsValid() { - return t, errNilTraceID + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [16]byte{}, errInvalidHexID + } + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [16]byte{}, errNilTraceID } - return t, nil + return b, nil } // SpanIDFromHex returns a SpanID from a hex string if it is compliant // with the w3c trace-context specification. 
// See more at https://www.w3.org/TR/trace-context/#parent-id func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err + return [8]byte{}, errInvalidSpanIDLength } - - if !s.IsValid() { - return s, errNilSpanID + var b [8]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [8]byte{}, errInvalidHexID } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [8]byte{}, errNilSpanID } - - copy(b, decoded) - return nil + return b, nil } // TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. -// IsSampled returns if the sampling bit is set in the TraceFlags. +// IsSampled reports whether the sampling bit is set in the TraceFlags. func (tf TraceFlags) IsSampled() bool { return tf&FlagsSampled == FlagsSampled } @@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) + b := [2 + 2]byte{0: '"', 3: '"'} + h := tf.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) + h := tf.hexBytes() + return string(h[:]) +} + +func (tf TraceFlags) hexBytes() [2]byte { + return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]} } // SpanContextConfig contains mutable fields usable for constructing @@ -201,13 +243,13 @@ type SpanContext struct { var _ json.Marshaler = SpanContext{} -// IsValid returns if the SpanContext is valid. A valid span context has a +// IsValid reports whether the SpanContext is valid. A valid span context has a // valid TraceID and SpanID. func (sc SpanContext) IsValid() bool { return sc.HasTraceID() && sc.HasSpanID() } -// IsRemote indicates whether the SpanContext represents a remotely-created Span. +// IsRemote reports whether the SpanContext represents a remotely-created Span. func (sc SpanContext) IsRemote() bool { return sc.remote } @@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID { return sc.traceID } -// HasTraceID checks if the SpanContext has a valid TraceID. +// HasTraceID reports whether the SpanContext has a valid TraceID. func (sc SpanContext) HasTraceID() bool { return sc.traceID.IsValid() } @@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID { return sc.spanID } -// HasSpanID checks if the SpanContext has a valid SpanID. +// HasSpanID reports whether the SpanContext has a valid SpanID. 
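The rewritten TraceIDFromHex and SpanIDFromHex above swap encoding/hex for the hexRev table but keep the public contract: 32 or 16 lowercase hex characters, with all-zero IDs rejected. A small sketch of the round trip through the unchanged public API:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// 32 lowercase hex characters; an all-zero value would be rejected.
	tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	if err != nil {
		panic(err)
	}

	// 16 lowercase hex characters for the span ID.
	sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
	if err != nil {
		panic(err)
	}

	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
	})

	// String() now encodes through the hexLU table instead of encoding/hex,
	// so the output is the same lowercase hex representation as before.
	fmt.Println(sc.TraceID(), sc.SpanID(), sc.IsSampled())
}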
func (sc SpanContext) HasSpanID() bool { return sc.spanID.IsValid() } @@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags { return sc.traceFlags } -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags. func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } @@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext { } } -// Equal is a predicate that determines whether two SpanContext values are equal. +// Equal reports whether two SpanContext values are equal. func (sc SpanContext) Equal(other SpanContext) bool { return sc.traceID == other.traceID && sc.spanID == other.spanID && diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index dc5e34ca..073adae2 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool { // // param n is remain part length, should be 255 in simple-key or 13 in system-id. func checkKeyPart(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } first := key[0] // key's first char @@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool { // // param n is remain part length, should be 240 exactly. func checkKeyTenant(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) @@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) { for ts != "" { var memberStr string memberStr, ts, _ = strings.Cut(ts, listDelimiters) - if len(memberStr) == 0 { + if memberStr == "" { continue } diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh deleted file mode 100644 index 1e87855e..00000000 --- a/vendor/go.opentelemetry.io/otel/verify_readmes.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) - -missingReadme=false -for dir in $dirs; do - if [ ! -f "$dir/README.md" ]; then - echo "couldn't find README.md for $dir" - missingReadme=true - fi -done - -if [ "$missingReadme" = true ] ; then - echo "Error: some READMEs couldn't be found." - exit 1 -fi diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index d5fa71f6..0d5b0291 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.35.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 2b4cb4b4..f4a3893e 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,13 +3,12 @@ module-sets: stable-v1: - version: v1.35.0 + version: v1.39.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -23,21 +22,42 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.57.0 + version: v0.61.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.11.0 + version: v0.15.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/log/logtest - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/sdk/log/logtest - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.12 + version: v0.0.14 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - go.opentelemetry.io/otel/trace/internal/telemetry/test +modules: + go.opentelemetry.io/otel/exporters/stdout/stdouttrace: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/prometheus: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: + version-refs: + - ./internal/version.go diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go index e1b7c457..d1d24fe6 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go @@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type LogsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) } @@ -48,8 +46,6 @@ func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceReq // All implementations must embed UnimplementedLogsServiceServer // for forward compatibility type LogsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) mustEmbedUnimplementedLogsServiceServer() } diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go index dd1b73f1..892864ea 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type TraceServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) } @@ -48,8 +46,6 @@ func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceR // All implementations must embed UnimplementedTraceServiceServer // for forward compatibility type TraceServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) mustEmbedUnimplementedTraceServiceServer() } diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index 852209b0..1f8d49bc 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -34,7 +34,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// Represents any type of attribute value. AnyValue may contain a // primitive value such as a string or integer or it may contain an arbitrary nested // object containing arrays, key-value lists and primitives. type AnyValue struct { @@ -252,8 +252,10 @@ type KeyValueList struct { // A collection of key/value pairs of key-value pairs. The list may be empty (may // contain 0 elements). + // // The keys MUST be unique (it is not allowed to have more than one // value with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` } @@ -296,14 +298,16 @@ func (x *KeyValueList) GetValues() []*KeyValue { return nil } -// KeyValue is a key-value pair that is used to store Span attributes, Link +// Represents a key-value pair that is used to store Span attributes, Link // attributes, etc. type KeyValue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The key name of the pair. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value of the pair. Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } @@ -360,14 +364,21 @@ type InstrumentationScope struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // A name denoting the Instrumentation scope. // An empty instrumentation scope name means the name is unknown. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Defines the version of the instrumentation scope. + // An empty instrumentation scope version means the version is unknown. Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` // Additional attributes that describe the scope. [Optional]. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` - DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // The behavior of software that receives duplicated keys can be unpredictable. + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // The number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } func (x *InstrumentationScope) Reset() { @@ -430,6 +441,101 @@ func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { return 0 } +// A reference to an Entity. +// Entity represents an object of interest associated with produced telemetry: e.g spans, metrics, profiles, or logs. +// +// Status: [Development] +type EntityRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The Schema URL, if known. This is the identifier of the Schema that the entity data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // + // This schema_url applies to the data in this message and to the Resource attributes + // referenced by id_keys and description_keys. + // TODO: discuss if we are happy with this somewhat complicated definition of what + // the schema_url applies to. + // + // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs. + SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` + // Defines the type of the entity. MUST not change during the lifetime of the entity. + // For example: "service" or "host". This field is required and MUST not be empty + // for valid entities. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // Attribute Keys that identify the entity. + // MUST not change during the lifetime of the entity. The Id must contain at least one attribute. + // These keys MUST exist in the containing {message}.attributes. + IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"` + // Descriptive (non-identifying) attribute keys of the entity. + // MAY change over the lifetime of the entity. MAY be empty. + // These attribute keys are not part of entity's identity. + // These keys MUST exist in the containing {message}.attributes. 
+ DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"` +} + +func (x *EntityRef) Reset() { + *x = EntityRef{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntityRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntityRef) ProtoMessage() {} + +func (x *EntityRef) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntityRef.ProtoReflect.Descriptor instead. +func (*EntityRef) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} +} + +func (x *EntityRef) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +func (x *EntityRef) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EntityRef) GetIdKeys() []string { + if x != nil { + return x.IdKeys + } + return nil +} + +func (x *EntityRef) GetDescriptionKeys() []string { + if x != nil { + return x.DescriptionKeys + } + return nil +} + var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ @@ -488,15 +594,23 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, - 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, + 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x18, 0x04, 
0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, + 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -511,13 +625,14 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { return file_opentelemetry_proto_common_v1_common_proto_rawDescData } -var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope + (*EntityRef)(nil), // 5: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue @@ -599,6 +714,18 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { return nil } } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntityRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ (*AnyValue_StringValue)(nil), @@ -615,7 +742,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go index 9b47481c..b25abe6f 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go @@ -361,7 +361,8 @@ type ScopeLogs struct { // is recorded in. Notably, the last part of the URL path is the version number of the // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all logs in the "logs" field. + // This schema_url applies to the data in the "scope" field and all logs in the + // "log_records" field. 
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -458,6 +459,7 @@ type LogRecord struct { // Additional attributes that describe the specific event occurrence. [Optional]. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes,omitempty"` DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` // Flags, a bit field. 8 least significant bits are the trace flags as @@ -501,8 +503,6 @@ type LogRecord struct { // as an event. // // [Optional]. - // - // Status: [Development] EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"` } diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index b7545b03..301247dd 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -44,10 +44,17 @@ type Resource struct { // Set of attributes that describe the resource. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // The number of dropped attributes. If the value is 0, then // no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Set of entities that participate in this Resource. + // + // Note: keys in the references MUST exist in attributes of this message. 
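The new entity_refs field on Resource points at EntityRef messages whose id_keys (and optional description_keys) must name attribute keys that already exist on the resource itself. A minimal sketch of a well-formed value using the generated Go types from this diff (the package aliases are ours):

package example

import (
	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
	resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
)

// exampleResource builds a Resource whose EntityRef identifies a "service"
// entity by the service.name attribute defined in the same message.
func exampleResource() *resourcepb.Resource {
	return &resourcepb.Resource{
		Attributes: []*commonpb.KeyValue{
			{
				Key:   "service.name",
				Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "checkout"}},
			},
		},
		EntityRefs: []*commonpb.EntityRef{
			{
				Type: "service",
				// IdKeys MUST reference keys present in Attributes above.
				IdKeys: []string{"service.name"},
			},
		},
	}
}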
+ // + // Status: [Development] + EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"` } func (x *Resource) Reset() { @@ -96,6 +103,13 @@ func (x *Resource) GetDroppedAttributesCount() uint32 { return 0 } +func (x *Resource) GetEntityRefs() []*v1.EntityRef { + if x != nil { + return x.EntityRefs + } + return nil +} + var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ @@ -106,7 +120,7 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, @@ -115,16 +129,21 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, - 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, + 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x52, 0x0a, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x73, 0x42, 0x83, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, + 0x0d, 0x52, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1f, 0x4f, + 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -141,16 +160,18 @@ func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ - (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource - (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue + (*v1.EntityRef)(nil), // 2: opentelemetry.proto.common.v1.EntityRef } var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: opentelemetry.proto.resource.v1.Resource.entity_refs:type_name -> opentelemetry.proto.common.v1.EntityRef + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index b342a0a9..d7bfca90 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -388,7 +388,8 @@ type ScopeSpans struct { // is recorded in. Notably, the last part of the URL path is the version number of the // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. + // This schema_url applies to the data in the "scope" field and all spans and span + // events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -512,21 +513,21 @@ type Span struct { // two spans with the same name may be distinguished using `CLIENT` (caller) // and `SERVER` (callee) to identify queueing latency associated with the span. Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time + // The start time of the span. 
On the client side, this is the time // kept by the local machine where the span execution starts. On the server side, this // is the time when the server's application handler starts running. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. // // This field is semantically required and it is expected that end_time >= start_time. StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time + // The end time of the span. On the client side, this is the time // kept by the local machine where the span execution ends. On the server side, this // is the time when the server application handler stops running. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. // // This field is semantically required and it is expected that end_time >= start_time. EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` - // attributes is a collection of key/value pairs. Note, global attributes + // A collection of key/value pairs. Note, global attributes // like server name can be set using the resource API. Examples of attributes: // // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" @@ -534,24 +535,23 @@ type Span struct { // "example.com/myattribute": true // "example.com/score": 10.239 // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes + // The number of attributes that were discarded. Attributes // can be discarded because their keys are too long or because there are too many // attributes. If this value is 0, then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // events is a collection of Event items. + // A collection of Event items. Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no + // The number of dropped events. If the value is 0, then no // events were dropped. DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` - // links is a collection of Links, which are references from this span to a span + // A collection of Links, which are references from this span to a span // in the same or different trace. Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was + // The number of dropped links after the maximum size was // enforced. If this value is 0, then no links were dropped. 
DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` // An optional final status for this span. Semantically when Status isn't set, it means @@ -769,16 +769,17 @@ type Span_Event struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // time_unix_nano is the time the event occurred. + // The time the event occurred. TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // name of the event. + // The name of the event. // This field is semantically required to be set to non-empty string. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. + // A collection of attribute key/value pairs on the event. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // The number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } @@ -859,11 +860,12 @@ type Span_Link struct { SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` // The trace_state associated with the link. TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. + // A collection of attribute key/value pairs on the link. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // The number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` // Flags, a bit field. diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml index 2346df13..74faaa71 100644 --- a/vendor/go.uber.org/zap/.golangci.yml +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -25,7 +25,7 @@ linters-settings: govet: # These govet checks are disabled by default, but they're useful. enable: - - niliness + - nilness - reflectvaluecompare - sortslice - unusedwrite diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 6d6cd5f4..86e7e6f9 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,6 +3,16 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.1 (19 Nov 2025) +Enhancements: +* [#1501][]: prevent `Object` from panicking on nils +* [#1511][]: Fix a race condition in `WithLazy`. 
+ +Thanks to @rabbbit, @alshopov, @jquirke, @arukiidou for their contributions to this release. + +[#1501]: https://github.com/uber-go/zap/pull/1501 +[#1511]: https://github.com/uber-go/zap/pull/1511 + ## 1.27.0 (20 Feb 2024) Enhancements: * [#1378][]: Add `WithLazy` method for `SugaredLogger`. diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md index e327d9aa..bc988b72 100644 --- a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -71,5 +71,5 @@ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]. -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ +[homepage]: https://contributor-covenant.org +[version]: https://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/LICENSE b/vendor/go.uber.org/zap/LICENSE index 6652bed4..3883b9a7 100644 --- a/vendor/go.uber.org/zap/LICENSE +++ b/vendor/go.uber.org/zap/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2016-2017 Uber Technologies, Inc. +Copyright (c) 2016-2024 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index eb1cee53..f9db385b 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -24,7 +24,7 @@ golangci-lint: @$(foreach mod,$(MODULE_DIRS), \ (cd $(mod) && \ echo "[lint] golangci-lint: $(mod)" && \ - golangci-lint run --path-prefix $(mod)) &&) true + golangci-lint run --path-prefix $(mod) ./...) &&) true .PHONY: tidy tidy: diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index 6743930b..1884afab 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -398,6 +398,9 @@ func Durationp(key string, val *time.Duration) Field { // struct-like user-defined types to the logging context. The struct's // MarshalLogObject method is called lazily. func Object(key string, val zapcore.ObjectMarshaler) Field { + if val == nil { + return nilField(key) + } return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} } @@ -431,6 +434,13 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { return nil } +// DictObject constructs a [zapcore.ObjectMarshaler] with the given list of fields. +// The resulting object marshaler can be used as input to [Object], [Objects], or +// any other functions that expect an object marshaler. +func DictObject(val ...Field) zapcore.ObjectMarshaler { + return dictObject(val) +} + // We discovered an issue where zap.Any can cause a performance degradation // when used in new goroutines. 
// diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 2be8f651..1cae2c16 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -71,7 +71,7 @@ import ( func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err := lvl.serveHTTP(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "internal error: %v", err) + _, _ = fmt.Fprintf(w, "internal error: %v", err) } } diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index c4d30032..2d0ef141 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -381,7 +381,11 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { if stack.Count() == 0 { if log.addCaller { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + _, _ = fmt.Fprintf( + log.errorOutput, + "%v Logger.check error: failed to get caller\n", + ent.Time.UTC(), + ) _ = log.errorOutput.Sync() } return ce diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 43d357ac..04a3c1e6 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -125,7 +125,11 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { return optionFunc(func(log *Logger) { core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) if err != nil { - fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + _, _ = fmt.Fprintf( + log.errorOutput, + "failed to IncreaseLevel: %v\n", + err, + ) } else { log.core = core } diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index 499772a0..92202280 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -71,7 +71,7 @@ func newSinkRegistry() *sinkRegistry { return sr } -// RegisterScheme registers the given factory for the specific scheme. +// RegisterSink registers the given factory for the specific scheme. func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { sr.mu.Lock() defer sr.mu.Unlock() diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go index a40e93b3..4b426a56 100644 --- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -188,32 +188,33 @@ func (s *BufferedWriteSyncer) flushLoop() { // Stop closes the buffer, cleans up background goroutines, and flushes // remaining unwritten data. func (s *BufferedWriteSyncer) Stop() (err error) { - var stopped bool - // Critical section. - func() { + stopped := func() bool { s.mu.Lock() defer s.mu.Unlock() if !s.initialized { - return + return false } - stopped = s.stopped - if stopped { - return + if s.stopped { + return false } s.stopped = true s.ticker.Stop() close(s.stop) // tell flushLoop to stop - <-s.done // and wait until it has + return true }() - // Don't call Sync on consecutive Stops. + // Not initialized, or already stopped, no need for any cleanup. if !stopped { - err = s.Sync() + return } - return err + // Wait for flushLoop to end outside of the lock, as it may need the lock to complete. + // See https://github.com/uber-go/zap/issues/1428 for details. 
+ <-s.done + + return s.Sync() } diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index cc2b4e07..98eea515 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -105,7 +105,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, if i > 0 { line.AppendString(c.ConsoleSeparator) } - fmt.Fprint(line, arr.elems[i]) + _, _ = fmt.Fprint(line, arr.elems[i]) } putSliceEncoder(arr) diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 459a5d7c..841752f2 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -241,7 +241,12 @@ func (ce *CheckedEntry) Write(fields ...Field) { // If the entry is dirty, log an internal error; because the // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. - fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + _, _ = fmt.Fprintf( + ce.ErrorOutput, + "%v Unsafe CheckedEntry re-use near Entry %+v.\n", + ce.Time, + ce.Entry, + ) _ = ce.ErrorOutput.Sync() // ignore error } return @@ -253,7 +258,12 @@ func (ce *CheckedEntry) Write(fields ...Field) { err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) } if err != nil && ce.ErrorOutput != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + _, _ = fmt.Fprintf( + ce.ErrorOutput, + "%v write error: %v\n", + ce.Time, + err, + ) _ = ce.ErrorOutput.Sync() // ignore error } diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go index 05288d6a..500809de 100644 --- a/vendor/go.uber.org/zap/zapcore/lazy_with.go +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -23,7 +23,8 @@ package zapcore import "sync" type lazyWithCore struct { - Core + core Core + originalCore Core sync.Once fields []Field } @@ -32,23 +33,45 @@ type lazyWithCore struct { // the logger is written to (or is further chained in a lon-lazy manner). func NewLazyWith(core Core, fields []Field) Core { return &lazyWithCore{ - Core: core, - fields: fields, + core: nil, // core is allocated once `initOnce` is called. + originalCore: core, + fields: fields, } } func (d *lazyWithCore) initOnce() { d.Once.Do(func() { - d.Core = d.Core.With(d.fields) + d.core = d.originalCore.With(d.fields) }) } func (d *lazyWithCore) With(fields []Field) Core { d.initOnce() - return d.Core.With(fields) + return d.core.With(fields) } func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry { + // This is safe because `lazyWithCore` doesn't change the level. + // So we can delagate the level check, any not `initOnce` + // just for the check. + if !d.originalCore.Enabled(e.Level) { + return ce + } + d.initOnce() + return d.core.Check(e, ce) +} + +func (d *lazyWithCore) Enabled(level Level) bool { + // Like above, this is safe because `lazyWithCore` doesn't change the level. 
+ return d.originalCore.Enabled(level) +} + +func (d *lazyWithCore) Write(e Entry, fields []Field) error { + d.initOnce() + return d.core.Write(e, fields) +} + +func (d *lazyWithCore) Sync() error { d.initOnce() - return d.Core.Check(e, ce) + return d.core.Sync() } diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go index e01a2413..f3e166d6 100644 --- a/vendor/go.uber.org/zap/zapcore/level.go +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -179,19 +179,19 @@ func (l *Level) UnmarshalText(text []byte) error { func (l *Level) unmarshalText(text []byte) bool { switch string(text) { - case "debug", "DEBUG": + case "debug": *l = DebugLevel - case "info", "INFO", "": // make the zero value useful + case "info", "": // make the zero value useful *l = InfoLevel - case "warn", "WARN": + case "warn", "warning": *l = WarnLevel - case "error", "ERROR": + case "error": *l = ErrorLevel - case "dpanic", "DPANIC": + case "dpanic": *l = DPanicLevel - case "panic", "PANIC": + case "panic": *l = PanicLevel - case "fatal", "FATAL": + case "fatal": *l = FatalLevel default: return false diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index 7dd2638e..769af387 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -29,7 +29,7 @@ loop: MOVD $NUM_ROUNDS, R21 VLD1 (R11), [V30.S4, V31.S4] - // load contants + // load constants // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] WORD $0x4D60E940 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go index 50695a14..b850e772 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -56,7 +56,10 @@ func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) [] ret, out := sliceForAppend(dst, len(plaintext)+16) if alias.InexactOverlap(out, plaintext) { - panic("chacha20poly1305: invalid buffer overlap") + panic("chacha20poly1305: invalid buffer overlap of output and input") + } + if alias.AnyOverlap(out, additionalData) { + panic("chacha20poly1305: invalid buffer overlap of output and additional data") } chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) return ret @@ -73,7 +76,10 @@ func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ( ciphertext = ciphertext[:len(ciphertext)-16] ret, out := sliceForAppend(dst, len(ciphertext)) if alias.InexactOverlap(out, ciphertext) { - panic("chacha20poly1305: invalid buffer overlap") + panic("chacha20poly1305: invalid buffer overlap of output and input") + } + if alias.AnyOverlap(out, additionalData) { + panic("chacha20poly1305: invalid buffer overlap of output and additional data") } if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { for i := range out { diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go index 6313898f..2ecc840f 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go @@ -31,7 +31,10 @@ func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []b ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) ciphertext, tag := 
out[:len(plaintext)], out[len(plaintext):] if alias.InexactOverlap(out, plaintext) { - panic("chacha20poly1305: invalid buffer overlap") + panic("chacha20poly1305: invalid buffer overlap of output and input") + } + if alias.AnyOverlap(out, additionalData) { + panic("chacha20poly1305: invalid buffer overlap of output and additional data") } var polyKey [32]byte @@ -67,7 +70,10 @@ func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData [] ret, out := sliceForAppend(dst, len(ciphertext)) if alias.InexactOverlap(out, ciphertext) { - panic("chacha20poly1305: invalid buffer overlap") + panic("chacha20poly1305: invalid buffer overlap of output and input") + } + if alias.AnyOverlap(out, additionalData) { + panic("chacha20poly1305: invalid buffer overlap of output and additional data") } if !p.Verify(tag) { for i := range out { diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index 21ca3b2e..048faef3 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -3,11 +3,14 @@ // license that can be found in the LICENSE file. // Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. +// performs scalar multiplication on the elliptic curve known as Curve25519 +// according to [RFC 7748]. // -// This package is a wrapper for the X25519 implementation -// in the crypto/ecdh package. +// The curve25519 package is a wrapper for the X25519 implementation in the +// crypto/ecdh package. It is [frozen] and is not accepting new features. +// +// [RFC 7748]: https://datatracker.ietf.org/doc/html/rfc7748 +// [frozen]: https://go.dev/wiki/Frozen package curve25519 import "crypto/ecdh" @@ -36,7 +39,7 @@ func ScalarBaseMult(dst, scalar *[32]byte) { curve := ecdh.X25519() priv, err := curve.NewPrivateKey(scalar[:]) if err != nil { - panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + panic("curve25519: " + err.Error()) } copy(dst[:], priv.PublicKey().Bytes()) } diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index bd896bdc..8d99551f 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego +//go:build (!amd64 && !loong64 && !ppc64le && !ppc64 && !s390x) || !gc || purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go similarity index 94% rename from vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go rename to vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go index 164cd47d..315b84ac 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build gc && !purego +//go:build gc && !purego && (amd64 || loong64 || ppc64 || ppc64le) package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s new file mode 100644 index 00000000..bc8361da --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +// func update(state *macState, msg []byte) +TEXT ·update(SB), $0-32 + MOVV state+0(FP), R4 + MOVV msg_base+8(FP), R5 + MOVV msg_len+16(FP), R6 + + MOVV $0x10, R7 + + MOVV (R4), R8 // h0 + MOVV 8(R4), R9 // h1 + MOVV 16(R4), R10 // h2 + MOVV 24(R4), R11 // r0 + MOVV 32(R4), R12 // r1 + + BLT R6, R7, bytes_between_0_and_15 + +loop: + MOVV (R5), R14 // msg[0:8] + MOVV 8(R5), R16 // msg[8:16] + ADDV R14, R8, R8 // h0 (x1 + y1 = z1', if z1' < x1 then z1' overflow) + ADDV R16, R9, R27 + SGTU R14, R8, R24 // h0.carry + SGTU R9, R27, R28 + ADDV R27, R24, R9 // h1 + SGTU R27, R9, R24 + OR R24, R28, R24 // h1.carry + ADDV $0x01, R24, R24 + ADDV R10, R24, R10 // h2 + + ADDV $16, R5, R5 // msg = msg[16:] + +multiply: + MULV R8, R11, R14 // h0r0.lo + MULHVU R8, R11, R15 // h0r0.hi + MULV R9, R11, R13 // h1r0.lo + MULHVU R9, R11, R16 // h1r0.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MULV R10, R11, R25 + ADDV R16, R25, R25 + MULV R8, R12, R13 // h0r1.lo + MULHVU R8, R12, R16 // h0r1.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MOVV R16, R8 + MULV R10, R12, R26 // h2r1 + MULV R9, R12, R13 // h1r1.lo + MULHVU R9, R12, R16 // h1r1.hi + ADDV R13, R25, R25 + ADDV R16, R26, R27 + SGTU R13, R25, R24 + ADDV R27, R24, R26 + ADDV R8, R25, R25 + SGTU R8, R25, R24 + ADDV R24, R26, R26 + AND $3, R25, R10 + AND $-4, R25, R17 + ADDV R17, R14, R8 + ADDV R26, R15, R27 + SGTU R17, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + SLLV $62, R26, R27 + SRLV $2, R25, R28 + SRLV $2, R26, R26 + OR R27, R28, R25 + ADDV R25, R8, R8 + ADDV R26, R9, R27 + SGTU R25, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + + SUBV $16, R6, R6 + BGE R6, R7, loop + +bytes_between_0_and_15: + BEQ R6, R0, done + MOVV $1, R14 + XOR R15, R15 + ADDV R6, R5, R5 + +flush_buffer: + MOVBU -1(R5), R25 + SRLV $56, R14, R24 + SLLV $8, R15, R28 + SLLV $8, R14, R14 + OR R24, R28, R15 + XOR R25, R14, R14 + SUBV $1, R6, R6 + SUBV $1, R5, R5 + BNE R6, R0, flush_buffer + + ADDV R14, R8, R8 + SGTU R14, R8, R24 + ADDV R15, R9, R27 + SGTU R15, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R10, R24, R10 + + MOVV $16, R6 + JMP multiply + +done: + MOVV R8, (R4) + MOVV R9, 8(R4) + MOVV R10, 16(R4) + RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go deleted file mode 100644 index 1a1679aa..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gc && !purego && (ppc64 || ppc64le) - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. -type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 27d0e14a..139fa31e 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -20,14 +20,19 @@ import ( // returned by MultiAlgorithmSigner and don't appear in the Signature.Format // field. const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" - CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" + CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + // Deprecated: DSA is only supported at insecure key sizes, and was removed + // from major implementations. + CertAlgoDSAv01 = InsecureCertAlgoDSAv01 + // Deprecated: DSA is only supported at insecure key sizes, and was removed + // from major implementations. + InsecureCertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" + CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" + CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" + CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" + CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a // Certificate.Type (or PublicKey.Type), but only in @@ -228,7 +233,11 @@ func parseCert(in []byte, privAlgo string) (*Certificate, error) { if err != nil { return nil, err } - + // The Type() function is intended to return only certificate key types, but + // we use certKeyAlgoNames anyway for safety, to match [Certificate.Type]. + if _, ok := certKeyAlgoNames[k.Type()]; ok { + return nil, fmt.Errorf("ssh: the signature key type %q is invalid for certificates", k.Type()) + } c.SignatureKey = k c.Signature, rest, ok = parseSignatureBody(g.Signature) if !ok || len(rest) > 0 { @@ -296,16 +305,13 @@ type CertChecker struct { SupportedCriticalOptions []string // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. 
This must be set - // if this CertChecker will be checking user certificates. + // authority for user certificate. This must be set if this CertChecker + // will be checking user certificates. IsUserAuthority func(auth PublicKey) bool // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. + // an authority for this host. This must be set if this CertChecker + // will be checking host certificates. IsHostAuthority func(auth PublicKey, address string) bool // Clock is used for verifying time stamps. If nil, time.Now @@ -442,12 +448,19 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { // SignCert signs the certificate with an authority, setting the Nonce, // SignatureKey, and Signature fields. If the authority implements the // MultiAlgorithmSigner interface the first algorithm in the list is used. This -// is useful if you want to sign with a specific algorithm. +// is useful if you want to sign with a specific algorithm. As specified in +// [SSH-CERTS], Section 2.1.1, authority can't be a [Certificate]. func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { c.Nonce = make([]byte, 32) if _, err := io.ReadFull(rand, c.Nonce); err != nil { return err } + // The Type() function is intended to return only certificate key types, but + // we use certKeyAlgoNames anyway for safety, to match [Certificate.Type]. + if _, ok := certKeyAlgoNames[authority.PublicKey().Type()]; ok { + return fmt.Errorf("ssh: certificates cannot be used as authority (public key type %q)", + authority.PublicKey().Type()) + } c.SignatureKey = authority.PublicKey() if v, ok := authority.(MultiAlgorithmSigner); ok { @@ -485,16 +498,16 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { // // This map must be kept in sync with the one in agent/client.go. 
var certKeyAlgoNames = map[string]string{ - CertAlgoRSAv01: KeyAlgoRSA, - CertAlgoRSASHA256v01: KeyAlgoRSASHA256, - CertAlgoRSASHA512v01: KeyAlgoRSASHA512, - CertAlgoDSAv01: KeyAlgoDSA, - CertAlgoECDSA256v01: KeyAlgoECDSA256, - CertAlgoECDSA384v01: KeyAlgoECDSA384, - CertAlgoECDSA521v01: KeyAlgoECDSA521, - CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, - CertAlgoED25519v01: KeyAlgoED25519, - CertAlgoSKED25519v01: KeyAlgoSKED25519, + CertAlgoRSAv01: KeyAlgoRSA, + CertAlgoRSASHA256v01: KeyAlgoRSASHA256, + CertAlgoRSASHA512v01: KeyAlgoRSASHA512, + InsecureCertAlgoDSAv01: InsecureKeyAlgoDSA, + CertAlgoECDSA256v01: KeyAlgoECDSA256, + CertAlgoECDSA384v01: KeyAlgoECDSA384, + CertAlgoECDSA521v01: KeyAlgoECDSA521, + CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, + CertAlgoED25519v01: KeyAlgoED25519, + CertAlgoSKED25519v01: KeyAlgoSKED25519, } // underlyingAlgo returns the signature algorithm associated with algo (which is diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index 741e984f..7554ed57 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -8,6 +8,7 @@ import ( "crypto/aes" "crypto/cipher" "crypto/des" + "crypto/fips140" "crypto/rc4" "crypto/subtle" "encoding/binary" @@ -15,6 +16,7 @@ import ( "fmt" "hash" "io" + "slices" "golang.org/x/crypto/chacha20" "golang.org/x/crypto/internal/poly1305" @@ -58,11 +60,11 @@ func newRC4(key, iv []byte) (cipher.Stream, error) { type cipherMode struct { keySize int ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) + create func(key, iv []byte, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) } -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { +func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) { + return func(key, iv, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) { stream, err := createFunc(key, iv) if err != nil { return nil, err @@ -93,41 +95,41 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, } // cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC 4345 introduces improved versions of Arcfour. 
- "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcm128CipherID: {16, 12, newGCMCipher}, - gcm256CipherID: {32, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - +// are not supported and will not be negotiated, even if explicitly configured. +// When FIPS mode is enabled, only FIPS-approved algorithms are included. +var cipherModes = map[string]*cipherMode{} + +func init() { + cipherModes[CipherAES128CTR] = &cipherMode{16, aes.BlockSize, streamCipherMode(0, newAESCTR)} + cipherModes[CipherAES192CTR] = &cipherMode{24, aes.BlockSize, streamCipherMode(0, newAESCTR)} + cipherModes[CipherAES256CTR] = &cipherMode{32, aes.BlockSize, streamCipherMode(0, newAESCTR)} + // Use of GCM with arbitrary IVs is not allowed in FIPS 140-only mode, + // we'll wire it up to NewGCMForSSH in Go 1.26. + // + // For now it means we'll work with fips140=on but not fips140=only. + cipherModes[CipherAES128GCM] = &cipherMode{16, 12, newGCMCipher} + cipherModes[CipherAES256GCM] = &cipherMode{32, 12, newGCMCipher} + + if fips140.Enabled() { + defaultCiphers = slices.DeleteFunc(defaultCiphers, func(algo string) bool { + _, ok := cipherModes[algo] + return !ok + }) + return + } + + cipherModes[CipherChaCha20Poly1305] = &cipherMode{64, 0, newChaCha20Cipher} + // Insecure ciphers not included in the default configuration. + cipherModes[InsecureCipherRC4128] = &cipherMode{16, 0, streamCipherMode(1536, newRC4)} + cipherModes[InsecureCipherRC4256] = &cipherMode{32, 0, streamCipherMode(1536, newRC4)} + cipherModes[InsecureCipherRC4] = &cipherMode{16, 0, streamCipherMode(0, newRC4)} // CBC mode is insecure and so is not included in the default config. // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely // needed, it's possible to specify a custom Config to enable it. // You should expect that an active attacker can recover plaintext if // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. 
- tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, + cipherModes[InsecureCipherAES128CBC] = &cipherMode{16, aes.BlockSize, newAESCBCCipher} + cipherModes[InsecureCipherTripleDESCBC] = &cipherMode{24, des.BlockSize, newTripleDESCBCCipher} } // prefixLen is the length of the packet prefix that contains the packet length @@ -307,7 +309,7 @@ type gcmCipher struct { buf []byte } -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { +func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs DirectionAlgorithms) (packetCipher, error) { c, err := aes.NewCipher(key) if err != nil { return nil, err @@ -429,7 +431,7 @@ type cbcCipher struct { oracleCamouflage uint32 } -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { +func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) { cbc := &cbcCipher{ mac: macModes[algs.MAC].new(macKey), decrypter: cipher.NewCBCDecrypter(c, iv), @@ -443,7 +445,7 @@ func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorith return cbc, nil } -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { +func newAESCBCCipher(key, iv, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) { c, err := aes.NewCipher(key) if err != nil { return nil, err @@ -457,7 +459,7 @@ func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCi return cbc, nil } -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { +func newTripleDESCBCCipher(key, iv, macKey []byte, algs DirectionAlgorithms) (packetCipher, error) { c, err := des.NewTripleDESCipher(key) if err != nil { return nil, err @@ -635,8 +637,6 @@ func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader return nil } -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - // chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com // AEAD, which is described here: // @@ -650,7 +650,7 @@ type chacha20Poly1305Cipher struct { buf []byte } -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { +func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs DirectionAlgorithms) (packetCipher, error) { if len(key) != 64 { panic(len(key)) } diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index fd8c4974..33079789 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -110,6 +110,7 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e } c.sessionID = c.transport.getSessionID() + c.algorithms = c.transport.getAlgorithms() return c.clientAuthenticate(config) } diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index b86dde15..3127e499 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "slices" "strings" ) @@ -83,7 +84,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { // success return nil } else if ok == authFailure { - if m := auth.method(); !contains(tried, m) { + if m := auth.method(); !slices.Contains(tried, m) { tried = append(tried, m) } } @@ -97,7 +98,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { 
findNext: for _, a := range config.Auth { candidateMethod := a.method() - if contains(tried, candidateMethod) { + if slices.Contains(tried, candidateMethod) { continue } for _, meth := range methods { @@ -117,15 +118,6 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) } -func contains(list []string, e string) bool { - for _, s := range list { - if s == e { - return true - } - } - return false -} - // An AuthMethod represents an instance of an RFC 4252 authentication method. type AuthMethod interface { // auth authenticates user over transport t. @@ -255,7 +247,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA // Fallback to use if there is no "server-sig-algs" extension or a // common algorithm cannot be found. We use the public key format if the // MultiAlgorithmSigner supports it, otherwise we return an error. - if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) { + if !slices.Contains(as.Algorithms(), underlyingAlgo(keyFormat)) { return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v", underlyingAlgo(keyFormat), keyFormat, as.Algorithms()) } @@ -284,12 +276,12 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA // Filter algorithms based on those supported by MultiAlgorithmSigner. var keyAlgos []string for _, algo := range algorithmsForKeyFormat(keyFormat) { - if contains(as.Algorithms(), underlyingAlgo(algo)) { + if slices.Contains(as.Algorithms(), underlyingAlgo(algo)) { keyAlgos = append(keyAlgos, algo) } } - algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos, true) if err != nil { // If there is no overlap, return the fallback algorithm to support // servers that fail to list all supported algorithms. @@ -334,7 +326,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand // the key try to use the obtained algorithm as if "server-sig-algs" had // not been implemented if supported from the algorithm signer. if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 { - if contains(as.Algorithms(), KeyAlgoRSA) { + if slices.Contains(as.Algorithms(), KeyAlgoRSA) { // We retry using the compat algorithm after all signers have // been tried normally. signers = append(signers, &multiAlgorithmSigner{ @@ -385,7 +377,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand // contain the "publickey" method, do not attempt to authenticate with any // other keys. According to RFC 4252 Section 7, the latter can occur when // additional authentication methods are required. - if success == authSuccess || !contains(methods, cb.method()) { + if success == authSuccess || !slices.Contains(methods, cb.method()) { return success, methods, err } } @@ -434,7 +426,7 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { // servers send the key type instead. OpenSSH allows any algorithm // that matches the public key, so we do the same. 
// https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709 - if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { + if !slices.Contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { return false, nil } if !bytes.Equal(msg.PubKey, pubKey) { diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 7e9c2cbc..2e44e9c9 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -6,10 +6,12 @@ package ssh import ( "crypto" + "crypto/fips140" "crypto/rand" "fmt" "io" "math" + "slices" "sync" _ "crypto/sha1" @@ -24,88 +26,298 @@ const ( serviceSSH = "ssh-connection" ) -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", gcm256CipherID, - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} +// The ciphers currently or previously implemented by this library, to use in +// [Config.Ciphers]. For a list, see the [Algorithms.Ciphers] returned by +// [SupportedAlgorithms] or [InsecureAlgorithms]. +const ( + CipherAES128GCM = "aes128-gcm@openssh.com" + CipherAES256GCM = "aes256-gcm@openssh.com" + CipherChaCha20Poly1305 = "chacha20-poly1305@openssh.com" + CipherAES128CTR = "aes128-ctr" + CipherAES192CTR = "aes192-ctr" + CipherAES256CTR = "aes256-ctr" + InsecureCipherAES128CBC = "aes128-cbc" + InsecureCipherTripleDESCBC = "3des-cbc" + InsecureCipherRC4 = "arcfour" + InsecureCipherRC4128 = "arcfour128" + InsecureCipherRC4256 = "arcfour256" +) -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", gcm256CipherID, - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} +// The key exchanges currently or previously implemented by this library, to use +// in [Config.KeyExchanges]. For a list, see the +// [Algorithms.KeyExchanges] returned by [SupportedAlgorithms] or +// [InsecureAlgorithms]. +const ( + InsecureKeyExchangeDH1SHA1 = "diffie-hellman-group1-sha1" + InsecureKeyExchangeDH14SHA1 = "diffie-hellman-group14-sha1" + KeyExchangeDH14SHA256 = "diffie-hellman-group14-sha256" + KeyExchangeDH16SHA512 = "diffie-hellman-group16-sha512" + KeyExchangeECDHP256 = "ecdh-sha2-nistp256" + KeyExchangeECDHP384 = "ecdh-sha2-nistp384" + KeyExchangeECDHP521 = "ecdh-sha2-nistp521" + KeyExchangeCurve25519 = "curve25519-sha256" + InsecureKeyExchangeDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" + KeyExchangeDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" + // KeyExchangeMLKEM768X25519 is supported from Go 1.24. + KeyExchangeMLKEM768X25519 = "mlkem768x25519-sha256" + + // An alias for KeyExchangeCurve25519SHA256. This kex ID will be added if + // KeyExchangeCurve25519SHA256 is requested for backward compatibility with + // OpenSSH versions up to 7.2. + keyExchangeCurve25519LibSSH = "curve25519-sha256@libssh.org" +) -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. -var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. 
- kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1, - kexAlgoDH1SHA1, -} +// The message authentication code (MAC) currently or previously implemented by +// this library, to use in [Config.MACs]. For a list, see the +// [Algorithms.MACs] returned by [SupportedAlgorithms] or +// [InsecureAlgorithms]. +const ( + HMACSHA256ETM = "hmac-sha2-256-etm@openssh.com" + HMACSHA512ETM = "hmac-sha2-512-etm@openssh.com" + HMACSHA256 = "hmac-sha2-256" + HMACSHA512 = "hmac-sha2-512" + HMACSHA1 = "hmac-sha1" + InsecureHMACSHA196 = "hmac-sha1-96" +) -// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden -// for the server half. -var serverForbiddenKexAlgos = map[string]struct{}{ - kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests - kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests -} +var ( + // supportedKexAlgos specifies key-exchange algorithms implemented by this + // package in preference order, excluding those with security issues. + supportedKexAlgos = []string{ + KeyExchangeMLKEM768X25519, + KeyExchangeCurve25519, + KeyExchangeECDHP256, + KeyExchangeECDHP384, + KeyExchangeECDHP521, + KeyExchangeDH14SHA256, + KeyExchangeDH16SHA512, + KeyExchangeDHGEXSHA256, + } + // defaultKexAlgos specifies the default preference for key-exchange + // algorithms in preference order. + defaultKexAlgos = []string{ + KeyExchangeMLKEM768X25519, + KeyExchangeCurve25519, + KeyExchangeECDHP256, + KeyExchangeECDHP384, + KeyExchangeECDHP521, + KeyExchangeDH14SHA256, + InsecureKeyExchangeDH14SHA1, + } + // insecureKexAlgos specifies key-exchange algorithms implemented by this + // package and which have security issues. + insecureKexAlgos = []string{ + InsecureKeyExchangeDH14SHA1, + InsecureKeyExchangeDH1SHA1, + InsecureKeyExchangeDHGEXSHA1, + } + // supportedCiphers specifies cipher algorithms implemented by this package + // in preference order, excluding those with security issues. + supportedCiphers = []string{ + CipherAES128GCM, + CipherAES256GCM, + CipherChaCha20Poly1305, + CipherAES128CTR, + CipherAES192CTR, + CipherAES256CTR, + } + // defaultCiphers specifies the default preference for ciphers algorithms + // in preference order. + defaultCiphers = supportedCiphers + // insecureCiphers specifies cipher algorithms implemented by this + // package and which have security issues. + insecureCiphers = []string{ + InsecureCipherAES128CBC, + InsecureCipherTripleDESCBC, + InsecureCipherRC4256, + InsecureCipherRC4128, + InsecureCipherRC4, + } + // supportedMACs specifies MAC algorithms implemented by this package in + // preference order, excluding those with security issues. + supportedMACs = []string{ + HMACSHA256ETM, + HMACSHA512ETM, + HMACSHA256, + HMACSHA512, + HMACSHA1, + } + // defaultMACs specifies the default preference for MAC algorithms in + // preference order. + defaultMACs = []string{ + HMACSHA256ETM, + HMACSHA512ETM, + HMACSHA256, + HMACSHA512, + HMACSHA1, + InsecureHMACSHA196, + } + // insecureMACs specifies MAC algorithms implemented by this + // package and which have security issues. + insecureMACs = []string{ + InsecureHMACSHA196, + } + // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. + // methods of authenticating servers) implemented by this package in + // preference order, excluding those with security issues. 
+ supportedHostKeyAlgos = []string{ + CertAlgoRSASHA256v01, + CertAlgoRSASHA512v01, + CertAlgoECDSA256v01, + CertAlgoECDSA384v01, + CertAlgoECDSA521v01, + CertAlgoED25519v01, + KeyAlgoRSASHA256, + KeyAlgoRSASHA512, + KeyAlgoECDSA256, + KeyAlgoECDSA384, + KeyAlgoECDSA521, + KeyAlgoED25519, + } + // defaultHostKeyAlgos specifies the default preference for host-key + // algorithms in preference order. + defaultHostKeyAlgos = []string{ + CertAlgoRSASHA256v01, + CertAlgoRSASHA512v01, + CertAlgoRSAv01, + InsecureCertAlgoDSAv01, + CertAlgoECDSA256v01, + CertAlgoECDSA384v01, + CertAlgoECDSA521v01, + CertAlgoED25519v01, + KeyAlgoECDSA256, + KeyAlgoECDSA384, + KeyAlgoECDSA521, + KeyAlgoRSASHA256, + KeyAlgoRSASHA512, + KeyAlgoRSA, + InsecureKeyAlgoDSA, + KeyAlgoED25519, + } + // insecureHostKeyAlgos specifies host-key algorithms implemented by this + // package and which have security issues. + insecureHostKeyAlgos = []string{ + KeyAlgoRSA, + InsecureKeyAlgoDSA, + CertAlgoRSAv01, + InsecureCertAlgoDSAv01, + } + // supportedPubKeyAuthAlgos specifies the supported client public key + // authentication algorithms. Note that this doesn't include certificate + // types since those use the underlying algorithm. Order is irrelevant. + supportedPubKeyAuthAlgos = []string{ + KeyAlgoED25519, + KeyAlgoSKED25519, + KeyAlgoSKECDSA256, + KeyAlgoECDSA256, + KeyAlgoECDSA384, + KeyAlgoECDSA521, + KeyAlgoRSASHA256, + KeyAlgoRSASHA512, + } -// preferredKexAlgos specifies the default preference for key-exchange -// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm -// is disabled by default because it is a bit slower than the others. -var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, -} + // defaultPubKeyAuthAlgos specifies the preferred client public key + // authentication algorithms. This list is sent to the client if it supports + // the server-sig-algs extension. Order is irrelevant. + defaultPubKeyAuthAlgos = []string{ + KeyAlgoED25519, + KeyAlgoSKED25519, + KeyAlgoSKECDSA256, + KeyAlgoECDSA256, + KeyAlgoECDSA384, + KeyAlgoECDSA521, + KeyAlgoRSASHA256, + KeyAlgoRSASHA512, + KeyAlgoRSA, + InsecureKeyAlgoDSA, + } + // insecurePubKeyAuthAlgos specifies client public key authentication + // algorithms implemented by this package and which have security issues. + insecurePubKeyAuthAlgos = []string{ + KeyAlgoRSA, + InsecureKeyAlgoDSA, + } +) -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, +// NegotiatedAlgorithms defines algorithms negotiated between client and server. +type NegotiatedAlgorithms struct { + KeyExchange string + HostKey string + Read DirectionAlgorithms + Write DirectionAlgorithms +} + +// Algorithms defines a set of algorithms that can be configured in the client +// or server config for negotiation during a handshake. 
+type Algorithms struct { + KeyExchanges []string + Ciphers []string + MACs []string + HostKeys []string + PublicKeyAuths []string +} + +func init() { + if fips140.Enabled() { + defaultHostKeyAlgos = slices.DeleteFunc(defaultHostKeyAlgos, func(algo string) bool { + _, err := hashFunc(underlyingAlgo(algo)) + return err != nil + }) + defaultPubKeyAuthAlgos = slices.DeleteFunc(defaultPubKeyAuthAlgos, func(algo string) bool { + _, err := hashFunc(underlyingAlgo(algo)) + return err != nil + }) + } +} - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA256, KeyAlgoRSASHA512, - KeyAlgoRSA, KeyAlgoDSA, +func hashFunc(format string) (crypto.Hash, error) { + switch format { + case KeyAlgoRSASHA256, KeyAlgoECDSA256, KeyAlgoSKED25519, KeyAlgoSKECDSA256: + return crypto.SHA256, nil + case KeyAlgoECDSA384: + return crypto.SHA384, nil + case KeyAlgoRSASHA512, KeyAlgoECDSA521: + return crypto.SHA512, nil + case KeyAlgoED25519: + // KeyAlgoED25519 doesn't pre-hash. + return 0, nil + case KeyAlgoRSA, InsecureKeyAlgoDSA: + if fips140.Enabled() { + return 0, fmt.Errorf("ssh: hash algorithm for format %q not allowed in FIPS 140 mode", format) + } + return crypto.SHA1, nil + default: + return 0, fmt.Errorf("ssh: hash algorithm for format %q not mapped", format) + } +} - KeyAlgoED25519, +// SupportedAlgorithms returns algorithms currently implemented by this package, +// excluding those with security issues, which are returned by +// InsecureAlgorithms. The algorithms listed here are in preference order. +func SupportedAlgorithms() Algorithms { + return Algorithms{ + Ciphers: slices.Clone(supportedCiphers), + MACs: slices.Clone(supportedMACs), + KeyExchanges: slices.Clone(supportedKexAlgos), + HostKeys: slices.Clone(supportedHostKeyAlgos), + PublicKeyAuths: slices.Clone(supportedPubKeyAuthAlgos), + } } -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", +// InsecureAlgorithms returns algorithms currently implemented by this package +// and which have security issues. +func InsecureAlgorithms() Algorithms { + return Algorithms{ + KeyExchanges: slices.Clone(insecureKexAlgos), + Ciphers: slices.Clone(insecureCiphers), + MACs: slices.Clone(insecureMACs), + HostKeys: slices.Clone(insecureHostKeyAlgos), + PublicKeyAuths: slices.Clone(insecurePubKeyAuthAlgos), + } } var supportedCompressions = []string{compressionNone} -// hashFuncs keeps the mapping of supported signature algorithms to their -// respective hashes needed for signing and verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoRSASHA256: crypto.SHA256, - KeyAlgoRSASHA512: crypto.SHA512, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - // KeyAlgoED25519 doesn't pre-hash. - KeyAlgoSKECDSA256: crypto.SHA256, - KeyAlgoSKED25519: crypto.SHA256, -} - // algorithmsForKeyFormat returns the supported signature algorithms for a given // public key format (PublicKey.Type), in order of preference. See RFC 8332, // Section 2. See also the note in sendKexInit on backwards compatibility. 
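The hunk above replaces the old unexported algorithm lists with an exported catalogue: [Algorithms], [SupportedAlgorithms], and [InsecureAlgorithms]. A minimal usage sketch follows, assuming a hypothetical client in which the host, user, and credentials are placeholders: it feeds the supported sets back into ssh.Config so the connection drops the legacy entries (such as hmac-sha1-96 or ssh-dss) that remain in the defaults only for compatibility, and it inspects the AlgorithmNegotiationError type defined later in this file if negotiation fails.

package main

import (
	"errors"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Pin the connection to the currently supported algorithm sets rather
	// than the package defaults, which still advertise legacy algorithms
	// for interoperability.
	algos := ssh.SupportedAlgorithms()

	cfg := &ssh.ClientConfig{
		User:              "example-user",                           // placeholder
		Auth:              []ssh.AuthMethod{ssh.Password("secret")}, // placeholder
		HostKeyCallback:   ssh.InsecureIgnoreHostKey(),              // placeholder; verify host keys in real code
		HostKeyAlgorithms: algos.HostKeys,
		Config: ssh.Config{
			KeyExchanges: algos.KeyExchanges,
			Ciphers:      algos.Ciphers,
			MACs:         algos.MACs,
		},
	}

	client, err := ssh.Dial("tcp", "ssh.example.com:22", cfg) // placeholder address
	if err != nil {
		// If the peer shares no algorithm with the restricted sets, the
		// handshake surfaces an *AlgorithmNegotiationError (subject to how
		// the handshake error is wrapped by the ssh version in use).
		var negErr *ssh.AlgorithmNegotiationError
		if errors.As(err, &negErr) {
			log.Fatalf("no common %s algorithm: we offered %v, peer offered %v",
				negErr.What, negErr.SupportedAlgorithms, negErr.RequestedAlgorithms)
		}
		log.Fatalf("dial failed: %v", err)
	}
	defer client.Close()
}

Both SupportedAlgorithms and InsecureAlgorithms return cloned slices, so a caller can trim or reorder the returned lists without mutating package state.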
@@ -120,11 +332,40 @@ func algorithmsForKeyFormat(keyFormat string) []string { } } +// keyFormatForAlgorithm returns the key format corresponding to the given +// signature algorithm. It returns an empty string if the signature algorithm is +// invalid or unsupported. +func keyFormatForAlgorithm(sigAlgo string) string { + switch sigAlgo { + case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512: + return KeyAlgoRSA + case CertAlgoRSAv01, CertAlgoRSASHA256v01, CertAlgoRSASHA512v01: + return CertAlgoRSAv01 + case KeyAlgoED25519, + KeyAlgoSKED25519, + KeyAlgoSKECDSA256, + KeyAlgoECDSA256, + KeyAlgoECDSA384, + KeyAlgoECDSA521, + InsecureKeyAlgoDSA, + InsecureCertAlgoDSAv01, + CertAlgoECDSA256v01, + CertAlgoECDSA384v01, + CertAlgoECDSA521v01, + CertAlgoSKECDSA256v01, + CertAlgoED25519v01, + CertAlgoSKED25519v01: + return sigAlgo + default: + return "" + } +} + // isRSA returns whether algo is a supported RSA algorithm, including certificate // algorithms. func isRSA(algo string) bool { algos := algorithmsForKeyFormat(KeyAlgoRSA) - return contains(algos, underlyingAlgo(algo)) + return slices.Contains(algos, underlyingAlgo(algo)) } func isRSACert(algo string) bool { @@ -135,18 +376,6 @@ func isRSACert(algo string) bool { return isRSA(algo) } -// supportedPubKeyAuthAlgos specifies the supported client public key -// authentication algorithms. Note that this doesn't include certificate types -// since those use the underlying algorithm. This list is sent to the client if -// it supports the server-sig-algs extension. Order is irrelevant. -var supportedPubKeyAuthAlgos = []string{ - KeyAlgoED25519, - KeyAlgoSKED25519, KeyAlgoSKECDSA256, - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, - KeyAlgoDSA, -} - // unexpectedMessageError results when the SSH message that we received didn't // match what we wanted. func unexpectedMessageError(expected, got uint8) error { @@ -158,7 +387,7 @@ func parseError(tag uint8) error { return fmt.Errorf("ssh: parse error in message type %d", tag) } -func findCommon(what string, client []string, server []string) (common string, err error) { +func findCommon(what string, client []string, server []string, isClient bool) (string, error) { for _, c := range client { for _, s := range server { if c == s { @@ -166,23 +395,49 @@ func findCommon(what string, client []string, server []string) (common string, e } } } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) + err := &AlgorithmNegotiationError{ + What: what, + } + if isClient { + err.SupportedAlgorithms = client + err.RequestedAlgorithms = server + } else { + err.SupportedAlgorithms = server + err.RequestedAlgorithms = client + } + return "", err } -// directionAlgorithms records algorithm choices in one direction (either read or write) -type directionAlgorithms struct { +// AlgorithmNegotiationError defines the error returned if the client and the +// server cannot agree on an algorithm for key exchange, host key, cipher, MAC. +type AlgorithmNegotiationError struct { + What string + // RequestedAlgorithms lists the algorithms supported by the peer. + RequestedAlgorithms []string + // SupportedAlgorithms lists the algorithms supported on our side. 
+ SupportedAlgorithms []string +} + +func (a *AlgorithmNegotiationError) Error() string { + return fmt.Sprintf("ssh: no common algorithm for %s; we offered: %v, peer offered: %v", + a.What, a.SupportedAlgorithms, a.RequestedAlgorithms) +} + +// DirectionAlgorithms defines the algorithms negotiated in one direction +// (either read or write). +type DirectionAlgorithms struct { Cipher string MAC string - Compression string + compression string } // rekeyBytes returns a rekeying intervals in bytes. -func (a *directionAlgorithms) rekeyBytes() int64 { +func (a *DirectionAlgorithms) rekeyBytes() int64 { // According to RFC 4344 block ciphers should rekey after // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is // 128. switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcm128CipherID, gcm256CipherID, aes128cbcID: + case CipherAES128CTR, CipherAES192CTR, CipherAES256CTR, CipherAES128GCM, CipherAES256GCM, InsecureCipherAES128CBC: return 16 * (1 << 32) } @@ -192,66 +447,59 @@ func (a *directionAlgorithms) rekeyBytes() int64 { } var aeadCiphers = map[string]bool{ - gcm128CipherID: true, - gcm256CipherID: true, - chacha20Poly1305ID: true, + CipherAES128GCM: true, + CipherAES256GCM: true, + CipherChaCha20Poly1305: true, } -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} +func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *NegotiatedAlgorithms, err error) { + result := &NegotiatedAlgorithms{} - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) + result.KeyExchange, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos, isClient) if err != nil { return } - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) + result.HostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos, isClient) if err != nil { return } - stoc, ctos := &result.w, &result.r + stoc, ctos := &result.Write, &result.Read if isClient { ctos, stoc = stoc, ctos } - ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) + ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer, isClient) if err != nil { return } - stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) + stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient, isClient) if err != nil { return } if !aeadCiphers[ctos.Cipher] { - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer, isClient) if err != nil { return } } if !aeadCiphers[stoc.Cipher] { - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient, isClient) if err != nil { return } } - ctos.Compression, err = findCommon("client to server compression", 
clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) + ctos.compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer, isClient) if err != nil { return } - stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) + stoc.compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient, isClient) if err != nil { return } @@ -297,7 +545,7 @@ func (c *Config) SetDefaults() { c.Rand = rand.Reader } if c.Ciphers == nil { - c.Ciphers = preferredCiphers + c.Ciphers = defaultCiphers } var ciphers []string for _, c := range c.Ciphers { @@ -309,19 +557,22 @@ func (c *Config) SetDefaults() { c.Ciphers = ciphers if c.KeyExchanges == nil { - c.KeyExchanges = preferredKexAlgos + c.KeyExchanges = defaultKexAlgos } var kexs []string for _, k := range c.KeyExchanges { if kexAlgoMap[k] != nil { // Ignore the KEX if we have no kexAlgoMap definition. kexs = append(kexs, k) + if k == KeyExchangeCurve25519 && !slices.Contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) { + kexs = append(kexs, keyExchangeCurve25519LibSSH) + } } } c.KeyExchanges = kexs if c.MACs == nil { - c.MACs = supportedMACs + c.MACs = defaultMACs } var macs []string for _, m := range c.MACs { diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go index 8f345ee9..613a71a7 100644 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -74,6 +74,13 @@ type Conn interface { // Disconnect } +// AlgorithmsConnMetadata is a ConnMetadata that can return the algorithms +// negotiated between client and server. +type AlgorithmsConnMetadata interface { + ConnMetadata + Algorithms() NegotiatedAlgorithms +} + // DiscardRequests consumes and rejects all requests from the // passed-in channel. func DiscardRequests(in <-chan *Request) { @@ -106,6 +113,7 @@ type sshConn struct { sessionID []byte clientVersion []byte serverVersion []byte + algorithms NegotiatedAlgorithms } func dup(src []byte) []byte { @@ -141,3 +149,7 @@ func (c *sshConn) ClientVersion() []byte { func (c *sshConn) ServerVersion() []byte { return dup(c.serverVersion) } + +func (c *sshConn) Algorithms() NegotiatedAlgorithms { + return c.algorithms +} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index f5d352fe..5b4de9ef 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -16,8 +16,19 @@ References: [PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + [SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01 + [FIPS 140-3 mode]: https://go.dev/doc/security/fips140 This package does not fall under the stability promise of the Go language itself, so its API may be changed when pressing needs arise. + +# FIPS 140-3 mode + +When the program is in [FIPS 140-3 mode], this package behaves as if only SP +800-140C and SP 800-140D approved cipher suites, signature algorithms, +certificate public key types and sizes, and key exchange and derivation +algorithms were implemented. 
Others are silently ignored and not negotiated, or +rejected. This set may depend on the algorithms supported by the FIPS 140-3 Go +Cryptographic Module selected with GOFIPS140, and may change across Go versions. */ package ssh diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index fef687db..4be3cbb6 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -5,12 +5,12 @@ package ssh import ( - "crypto/rand" "errors" "fmt" "io" "log" "net" + "slices" "strings" "sync" ) @@ -25,6 +25,11 @@ const debugHandshake = false // quickly. const chanSize = 16 +// maxPendingPackets sets the maximum number of packets to queue while waiting +// for KEX to complete. This limits the total pending data to maxPendingPackets +// * maxPacket bytes, which is ~16.8MB. +const maxPendingPackets = 64 + // keyingTransport is a packet based transport that supports key // changes. It need not be thread-safe. It should pass through // msgNewKeys in both directions. @@ -34,7 +39,7 @@ type keyingTransport interface { // prepareKeyChange sets up a key change. The key change for a // direction will be effected if a msgNewKeys message is sent // or received. - prepareKeyChange(*algorithms, *kexResult) error + prepareKeyChange(*NegotiatedAlgorithms, *kexResult) error // setStrictMode sets the strict KEX mode, notably triggering // sequence number resets on sending or receiving msgNewKeys. @@ -73,11 +78,19 @@ type handshakeTransport struct { incoming chan []byte readError error - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. + mu sync.Mutex + // Condition for the above mutex. It is used to notify a completed key + // exchange or a write failure. Writes can wait for this condition while a + // key exchange is in progress. + writeCond *sync.Cond + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + // Used to queue writes when a key exchange is in progress. The length is + // limited by pendingPacketsSize. Once full, writes will block until the key + // exchange is completed or an error occurs. If not empty, it is emptied + // all at once when the key exchange is completed in kexLoop. + pendingPackets [][]byte writePacketsLeft uint32 writeBytesLeft int64 userAuthComplete bool // whether the user authentication phase is complete @@ -103,7 +116,7 @@ type handshakeTransport struct { bannerCallback BannerCallback // Algorithms agreed in the last key exchange. - algorithms *algorithms + algorithms *NegotiatedAlgorithms // Counters exclusively owned by readLoop. readPacketsLeft uint32 @@ -134,6 +147,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, config: config, } + t.writeCond = sync.NewCond(&t.mu) t.resetReadThresholds() t.resetWriteThresholds() @@ -151,7 +165,7 @@ func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byt if config.HostKeyAlgorithms != nil { t.hostKeyAlgorithms = config.HostKeyAlgorithms } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos + t.hostKeyAlgorithms = defaultHostKeyAlgos } go t.readLoop() go t.kexLoop() @@ -171,6 +185,10 @@ func (t *handshakeTransport) getSessionID() []byte { return t.sessionID } +func (t *handshakeTransport) getAlgorithms() NegotiatedAlgorithms { + return *t.algorithms +} + // waitSession waits for the session to be established. 
This should be // the first thing to call after instantiating handshakeTransport. func (t *handshakeTransport) waitSession() error { @@ -260,6 +278,7 @@ func (t *handshakeTransport) recordWriteError(err error) { defer t.mu.Unlock() if t.writeError == nil && err != nil { t.writeError = err + t.writeCond.Broadcast() } } @@ -276,7 +295,7 @@ func (t *handshakeTransport) resetWriteThresholds() { if t.config.RekeyThreshold > 0 { t.writeBytesLeft = int64(t.config.RekeyThreshold) } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() + t.writeBytesLeft = t.algorithms.Write.rekeyBytes() } else { t.writeBytesLeft = 1 << 30 } @@ -363,6 +382,8 @@ write: } } t.pendingPackets = t.pendingPackets[:0] + // Unblock writePacket if waiting for KEX. + t.writeCond.Broadcast() t.mu.Unlock() } @@ -391,7 +412,7 @@ func (t *handshakeTransport) resetReadThresholds() { if t.config.RekeyThreshold > 0 { t.readBytesLeft = int64(t.config.RekeyThreshold) } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() + t.readBytesLeft = t.algorithms.Read.rekeyBytes() } else { t.readBytesLeft = 1 << 30 } @@ -484,7 +505,7 @@ func (t *handshakeTransport) sendKexInit() error { CompressionClientServer: supportedCompressions, CompressionServerClient: supportedCompressions, } - io.ReadFull(rand.Reader, msg.Cookie[:]) + io.ReadFull(t.config.Rand, msg.Cookie[:]) // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, // and possibly to add the ext-info extension algorithm. Since the slice may be the @@ -507,7 +528,7 @@ func (t *handshakeTransport) sendKexInit() error { switch s := k.(type) { case MultiAlgorithmSigner: for _, algo := range algorithmsForKeyFormat(keyFormat) { - if contains(s.Algorithms(), underlyingAlgo(algo)) { + if slices.Contains(s.Algorithms(), underlyingAlgo(algo)) { msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) } } @@ -577,11 +598,20 @@ func (t *handshakeTransport) writePacket(p []byte) error { } if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil + if len(t.pendingPackets) < maxPendingPackets { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + for t.sentInitMsg != nil { + // Block and wait for KEX to complete or an error. 
+ t.writeCond.Wait() + if t.writeError != nil { + return t.writeError + } + } } if t.writeBytesLeft > 0 { @@ -598,6 +628,7 @@ func (t *handshakeTransport) writePacket(p []byte) error { if err := t.pushPacket(p); err != nil { t.writeError = err + t.writeCond.Broadcast() } return nil @@ -649,7 +680,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return err } - if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { + if t.sessionID == nil && ((isClient && slices.Contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && slices.Contains(clientInit.KexAlgos, kexStrictClient))) { t.strictMode = true if err := t.conn.setStrictMode(); err != nil { return err @@ -674,9 +705,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { } } - kex, ok := kexAlgoMap[t.algorithms.kex] + kex, ok := kexAlgoMap[t.algorithms.KeyExchange] if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.KeyExchange) } var result *kexResult @@ -706,7 +737,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO // message with the server-sig-algs extension if the client supports it. See // RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9. - if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { + if !isClient && firstKeyExchange && slices.Contains(clientInit.KexAlgos, "ext-info-c") { supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",") extInfo := &extInfoMsg{ NumExtensions: 2, @@ -760,7 +791,7 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { for _, k := range hostKeys { if s, ok := k.(MultiAlgorithmSigner); ok { - if !contains(s.Algorithms(), underlyingAlgo(algo)) { + if !slices.Contains(s.Algorithms(), underlyingAlgo(algo)) { continue } } @@ -783,12 +814,12 @@ func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { } func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { - hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) + hostKey := pickHostKey(t.hostKeys, t.algorithms.HostKey) if hostKey == nil { return nil, errors.New("ssh: internal error: negotiated unsupported signature type") } - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.HostKey) return r, err } @@ -803,7 +834,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) ( return nil, err } - if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { + if err := verifyHostKeySignature(hostKey, t.algorithms.HostKey, result); err != nil { return nil, err } diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 8a05f799..5f7fdd85 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -8,33 +8,31 @@ import ( "crypto" "crypto/ecdsa" "crypto/elliptic" + "crypto/fips140" "crypto/rand" - "crypto/subtle" "encoding/binary" "errors" "fmt" "io" "math/big" + "slices" "golang.org/x/crypto/curve25519" ) const ( - kexAlgoDH1SHA1 = 
"diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" - kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" - kexAlgoCurve25519SHA256 = "curve25519-sha256" - - // For the following kex only the client half contains a production - // ready implementation. The server half only consists of a minimal - // implementation to satisfy the automated tests. - kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" - kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" + // This is the group called diffie-hellman-group1-sha1 in RFC 4253 and + // Oakley Group 2 in RFC 2409. + oakleyGroup2 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF" + // This is the group called diffie-hellman-group14-sha1 in RFC 4253 and + // Oakley Group 14 in RFC 3526. + oakleyGroup14 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF" + // This is the group called diffie-hellman-group15-sha512 in RFC 8268 and + // Oakley Group 15 in RFC 3526. + oakleyGroup15 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF" + // This is the group called diffie-hellman-group16-sha512 in RFC 8268 and + // Oakley Group 16 in RFC 3526. 
+ oakleyGroup16 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF" ) // kexResult captures the outcome of a key exchange. @@ -399,56 +397,64 @@ func ecHash(curve elliptic.Curve) crypto.Hash { return crypto.SHA512 } +// kexAlgoMap defines the supported KEXs. KEXs not included are not supported +// and will not be negotiated, even if explicitly configured. When FIPS mode is +// enabled, only FIPS-approved algorithms are included. var kexAlgoMap = map[string]kexAlgorithm{} func init() { - // This is the group called diffie-hellman-group1-sha1 in - // RFC 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + // mlkem768x25519-sha256 we'll work with fips140=on but not fips140=only + // until Go 1.26. + kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{} + kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()} + kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()} + kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()} + + if fips140.Enabled() { + defaultKexAlgos = slices.DeleteFunc(defaultKexAlgos, func(algo string) bool { + _, ok := kexAlgoMap[algo] + return !ok + }) + return + } + + p, _ := new(big.Int).SetString(oakleyGroup2, 16) + kexAlgoMap[InsecureKeyExchangeDH1SHA1] = &dhGroup{ g: new(big.Int).SetInt64(2), p: p, pMinus1: new(big.Int).Sub(p, bigOne), hashFunc: crypto.SHA1, } - // This are the groups called diffie-hellman-group14-sha1 and - // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, - // and Oakley Group 14 in RFC 3526. 
- p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + p, _ = new(big.Int).SetString(oakleyGroup14, 16) group14 := &dhGroup{ g: new(big.Int).SetInt64(2), p: p, pMinus1: new(big.Int).Sub(p, bigOne), } - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + kexAlgoMap[InsecureKeyExchangeDH14SHA1] = &dhGroup{ g: group14.g, p: group14.p, pMinus1: group14.pMinus1, hashFunc: crypto.SHA1, } - kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ + kexAlgoMap[KeyExchangeDH14SHA256] = &dhGroup{ g: group14.g, p: group14.p, pMinus1: group14.pMinus1, hashFunc: crypto.SHA256, } - // This is the group called diffie-hellman-group16-sha512 in RFC - // 8268 and Oakley Group 16 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16) + p, _ = new(big.Int).SetString(oakleyGroup16, 16) - kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{ + kexAlgoMap[KeyExchangeDH16SHA512] = &dhGroup{ g: new(big.Int).SetInt64(2), p: p, pMinus1: new(big.Int).Sub(p, bigOne), hashFunc: crypto.SHA512, } - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} - kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{} - kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} - kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} + kexAlgoMap[KeyExchangeCurve25519] = &curve25519sha256{} + kexAlgoMap[keyExchangeCurve25519LibSSH] = &curve25519sha256{} + kexAlgoMap[InsecureKeyExchangeDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} + kexAlgoMap[KeyExchangeDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} } // curve25519sha256 implements the curve25519-sha256 (formerly known as @@ -464,15 +470,17 @@ func (kp *curve25519KeyPair) generate(rand io.Reader) error { if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { return err } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + p, err := curve25519.X25519(kp.priv[:], curve25519.Basepoint) + if err != nil { + return fmt.Errorf("curve25519: 
%w", err) + } + if len(p) != 32 { + return fmt.Errorf("curve25519: internal error: X25519 returned %d bytes, expected 32", len(p)) + } + copy(kp.pub[:], p) return nil } -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. -var curve25519Zeros [32]byte - func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { var kp curve25519KeyPair if err := kp.generate(rand); err != nil { @@ -495,11 +503,9 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh return nil, errors.New("ssh: peer's curve25519 public value has wrong length") } - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + secret, err := curve25519.X25519(kp.priv[:], reply.EphemeralPubKey) + if err != nil { + return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err) } h := crypto.SHA256.New() @@ -541,11 +547,9 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh return nil, err } - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + secret, err := curve25519.X25519(kp.priv[:], kexInit.ClientPubKey) + if err != nil { + return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err) } hostKeyBytes := priv.PublicKey().Marshal() @@ -601,9 +605,9 @@ const ( func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { // Send GexRequest kexDHGexRequest := kexDHGexRequestMsg{ - MinBits: dhGroupExchangeMinimumBits, - PreferedBits: dhGroupExchangePreferredBits, - MaxBits: dhGroupExchangeMaximumBits, + MinBits: dhGroupExchangeMinimumBits, + PreferredBits: dhGroupExchangePreferredBits, + MaxBits: dhGroupExchangeMaximumBits, } if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil { return nil, err @@ -690,9 +694,7 @@ func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshak } // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. -// -// This is a minimal implementation to satisfy the automated tests. -func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { +func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { // Receive GexRequest packet, err := c.readPacket() if err != nil { @@ -702,13 +704,32 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake if err = Unmarshal(packet, &kexDHGexRequest); err != nil { return } + // We check that the request received is valid and that the MaxBits + // requested are at least equal to our supported minimum. 
This is the same + // check done in OpenSSH: + // https://github.com/openssh/openssh-portable/blob/80a2f64b/kexgexs.c#L94 + // + // Furthermore, we also check that the required MinBits are less than or + // equal to 4096 because we can use up to Oakley Group 16. + if kexDHGexRequest.MaxBits < kexDHGexRequest.MinBits || kexDHGexRequest.PreferredBits < kexDHGexRequest.MinBits || + kexDHGexRequest.MaxBits < kexDHGexRequest.PreferredBits || kexDHGexRequest.MaxBits < dhGroupExchangeMinimumBits || + kexDHGexRequest.MinBits > 4096 { + return nil, fmt.Errorf("ssh: DH GEX request out of range, min: %d, max: %d, preferred: %d", kexDHGexRequest.MinBits, + kexDHGexRequest.MaxBits, kexDHGexRequest.PreferredBits) + } + + var p *big.Int + // We hardcode sending Oakley Group 14 (2048 bits), Oakley Group 15 (3072 + // bits) or Oakley Group 16 (4096 bits), based on the requested max size. + if kexDHGexRequest.MaxBits < 3072 { + p, _ = new(big.Int).SetString(oakleyGroup14, 16) + } else if kexDHGexRequest.MaxBits < 4096 { + p, _ = new(big.Int).SetString(oakleyGroup15, 16) + } else { + p, _ = new(big.Int).SetString(oakleyGroup16, 16) + } - // Send GexGroup - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) g := big.NewInt(2) - msg := &kexDHGexGroupMsg{ P: p, G: g, @@ -746,9 +767,9 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake h := gex.hashFunc.New() magics.write(h) writeString(h, hostKeyBytes) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + binary.Write(h, binary.BigEndian, kexDHGexRequest.MinBits) + binary.Write(h, binary.BigEndian, kexDHGexRequest.PreferredBits) + binary.Write(h, binary.BigEndian, kexDHGexRequest.MaxBits) writeInt(h, p) writeInt(h, g) writeInt(h, kexDHGexInit.X) diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 98e6706d..a035956f 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -27,6 +27,7 @@ import ( "fmt" "io" "math/big" + "slices" "strings" "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" @@ -36,14 +37,19 @@ import ( // ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner // arguments. const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" - KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" + KeyAlgoRSA = "ssh-rsa" + // Deprecated: DSA is only supported at insecure key sizes, and was removed + // from major implementations. 
+ KeyAlgoDSA = InsecureKeyAlgoDSA + // Deprecated: DSA is only supported at insecure key sizes, and was removed + // from major implementations. + InsecureKeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" + KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not // public key formats, so they can't appear as a PublicKey.Type. The @@ -67,7 +73,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err switch algo { case KeyAlgoRSA: return parseRSA(in) - case KeyAlgoDSA: + case InsecureKeyAlgoDSA: return parseDSA(in) case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: return parseECDSA(in) @@ -77,13 +83,18 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err return parseED25519(in) case KeyAlgoSKED25519: return parseSKEd25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: + case CertAlgoRSAv01, InsecureCertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: cert, err := parseCert(in, certKeyAlgoNames[algo]) if err != nil { return nil, nil, err } return cert, nil, nil } + if keyFormat := keyFormatForAlgorithm(algo); keyFormat != "" { + return nil, nil, fmt.Errorf("ssh: signature algorithm %q isn't a key format; key is malformed and should be re-encoded with type %q", + algo, keyFormat) + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) } @@ -186,9 +197,10 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey return "", nil, nil, "", nil, io.EOF } -// ParseAuthorizedKey parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. +// ParseAuthorizedKey parses a public key from an authorized_keys file used in +// OpenSSH according to the sshd(8) manual page. Invalid lines are ignored. func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + var lastErr error for len(in) > 0 { end := bytes.IndexByte(in, '\n') if end != -1 { @@ -217,6 +229,8 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { return out, comment, options, rest, nil + } else { + lastErr = err } // No key type recognised. Maybe there's an options field at @@ -259,16 +273,22 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { options = candidateOptions return out, comment, options, rest, nil + } else { + lastErr = err } in = rest continue } + if lastErr != nil { + return nil, "", nil, nil, fmt.Errorf("ssh: no key found; last parsing error for ignored line: %w", lastErr) + } + return nil, "", nil, nil, errors.New("ssh: no key found") } -// ParsePublicKey parses an SSH public key formatted for use in +// ParsePublicKey parses an SSH public key or certificate formatted for use in // the SSH wire protocol according to RFC 4253, section 6.6. 
func ParsePublicKey(in []byte) (out PublicKey, err error) { algo, in, ok := parseString(in) @@ -390,11 +410,11 @@ func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (Multi } for _, algo := range algorithms { - if !contains(supportedAlgos, algo) { + if !slices.Contains(supportedAlgos, algo) { return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q", algo, signer.PublicKey().Type()) } - if !contains(signerAlgos, algo) { + if !slices.Contains(signerAlgos, algo) { return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo) } } @@ -481,10 +501,13 @@ func (r *rsaPublicKey) Marshal() []byte { func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { supportedAlgos := algorithmsForKeyFormat(r.Type()) - if !contains(supportedAlgos, sig.Format) { + if !slices.Contains(supportedAlgos, sig.Format) { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) } - hash := hashFuncs[sig.Format] + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -601,7 +624,11 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := hashFuncs[sig.Format].New() + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } + h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -646,7 +673,11 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) } - h := hashFuncs[k.PublicKey().Type()].New() + hash, err := hashFunc(k.PublicKey().Type()) + if err != nil { + return nil, err + } + h := hash.New() h.Write(data) digest := h.Sum(nil) r, s, err := dsa.Sign(rand, k.PrivateKey, digest) @@ -796,8 +827,11 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - - h := hashFuncs[sig.Format].New() + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } + h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -900,8 +934,11 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - - h := hashFuncs[sig.Format].New() + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } + h := hash.New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -1004,7 +1041,11 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("invalid size %d for Ed25519 public key", l) } - h := hashFuncs[sig.Format].New() + hash, err := hashFunc(sig.Format) + if err != nil { + return err + } + h := hash.New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -1107,11 +1148,14 @@ func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm algorithm = s.pubKey.Type() } - if !contains(s.Algorithms(), algorithm) { + if !slices.Contains(s.Algorithms(), algorithm) { return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) } - hashFunc := hashFuncs[algorithm] + hashFunc, err := hashFunc(algorithm) + if err != nil { + return nil, err + } var digest []byte if hashFunc != 0 { h := hashFunc.New() diff --git a/vendor/golang.org/x/crypto/ssh/mac.go 
b/vendor/golang.org/x/crypto/ssh/mac.go index 06a1b275..87d626fb 100644 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -7,11 +7,13 @@ package ssh // Message authentication support import ( + "crypto/fips140" "crypto/hmac" "crypto/sha1" "crypto/sha256" "crypto/sha512" "hash" + "slices" ) type macMode struct { @@ -46,23 +48,37 @@ func (t truncatingMAC) Size() int { func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } -var macModes = map[string]*macMode{ - "hmac-sha2-512-etm@openssh.com": {64, true, func(key []byte) hash.Hash { +// macModes defines the supported MACs. MACs not included are not supported +// and will not be negotiated, even if explicitly configured. When FIPS mode is +// enabled, only FIPS-approved algorithms are included. +var macModes = map[string]*macMode{} + +func init() { + macModes[HMACSHA512ETM] = &macMode{64, true, func(key []byte) hash.Hash { return hmac.New(sha512.New, key) - }}, - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + }} + macModes[HMACSHA256ETM] = &macMode{32, true, func(key []byte) hash.Hash { return hmac.New(sha256.New, key) - }}, - "hmac-sha2-512": {64, false, func(key []byte) hash.Hash { + }} + macModes[HMACSHA512] = &macMode{64, false, func(key []byte) hash.Hash { return hmac.New(sha512.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + }} + macModes[HMACSHA256] = &macMode{32, false, func(key []byte) hash.Hash { return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { + }} + + if fips140.Enabled() { + defaultMACs = slices.DeleteFunc(defaultMACs, func(algo string) bool { + _, ok := macModes[algo] + return !ok + }) + return + } + + macModes[HMACSHA1] = &macMode{20, false, func(key []byte) hash.Hash { return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + }} + macModes[InsecureHMACSHA196] = &macMode{20, false, func(key []byte) hash.Hash { return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, + }} } diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index b55f8605..ab22c3d3 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -122,9 +122,9 @@ type kexDHGexReplyMsg struct { const msgKexDHGexRequest = 34 type kexDHGexRequestMsg struct { - MinBits uint32 `sshtype:"34"` - PreferedBits uint32 - MaxBits uint32 + MinBits uint32 `sshtype:"34"` + PreferredBits uint32 + MaxBits uint32 } // See RFC 4253, section 10. @@ -792,7 +792,7 @@ func marshalString(to []byte, s []byte) []byte { return to[len(s):] } -var bigIntType = reflect.TypeOf((*big.Int)(nil)) +var bigIntType = reflect.TypeFor[*big.Int]() // Decode a packet into its corresponding message. func decode(packet []byte) (interface{}, error) { @@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) { return new(userAuthSuccessMsg), nil case msgUserAuthFailure: msg = new(userAuthFailureMsg) + case msgUserAuthBanner: + msg = new(userAuthBannerMsg) case msgUserAuthPubKeyOk: msg = new(userAuthPubKeyOkMsg) case msgGlobalRequest: diff --git a/vendor/golang.org/x/crypto/ssh/mlkem.go b/vendor/golang.org/x/crypto/ssh/mlkem.go new file mode 100644 index 00000000..ddc0ed1f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mlkem.go @@ -0,0 +1,168 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "crypto" + "crypto/mlkem" + "crypto/sha256" + "errors" + "fmt" + "io" + + "golang.org/x/crypto/curve25519" +) + +// mlkem768WithCurve25519sha256 implements the hybrid ML-KEM768 with +// curve25519-sha256 key exchange method, as described by +// draft-kampanakis-curdle-ssh-pq-ke-05 section 2.3.3. +type mlkem768WithCurve25519sha256 struct{} + +func (kex *mlkem768WithCurve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var c25519kp curve25519KeyPair + if err := c25519kp.generate(rand); err != nil { + return nil, err + } + + seed := make([]byte, mlkem.SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, err + } + + mlkemDk, err := mlkem.NewDecapsulationKey768(seed) + if err != nil { + return nil, err + } + + hybridKey := append(mlkemDk.EncapsulationKey().Bytes(), c25519kp.pub[:]...) + if err := c.writePacket(Marshal(&kexECDHInitMsg{hybridKey})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + if len(reply.EphemeralPubKey) != mlkem.CiphertextSize768+32 { + return nil, errors.New("ssh: peer's mlkem768x25519 public value has wrong length") + } + + // Perform KEM decapsulate operation to obtain shared key from ML-KEM. + mlkem768Secret, err := mlkemDk.Decapsulate(reply.EphemeralPubKey[:mlkem.CiphertextSize768]) + if err != nil { + return nil, err + } + + // Complete Curve25519 ECDH to obtain its shared key. + c25519Secret, err := curve25519.X25519(c25519kp.priv[:], reply.EphemeralPubKey[mlkem.CiphertextSize768:]) + if err != nil { + return nil, fmt.Errorf("ssh: peer's mlkem768x25519 public value is not valid: %w", err) + } + // Compute actual shared key. + h := sha256.New() + h.Write(mlkem768Secret) + h.Write(c25519Secret) + secret := h.Sum(nil) + + h.Reset() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, hybridKey) + writeString(h, reply.EphemeralPubKey) + + K := make([]byte, stringLength(len(secret))) + marshalString(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *mlkem768WithCurve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (*kexResult, error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return nil, err + } + + if len(kexInit.ClientPubKey) != mlkem.EncapsulationKeySize768+32 { + return nil, errors.New("ssh: peer's ML-KEM768/curve25519 public value has wrong length") + } + + encapsulationKey, err := mlkem.NewEncapsulationKey768(kexInit.ClientPubKey[:mlkem.EncapsulationKeySize768]) + if err != nil { + return nil, fmt.Errorf("ssh: peer's ML-KEM768 encapsulation key is not valid: %w", err) + } + // Perform KEM encapsulate operation to obtain ciphertext and shared key. + mlkem768Secret, mlkem768Ciphertext := encapsulationKey.Encapsulate() + + // Perform server side of Curve25519 ECDH to obtain server public value and + // shared key. 
+ var c25519kp curve25519KeyPair + if err := c25519kp.generate(rand); err != nil { + return nil, err + } + c25519Secret, err := curve25519.X25519(c25519kp.priv[:], kexInit.ClientPubKey[mlkem.EncapsulationKeySize768:]) + if err != nil { + return nil, fmt.Errorf("ssh: peer's ML-KEM768/curve25519 public value is not valid: %w", err) + } + hybridKey := append(mlkem768Ciphertext, c25519kp.pub[:]...) + + // Compute actual shared key. + h := sha256.New() + h.Write(mlkem768Secret) + h.Write(c25519Secret) + secret := h.Sum(nil) + + hostKeyBytes := priv.PublicKey().Marshal() + + h.Reset() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, hybridKey) + + K := make([]byte, stringLength(len(secret))) + marshalString(K, secret) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H, algo) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: hybridKey, + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 1839ddc6..064dcbaf 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "net" + "slices" "strings" ) @@ -43,6 +44,9 @@ type Permissions struct { // pass data from the authentication callbacks to the server // application layer. Extensions map[string]string + + // ExtraData allows to store user defined data. + ExtraData map[any]any } type GSSAPIWithMICConfig struct { @@ -126,6 +130,21 @@ type ServerConfig struct { // Permissions.Extensions entry. PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + // VerifiedPublicKeyCallback, if non-nil, is called after a client + // successfully confirms having control over a key that was previously + // approved by PublicKeyCallback. The permissions object passed to the + // callback is the one returned by PublicKeyCallback for the given public + // key and its ownership is transferred to the callback. The returned + // Permissions object can be the same object, optionally modified, or a + // completely new object. If VerifiedPublicKeyCallback is non-nil, + // PublicKeyCallback is not allowed to return a PartialSuccessError, which + // can instead be returned by VerifiedPublicKeyCallback. + // + // VerifiedPublicKeyCallback does not affect which authentication methods + // are included in the list of methods that can be attempted by the client. + VerifiedPublicKeyCallback func(conn ConnMetadata, key PublicKey, permissions *Permissions, + signatureAlgorithm string) (*Permissions, error) + // KeyboardInteractiveCallback, if non-nil, is called when // keyboard-interactive authentication is selected (RFC // 4256). 
The client object's Challenge function should be @@ -243,22 +262,15 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha fullConf.MaxAuthTries = 6 } if len(fullConf.PublicKeyAuthAlgorithms) == 0 { - fullConf.PublicKeyAuthAlgorithms = supportedPubKeyAuthAlgos + fullConf.PublicKeyAuthAlgorithms = defaultPubKeyAuthAlgos } else { for _, algo := range fullConf.PublicKeyAuthAlgorithms { - if !contains(supportedPubKeyAuthAlgos, algo) { + if !slices.Contains(SupportedAlgorithms().PublicKeyAuths, algo) && !slices.Contains(InsecureAlgorithms().PublicKeyAuths, algo) { c.Close() return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo) } } } - // Check if the config contains any unsupported key exchanges - for _, kex := range fullConf.KeyExchanges { - if _, ok := serverForbiddenKexAlgos[kex]; ok { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) - } - } s := &connection{ sshConn: sshConn{conn: c}, @@ -315,6 +327,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) // We just did the key change, so the session ID is established. s.sessionID = s.transport.getSessionID() + s.algorithms = s.transport.getAlgorithms() var packet []byte if packet, err = s.transport.readPacket(); err != nil { @@ -637,7 +650,7 @@ userAuthLoop: return nil, parseError(msgUserAuthRequest) } algo := string(algoBytes) - if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) { + if !slices.Contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) { authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) break } @@ -658,6 +671,9 @@ userAuthLoop: candidate.pubKeyData = pubKeyData candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + if isPartialSuccessError && config.VerifiedPublicKeyCallback != nil { + return nil, errors.New("ssh: invalid library usage: PublicKeyCallback must not return partial success when VerifiedPublicKeyCallback is defined") + } if (candidate.result == nil || isPartialSuccessError) && candidate.perms != nil && @@ -701,7 +717,7 @@ userAuthLoop: // ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public // key type. The algorithm and public key type must be // consistent: both must be certificate algorithms, or neither. - if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) { + if !slices.Contains(algorithmsForKeyFormat(pubKey.Type()), algo) { authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q", pubKey.Type(), algo) break @@ -711,7 +727,7 @@ userAuthLoop: // algorithm name that corresponds to algo with // sig.Format. This is usually the same, but // for certs, the names differ. - if !contains(config.PublicKeyAuthAlgorithms, sig.Format) { + if !slices.Contains(config.PublicKeyAuthAlgorithms, sig.Format) { authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } @@ -728,6 +744,12 @@ userAuthLoop: authErr = candidate.result perms = candidate.perms + if authErr == nil && config.VerifiedPublicKeyCallback != nil { + // Only call VerifiedPublicKeyCallback after the key has been accepted + // and successfully verified. If authErr is non-nil, the key is not + // considered verified and the callback must not run. 
+ perms, authErr = config.VerifiedPublicKeyCallback(s, pubKey, perms, algo) + } } case "gssapi-with-mic": if authConfig.GSSAPIWithMICConfig == nil { diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go index ef5059a1..93d844f0 100644 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel return nil, err } go DiscardRequests(in) - return ch, err + return ch, nil } type tcpChan struct { diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index 0424d2d3..fa3dd6a4 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -8,6 +8,7 @@ import ( "bufio" "bytes" "errors" + "fmt" "io" "log" ) @@ -16,13 +17,6 @@ import ( // wire. No message decoding is done, to minimize the impact on timing. const debugTransport = false -const ( - gcm128CipherID = "aes128-gcm@openssh.com" - gcm256CipherID = "aes256-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - // packetConn represents a transport that implements packet based // operations. type packetConn interface { @@ -92,14 +86,14 @@ func (t *transport) setInitialKEXDone() { // prepareKeyChange sets up key material for a keychange. The key changes in // both directions are triggered by reading and writing a msgNewKey packet // respectively. -func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) +func (t *transport) prepareKeyChange(algs *NegotiatedAlgorithms, kexResult *kexResult) error { + ciph, err := newPacketCipher(t.reader.dir, algs.Read, kexResult) if err != nil { return err } t.reader.pendingKeyChange <- ciph - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) + ciph, err = newPacketCipher(t.writer.dir, algs.Write, kexResult) if err != nil { return err } @@ -259,8 +253,11 @@ var ( // setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as // described in RFC 4253, section 6.4. direction should either be serverKeys // (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { +func newPacketCipher(d direction, algs DirectionAlgorithms, kex *kexResult) (packetCipher, error) { cipherMode := cipherModes[algs.Cipher] + if cipherMode == nil { + return nil, fmt.Errorf("ssh: unsupported cipher %v", algs.Cipher) + } iv := make([]byte, cipherMode.ivSize) key := make([]byte, cipherMode.keySize) diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go index 37dc0cfd..e0df203c 100644 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" +package ctxhttp import ( "context" diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index ca645d9a..8a7a89d0 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -27,6 +27,7 @@ import ( // - If the resulting value is zero or out of range, use a default. 
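// Illustrative sketch, not part of this diff: the net/http side of the
// configuration flow described above. The fields set on http.Server.HTTP2
// (available since Go 1.24) are the ones fillNetHTTPConfig in this change
// copies into http2Config; the concrete values and the package and function
// names are arbitrary.
package exampleh2config

import (
	"net/http"
	"time"
)

func newServer(handler http.Handler) *http.Server {
	return &http.Server{
		Addr:    ":8443",
		Handler: handler,
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,              // becomes conf.MaxConcurrentStreams
			MaxReadFrameSize:     1 << 20,          // becomes conf.MaxReadFrameSize
			SendPingTimeout:      15 * time.Second, // becomes conf.SendPingTimeout
			PingTimeout:          15 * time.Second,
			WriteByteTimeout:     30 * time.Second,
		},
	}
}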
type http2Config struct { MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool MaxDecoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32 MaxReadFrameSize uint32 @@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, CountError: h2.CountError, } - fillNetHTTPServerConfig(&conf, h1) + fillNetHTTPConfig(&conf, h1.HTTP2) setConfigDefaults(&conf, true) return conf } @@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ - MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, - MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, - MaxReadFrameSize: h2.MaxReadFrameSize, - SendPingTimeout: h2.ReadIdleTimeout, - PingTimeout: h2.PingTimeout, - WriteByteTimeout: h2.WriteByteTimeout, + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, } // Unlike most config fields, where out-of-range values revert to the default, @@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config { } if h2.t1 != nil { - fillNetHTTPTransportConfig(&conf, h2.t1) + fillNetHTTPConfig(&conf, h2.t1.HTTP2) } setConfigDefaults(&conf, false) return conf @@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 { const typicalHeaders = 10 // conservative return n + typicalHeaders*perFieldOverhead } + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if http2ConfigStrictMaxConcurrentRequests(h2) { + conf.StrictMaxConcurrentRequests = true + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go deleted file mode 100644 index 5b516c55..00000000 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.24 - -package http2 - -import "net/http" - -// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { - fillNetHTTPConfig(conf, srv.HTTP2) -} - -// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. -func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { - fillNetHTTPConfig(conf, tr.HTTP2) -} - -func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { - if h2 == nil { - return - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxEncoderHeaderTableSize != 0 { - conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) - } - if h2.MaxDecoderHeaderTableSize != 0 { - conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxReadFrameSize != 0 { - conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) - } - if h2.MaxReceiveBufferPerConnection != 0 { - conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) - } - if h2.MaxReceiveBufferPerStream != 0 { - conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) - } - if h2.SendPingTimeout != 0 { - conf.SendPingTimeout = h2.SendPingTimeout - } - if h2.PingTimeout != 0 { - conf.PingTimeout = h2.PingTimeout - } - if h2.WriteByteTimeout != 0 { - conf.WriteByteTimeout = h2.WriteByteTimeout - } - if h2.PermitProhibitedCipherSuites { - conf.PermitProhibitedCipherSuites = true - } - if h2.CountError != nil { - conf.CountError = h2.CountError - } -} diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go new file mode 100644 index 00000000..b4373fe3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go125.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return false +} diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go new file mode 100644 index 00000000..6b071c14 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go126.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go deleted file mode 100644 index 060fd6c6..00000000 --- a/vendor/golang.org/x/net/http2/config_pre_go124.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.24 - -package http2 - -import "net/http" - -// Pre-Go 1.24 fallback. -// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. 
- -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} - -func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 81faec7e..9a4bd123 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -39,7 +39,7 @@ const ( FrameContinuation FrameType = 0x9 ) -var frameName = map[FrameType]string{ +var frameNames = [...]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", @@ -53,10 +53,10 @@ var frameName = map[FrameType]string{ } func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s + if int(t) < len(frameNames) { + return frameNames[t] } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t) } // Flags is a bitmask of HTTP/2 flags. @@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{ // might be 0). type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) -var frameParsers = map[FrameType]frameParser{ +var frameParsers = [...]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, @@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{ } func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f + if int(t) < len(frameParsers) { + return frameParsers[t] } return parseUnknownFrame } @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -275,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. + lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -342,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 { func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -483,30 +490,47 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. +// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. 
-func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. +func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { - return nil, err + return fh, err } if fh.Length > fr.maxReadSize { - return nil, ErrFrameTooLarge + if fh == invalidHTTP1LookingFrameHeader() { + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + } + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. +func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) { + if fr.lastFrame != nil { + fr.lastFrame.invalidate() } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) @@ -516,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } return nil, err } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } + fr.lastFrame = f if fr.logReads { fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } @@ -528,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) { return f, nil } +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame or ReadFrameBodyForHeader. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. +func (fr *Framer) ReadFrame() (Frame, error) { + fh, err := fr.ReadFrameHeader() + if err != nil { + return nil, err + } + return fr.ReadFrameForHeader(fh) +} + // connError returns ConnectionError(code) but first // stashes away a public reason to the caller can optionally relay it // to the peer before hanging up on them. This might help others debug @@ -540,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error { // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous. 
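// Illustrative sketch, not part of this diff: the two-step read API added here,
// inspecting a frame header before deciding how to consume its payload. It
// assumes an x/net/http2 version that includes ReadFrameHeader and
// ReadFrameForHeader; the names and the size threshold are arbitrary.
package exampleframer

import (
	"fmt"
	"io"

	"golang.org/x/net/http2"
)

func readFrames(rw io.ReadWriter) error {
	fr := http2.NewFramer(rw, rw)
	for {
		fh, err := fr.ReadFrameHeader()
		if err != nil {
			return err
		}
		if fh.Type == http2.FrameData && fh.Length > 1<<16 {
			fmt.Printf("large DATA frame on stream %d (%d bytes)\n", fh.StreamID, fh.Length)
		}
		// Hand the payload back to the Framer; a caller could instead consume
		// fh.Length bytes directly from its own reader.
		f, err := fr.ReadFrameForHeader(fh)
		if err != nil {
			return err
		}
		fmt.Printf("%v on stream %d\n", fh.Type, f.Header().StreamID)
	}
}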
-func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f +func (fr *Framer) checkFrameOrder(fh FrameHeader) error { + lastType := fr.lastFrameType + fr.lastFrameType = fh.Type if fr.AllowIllegalReads { return nil } - fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) + lastType, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(ErrCodeProtocol, @@ -1141,7 +1180,16 @@ type PriorityFrame struct { PriorityParam } -// PriorityParam are the stream prioritzation parameters. +var defaultRFC9218Priority = PriorityParam{ + incremental: 0, + urgency: 3, +} + +// Note that HTTP/2 has had two different prioritization schemes, and +// PriorityParam struct below is a superset of both schemes. The exported +// symbols are from RFC 7540 and the non-exported ones are from RFC 9218. + +// PriorityParam are the stream prioritization parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. Zero means no @@ -1156,6 +1204,20 @@ type PriorityParam struct { // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 + + // "The urgency (u) parameter value is Integer (see Section 3.3.1 of + // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of + // priority. The default is 3." + urgency uint8 + + // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of + // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed + // incrementally, i.e., provide some meaningful output as chunks of the + // response arrive." + // + // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can + // avoid unnecessary type conversions and because either type takes 1 byte. + incremental uint8 } func (p PriorityParam) IsZero() bool { diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go index 9933c9f8..9921ca09 100644 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -15,21 +15,32 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" +// Setting DebugGoroutines to false during a test to disable goroutine debugging +// results in race detector complaints when a test leaves goroutines running before +// returning. Tests shouldn't do this, of course, but when they do it generally shows +// up as infrequent, hard-to-debug flakes. (See #66519.) +// +// Disable goroutine debugging during individual tests with an atomic bool. +// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition +// here is harmless.) 
+var disableDebugGoroutines atomic.Bool + type goroutineLock uint64 func newGoroutineLock() goroutineLock { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() != uint64(g) { @@ -38,7 +49,7 @@ func (g goroutineLock) check() { } func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() == uint64(g) { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6c18ea23..105fe12f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -11,13 +11,10 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. package http2 // import "golang.org/x/net/http2" import ( "bufio" - "context" "crypto/tls" "errors" "fmt" @@ -37,7 +34,6 @@ var ( VerboseLogs bool logFrameWrites bool logFrameReads bool - inTests bool // Enabling extended CONNECT by causes browsers to attempt to use // WebSockets-over-HTTP/2. This results in problems when the server's websocket @@ -257,15 +253,13 @@ func (cw closeWaiter) Wait() { // idle memory usage with many connections. type bufferedWriter struct { _ incomparable - group synctestGroupInterface // immutable - conn net.Conn // immutable - bw *bufio.Writer // non-nil when data is buffered - byteTimeout time.Duration // immutable, WriteByteTimeout + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { +func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter { return &bufferedWriter{ - group: group, conn: conn, byteTimeout: timeout, } @@ -316,24 +310,18 @@ func (w *bufferedWriter) Flush() error { type bufferedWriterTimeoutWriter bufferedWriter func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { - return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) + return writeWithByteTimeout(w.conn, w.byteTimeout, p) } // writeWithByteTimeout writes to conn. // If more than timeout passes without any bytes being written to the connection, // the write fails. -func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { +func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { if timeout <= 0 { return conn.Write(p) } for { - var now time.Time - if group == nil { - now = time.Now() - } else { - now = group.Now() - } - conn.SetWriteDeadline(now.Add(timeout)) + conn.SetWriteDeadline(time.Now().Add(timeout)) nn, err := conn.Write(p[n:]) n += nn if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { @@ -419,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() - -// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. -// It's defined as an interface here to let us keep synctestGroup entirely test-only -// and not a part of non-test builds. 
-type synctestGroupInterface interface { - Join() - Now() time.Time - NewTimer(d time.Duration) timer - AfterFunc(d time.Duration, f func()) timer - ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 7434b878..bdc5520e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -176,44 +176,15 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState - - // Synchronization group used for testing. - // Outside of tests, this is nil. - group synctestGroupInterface -} - -func (s *Server) markNewGoroutine() { - if s.group != nil { - s.group.Join() - } -} - -func (s *Server) now() time.Time { - if s.group != nil { - return s.group.Now() - } - return time.Now() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (s *Server) newTimer(d time.Duration) timer { - if s.group != nil { - return s.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (s *Server) afterFunc(d time.Duration, f func()) timer { - if s.group != nil { - return s.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} } type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} + + // Pool of error channels. This is per-Server rather than global + // because channels can't be reused across synctest bubbles. + errChanPool sync.Pool } func (s *serverInternalState) registerConn(sc *serverConn) { @@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() { s.mu.Unlock() } +// Global error channel pool used for uninitialized Servers. +// We use a per-Server pool when possible to avoid using channels across synctest bubbles. +var errChanPool = sync.Pool{ + New: func() any { return make(chan error, 1) }, +} + +func (s *serverInternalState) getErrChan() chan error { + if s == nil { + return errChanPool.Get().(chan error) // Server used without calling ConfigureServer + } + return s.errChanPool.Get().(chan error) +} + +func (s *serverInternalState) putErrChan(ch chan error) { + if s == nil { + errChanPool.Put(ch) // Server used without calling ConfigureServer + return + } + s.errChanPool.Put(ch) +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. @@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + conf.state = &serverInternalState{ + activeConns: make(map[*serverConn]struct{}), + errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}, + } if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout @@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. 
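// Illustrative sketch, not part of this diff: serving accepted connections with
// Server.ServeConn. With the nil check added below, passing nil options is now
// accepted; ServeConnOpts remains the way to supply a per-connection handler.
// Listener setup and names are arbitrary.
package exampleserveconn

import (
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func serve(l net.Listener, handler http.Handler) error {
	s := &http2.Server{}
	for {
		c, err := l.Accept()
		if err != nil {
			return err
		}
		// s.ServeConn(c, nil) is also valid now; opts are used here only to
		// attach the handler for this connection.
		go s.ServeConn(c, &http2.ServeConnOpts{Handler: handler})
	}
}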
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + if opts == nil { + opts = &ServeConnOpts{} + } s.serveConn(c, opts, nil) } @@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), + bw: newBufferedWriter(c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -638,11 +636,11 @@ type serverConn struct { pingSent bool sentPingData [8]byte goAwayCode ErrCode - shutdownTimer timer // nil until used - idleTimer timer // nil if unused + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused readIdleTimeout time.Duration pingTimeout time.Duration - readIdleTimer timer // nil if unused + readIdleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -687,12 +685,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline timer // nil if unused - writeDeadline timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused + writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -848,7 +846,6 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - sc.srv.markNewGoroutine() gate := make(chan struct{}) gateDone := func() { gate <- struct{}{} } for { @@ -881,7 +878,6 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. 
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { - sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } if conf.SendPingTimeout > 0 { sc.readIdleTimeout = conf.SendPingTimeout - sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) defer sc.readIdleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() - lastFrameTime := sc.srv.now() + lastFrameTime := time.Now() loopNum := 0 for { loopNum++ @@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: - lastFrameTime = sc.srv.now() + lastFrameTime = time.Now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1068,13 +1064,16 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) - now := sc.srv.now() + now := time.Now() if pingAt.After(now) { // We received frames since arming the ping timer. // Reset it for the next possible timeout. @@ -1138,10 +1137,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C(): + case <-timer.C: return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1153,10 +1152,6 @@ func (sc *serverConn) readPreface() error { } } -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - var writeDataPool = sync.Pool{ New: func() interface{} { return new(writeData) }, } @@ -1164,7 +1159,7 @@ var writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. 
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) + ch := sc.srv.state.getErrChan() writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ @@ -1196,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return errStreamClosed } } - errChanPool.Put(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { writeDataPool.Put(writeArg) } @@ -1510,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -2115,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2213,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2233,25 +2228,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - protocol: f.PseudoValue("protocol"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } // extended connect is disabled, so we should not see :protocol - if disableExtendedConnectProtocol && rp.protocol != "" { + if disableExtendedConnectProtocol && rp.Protocol != "" { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2265,15 +2260,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range 
f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") } - if rp.protocol != "" { - rp.header.Set(":protocol", rp.protocol) + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2282,7 +2278,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2298,84 +2294,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - protocol string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" && rp.protocol == "" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -2447,7 +2397,6 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2496,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = errChanPool.Get().(chan error) + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, @@ -2508,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro if errc != nil { select { case err := <-errc: - errChanPool.Put(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return errClientDisconnected @@ -2615,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) { if err == io.EOF { b.sawEOF = true } - if b.conn == nil && inTests { + if b.conn == nil { return } b.conn.noteBodyReadFromHandler(b.stream, n, err) @@ -2744,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = rws.conn.srv.now().UTC().Format(http.TimeFormat) + date = time.Now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2866,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2882,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(sc.srv.now())) + st.readDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -2892,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. 
st.onWriteTimeout() @@ -2908,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) + st.writeDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -3189,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { method: opts.Method, url: u, header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), + done: sc.srv.state.getErrChan(), } select { @@ -3206,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { case <-st.cw: return errStreamClosed case err := <-msg.done: - errChanPool.Put(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } @@ -3270,12 +3219,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go deleted file mode 100644 index 0b1c17b8..00000000 --- a/vendor/golang.org/x/net/http2/timer.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import "time" - -// A timer is a time.Timer, as an interface which can be replaced in tests. -type timer = interface { - C() <-chan time.Time - Reset(d time.Duration) bool - Stop() bool -} - -// timeTimer adapts a time.Timer to the timer interface. 
-type timeTimer struct { - *time.Timer -} - -func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f2c166b6..1965913e 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -193,50 +194,6 @@ type Transport struct { type transportTestHooks struct { newclientconn func(*ClientConn) - group synctestGroupInterface -} - -func (t *Transport) markNewGoroutine() { - if t != nil && t.transportTestHooks != nil { - t.transportTestHooks.group.Join() - } -} - -func (t *Transport) now() time.Time { - if t != nil && t.transportTestHooks != nil { - return t.transportTestHooks.group.Now() - } - return time.Now() -} - -func (t *Transport) timeSince(when time.Time) time.Duration { - if t != nil && t.transportTestHooks != nil { - return t.now().Sub(when) - } - return time.Since(when) -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (t *Transport) newTimer(d time.Duration) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (t *Transport) afterFunc(d time.Duration, f func()) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} -} - -func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.ContextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -366,7 +323,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer timer + idleTimer *time.Timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -399,6 +356,7 @@ type ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration extendedConnectAllowed bool + strictMaxConcurrentStreams bool // rstStreamPingsBlocked works around an unfortunate gRPC behavior. // gRPC strictly limits the number of PING frames that it will receive. 
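// Illustrative sketch, not part of this diff: the per-connection
// strictMaxConcurrentStreams flag above is populated from
// Transport.StrictMaxConcurrentStreams (via StrictMaxConcurrentRequests in
// configFromTransport). A minimal way to enable it on the client side; TLS
// details and names are arbitrary.
package examplestrict

import (
	"crypto/tls"
	"net/http"

	"golang.org/x/net/http2"
)

func newClient() *http.Client {
	t := &http2.Transport{
		// Prefer waiting for a free stream on an existing connection over
		// dialing an additional one when MaxConcurrentStreams is reached.
		StrictMaxConcurrentStreams: true,
		TLSClientConfig:            &tls.Config{MinVersion: tls.VersionTLS12},
	}
	return &http.Client{Transport: t}
}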
@@ -534,14 +492,12 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { - cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() } type stickyErrWriter struct { - group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -551,7 +507,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + n, err = writeWithByteTimeout(sew.conn, sew.timeout, p) *sew.err = err return n, err } @@ -650,9 +606,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - tm := t.newTimer(d) + tm := time.NewTimer(d) select { - case <-tm.C(): + case <-tm.C: t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): @@ -699,6 +655,7 @@ var ( errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnNotEstablished = errors.New("http2: client conn could not be established") errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -829,7 +786,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialWindowSize: 65535, // spec default initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, seenSettingsChan: make(chan struct{}), @@ -838,14 +796,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - lastActive: t.now(), + lastActive: time.Now(), } - var group synctestGroupInterface if t.transportTestHooks != nil { - t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn - group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -857,7 +812,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ - group: group, conn: c, timeout: conf.WriteByteTimeout, err: &cc.werr, @@ -906,7 +860,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // Start the idle timer after the connection is fully initialized. 
if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } go cc.readLoop() @@ -917,7 +871,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1067,7 +1021,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before @@ -1120,7 +1074,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1186,7 +1140,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { - cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1257,8 +1210,7 @@ func (cc *ClientConn) closeForError(err error) { // // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { - err := errors.New("http2: client connection force closed via ClientConn.Close") - cc.closeForError(err) + cc.closeForError(errClientConnForceClosed) return nil } @@ -1286,6 +1238,19 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. 
+func actualContentLength(req *http.Request) int64 { + if req.Body == nil || req.Body == http.NoBody { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + func (cc *ClientConn) decrStreamReservations() { cc.mu.Lock() defer cc.mu.Unlock() @@ -1310,7 +1275,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) reqCancel: req.Cancel, isHead: req.Method == "HEAD", reqBody: req.Body, - reqBodyContentLength: httpcommon.ActualContentLength(req), + reqBodyContentLength: actualContentLength(req), trace: httptrace.ContextClientTrace(ctx), peerClosed: make(chan struct{}), abort: make(chan struct{}), @@ -1318,7 +1283,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression()) + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1349,7 +1314,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) } res.Request = req res.TLS = cc.tlsState - if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 { + if res.Body == noBody && actualContentLength(req) == 0 { // If there isn't a request or response body still being // written, then wait for the stream to be closed before // RoundTrip returns. @@ -1414,7 +1379,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { - cs.cc.t.markNewGoroutine() err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1545,9 +1509,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.t.newTimer(d) + timer := time.NewTimer(d) defer timer.Stop() - respHeaderTimer = timer.C() + respHeaderTimer = timer.C respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1596,12 +1560,7 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) cc.hbuf.Reset() - res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{ - Request: req, - AddGzipHeader: cs.requestedGzip, - PeerMaxHeaderListSize: cc.peerMaxHeaderListSize, - DefaultUserAgent: defaultUserAgent, - }, func(name, value string) { + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { cc.writeHeader(name, value) }) if err != nil { @@ -1617,6 +1576,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // 
cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -1729,7 +1704,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { // Return a fatal error which aborts the retry loop. return errClientConnNotEstablished } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } @@ -2068,10 +2043,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = cc.t.now() + cc.lastIdle = time.Now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2097,7 +2072,6 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { - cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2164,9 +2138,9 @@ func (rl *clientConnReadLoop) cleanup() { if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { unusedWaitTime = cc.idleTimeout } - idleTime := cc.t.now().Sub(cc.lastActive) + idleTime := time.Now().Sub(cc.lastActive) if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { - cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) } else { @@ -2186,6 +2160,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. + cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2219,9 +2200,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.readIdleTimeout - var t timer + var t *time.Timer if readIdleTimeout != 0 { - t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2278,9 +2259,6 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } - if !cc.seenSettings { - close(cc.seenSettingsChan) - } return err } } @@ -2970,7 +2948,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error { var pingError error errc := make(chan struct{}) go func() { - cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3100,35 +3077,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read. 
+// After Close is called it puts gzip.Reader to the pool immediately +// if there is no Read in progress or later when Read completes. type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close } -func (gz *gzipReader) Read(p []byte) (n int, err error) { +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. +func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. +func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() if gz.zerr != nil { - return 0, gz.zerr + return nil, gz.zerr } if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr } } - return gz.zr.Read(p) + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil } -func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err +// release returns the gzip.Reader to the pool if Close was called during Read. +func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. 
+func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil } gz.zerr = fs.ErrClosed - return nil +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + zr, err := gz.acquire() + if err != nil { + return 0, err + } + defer gz.release(zr) + + return zr.Read(p) +} + +func (gz *gzipReader) Close() error { + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } @@ -3200,7 +3244,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = cc.t.timeSince(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index cc893adc..7de27be5 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -42,6 +42,8 @@ type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority PriorityParam } // FrameWriteRequest is a request to write a frame. @@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. +// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next, this can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. 
If the frame is // entirely consumed, it is removed from the queue. If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} + } + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go similarity index 77% rename from vendor/golang.org/x/net/http2/writesched_priority.go rename to vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index f6783339..4e33c29a 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -11,7 +11,7 @@ import ( ) // RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 +const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. type PriorityWriteSchedulerConfig struct { @@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler } } - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), + ws := &priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*priorityNodeRFC7540), maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, @@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler return ws } -type priorityNodeState int +type priorityNodeStateRFC7540 int const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle + priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota + priorityNodeClosedRFC7540 + priorityNodeIdleRFC7540 ) -// priorityNode is a node in an HTTP/2 priority tree. +// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. // Each node is associated with a single stream ID. // See RFC 7540, Section 5.3. 
-type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree +type priorityNodeRFC7540 struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings + parent *priorityNodeRFC7540 + kids *priorityNodeRFC7540 // start of the kids list + prev, next *priorityNodeRFC7540 // doubly-linked list of siblings } -func (n *priorityNode) setParent(parent *priorityNode) { +func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) { if n == parent { panic("setParent to self") } @@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) { } } -func (n *priorityNode) addBytes(b int64) { +func (n *priorityNodeRFC7540) addBytes(b int64) { n.bytes += b for ; n != nil; n = n.parent { n.subtreeBytes += b @@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) { // // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { +func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool { if !n.q.empty() && f(n, openParent) { return true } @@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f // Don't consider the root "open" when updating openParent since // we can't send data frames on the root stream (only control frames). if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) + openParent = openParent || (n.state == priorityNodeOpenRFC7540) } // Common case: only one kid or all kids have the same weight. 
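// ---- Illustrative sketch (not part of the vendored diff) ----
// The writeQueue comment earlier in this patch describes a two-stage queue:
// items are popped from currQueue[currPos:], pushed onto nextQueue, and the
// two slices are swapped when the current stage runs dry, giving amortized
// O(1) FIFO behaviour without per-shift copying. This is a minimal,
// self-contained version of that idea; the type and names here are
// hypothetical and it holds ints rather than FrameWriteRequests.
package main

import "fmt"

type twoStageQueue struct {
	curr    []int
	next    []int
	currPos int
}

func (q *twoStageQueue) empty() bool {
	return len(q.curr)-q.currPos+len(q.next) == 0
}

// push adds an item to the pending stage.
func (q *twoStageQueue) push(v int) { q.next = append(q.next, v) }

// shift removes and returns the oldest item, swapping stages when the
// current one is exhausted so the drained backing array is reused.
func (q *twoStageQueue) shift() int {
	if q.empty() {
		panic("shift on empty queue")
	}
	if q.currPos >= len(q.curr) {
		q.curr, q.currPos, q.next = q.next, 0, q.curr[:0]
	}
	v := q.curr[q.currPos]
	q.curr[q.currPos] = 0 // clear the slot, mirroring the zeroed FrameWriteRequest
	q.currPos++
	return v
}

func main() {
	var q twoStageQueue
	for i := 1; i <= 3; i++ {
		q.push(i)
	}
	for !q.empty() {
		fmt.Println(q.shift()) // prints 1, 2, 3 in FIFO order
	}
}
// ---- end sketch ----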
@@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f *tmp = append(*tmp, n.kids) n.kids.setParent(nil) } - sort.Sort(sortPriorityNodeSiblings(*tmp)) + sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp)) for i := len(*tmp) - 1; i >= 0; i-- { (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids } @@ -207,15 +207,15 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f return false } -type sortPriorityNodeSiblings []*priorityNode +type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540 -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { +func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } +func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) if bi == 0 && bk == 0 { return wi >= wk } @@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool { return bi/bk <= wi/wk } -type priorityWriteScheduler struct { +type priorityWriteSchedulerRFC7540 struct { // root is the root of the priority tree, where root.id = 0. // The root queues control frames that are not associated with any stream. - root priorityNode + root priorityNodeRFC7540 // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode + nodes map[uint32]*priorityNodeRFC7540 // maxID is the maximum stream id in nodes. maxID uint32 @@ -239,7 +239,7 @@ type priorityWriteScheduler struct { // lists of nodes that have been closed or are idle, but are kept in // the tree for improved prioritization. When the lengths exceed either // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode + closedNodes, idleNodes []*priorityNodeRFC7540 // From the config. maxClosedNodesInTree int @@ -248,19 +248,19 @@ type priorityWriteScheduler struct { enableWriteThrottle bool // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode + tmp []*priorityNodeRFC7540 // pool of empty queues for reuse. queuePool writeQueuePool } -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { +func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) { // The stream may be currently idle but cannot be opened or closed. 
if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { + if curr.state != priorityNodeIdleRFC7540 { panic(fmt.Sprintf("stream %d already opened", streamID)) } - curr.state = priorityNodeOpen + curr.state = priorityNodeOpenRFC7540 return } @@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream if parent == nil { parent = &ws.root } - n := &priorityNode{ + n := &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeOpenRFC7540, } n.setParent(parent) ws.nodes[streamID] = n @@ -285,24 +285,23 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream } } -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { +func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { if streamID == 0 { panic("violation of WriteScheduler interface: cannot close stream 0") } if ws.nodes[streamID] == nil { panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) } - if ws.nodes[streamID].state != priorityNodeOpen { + if ws.nodes[streamID].state != priorityNodeOpenRFC7540 { panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) } n := ws.nodes[streamID] - n.state = priorityNodeClosed + n.state = priorityNodeClosedRFC7540 n.addBytes(-n.bytes) q := n.q ws.queuePool.put(&q) - n.q.s = nil if ws.maxClosedNodesInTree > 0 { ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) } else { @@ -310,7 +309,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { } } -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { +func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) { if streamID == 0 { panic("adjustPriority on root") } @@ -324,11 +323,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit return } ws.maxID = streamID - n = &priorityNode{ + n = &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeIdleRFC7540, } n.setParent(&ws.root) ws.nodes[streamID] = n @@ -340,7 +339,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit parent := ws.nodes[priority.StreamDep] if parent == nil { n.setParent(&ws.root) - n.weight = priorityDefaultWeight + n.weight = priorityDefaultWeightRFC7540 return } @@ -381,8 +380,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit n.weight = priority.Weight } -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode +func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) { + var n *priorityNodeRFC7540 if wr.isControl() { n = &ws.root } else { @@ -401,8 +400,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { n.q.push(wr) } -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { +func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool { limit := int32(math.MaxInt32) if openParent { limit = ws.writeThrottleLimit @@ -428,7 +427,7 @@ func (ws *priorityWriteScheduler) Pop() (wr 
FrameWriteRequest, ok bool) { return wr, ok } -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) { if maxSize == 0 { return } @@ -442,7 +441,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max *list = append(*list, n) } -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) { for n.kids != nil { n.kids.setParent(n.parent) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go new file mode 100644 index 00000000..cb4cadc3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -0,0 +1,209 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type streamMetadata struct { + location *writeQueue + priority PriorityParam +} + +type priorityWriteSchedulerRFC9218 struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // heads contain the head of a circular list of streams. + // We put these heads within a nested array that represents urgency and + // incremental, as defined in + // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters. + // 8 represents u=0 up to u=7, and 2 represents i=false and i=true. + heads [8][2]*writeQueue + + // streams contains a mapping between each stream ID and their metadata, so + // we can quickly locate them when needing to, for example, adjust their + // priority. + streams map[uint32]streamMetadata + + // queuePool are empty queues for reuse. + queuePool writeQueuePool + + // prioritizeIncremental is used to determine whether we should prioritize + // incremental streams or not, when urgency is the same in a given Pop() + // call. + prioritizeIncremental bool +} + +func newPriorityWriteSchedulerRFC9218() WriteScheduler { + ws := &priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]streamMetadata), + } + return ws +} + +func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. 
+ ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. + u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } + + // Update the metadata. + ws.streams[streamID] = streamMetadata{ + location: q, + priority: priority, + } +} + +func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. highest urgency level). + for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. + if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. 
+ ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go index 54fe8632..737cff9e 100644 --- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go +++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/vendor/golang.org/x/net/internal/httpcommon/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go index ad3fbacd..92483d8e 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -5,7 +5,7 @@ package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,7 +82,7 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } @@ -104,7 +104,7 @@ func CanonicalHeader(v string) string { if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) } // CachedCanonicalHeader returns the canonical form of a well-known header name. diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go index 34391477..1e10f89e 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/request.go +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -5,10 +5,12 @@ package httpcommon import ( + "context" "errors" "fmt" - "net/http" "net/http/httptrace" + "net/textproto" + "net/url" "sort" "strconv" "strings" @@ -21,9 +23,21 @@ var ( ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") ) +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + // EncodeHeadersParam is parameters to EncodeHeaders. type EncodeHeadersParam struct { - Request *http.Request + Request Request // AddGzipHeader indicates that an "accept-encoding: gzip" header should be // added to the request. @@ -37,7 +51,7 @@ type EncodeHeadersParam struct { DefaultUserAgent string } -// EncodeHeadersParam is the result of EncodeHeaders. +// EncodeHeadersResult is the result of EncodeHeaders. type EncodeHeadersResult struct { HasBody bool HasTrailers bool @@ -47,11 +61,11 @@ type EncodeHeadersResult struct { // It validates a request and calls headerf with each pseudo-header and header // for the request. // The headerf function is called with the validated, canonicalized header name. 
-func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { req := param.Request // Check for invalid connection-level headers. - if err := checkConnHeaders(req); err != nil { + if err := checkConnHeaders(req.Header); err != nil { return res, err } @@ -73,7 +87,10 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( // isNormalConnect is true if this is a non-extended CONNECT request. isNormalConnect := false - protocol := req.Header.Get(":protocol") + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } if req.Method == "CONNECT" && protocol == "" { isNormalConnect = true } else if protocol != "" && req.Method != "CONNECT" { @@ -107,9 +124,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( return res, fmt.Errorf("invalid HTTP trailer %s", err) } - contentLength := ActualContentLength(req) - - trailers, err := commaSeparatedTrailers(req) + trailers, err := commaSeparatedTrailers(req.Trailer) if err != nil { return res, err } @@ -123,7 +138,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( f(":authority", host) m := req.Method if m == "" { - m = http.MethodGet + m = "GET" } f(":method", m) if !isNormalConnect { @@ -198,8 +213,8 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( f(k, v) } } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) } if param.AddGzipHeader { f("accept-encoding", "gzip") @@ -225,7 +240,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( } } - trace := httptrace.ContextClientTrace(req.Context()) + trace := httptrace.ContextClientTrace(ctx) // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { @@ -243,19 +258,19 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( } }) - res.HasBody = contentLength != 0 + res.HasBody = req.ActualContentLength != 0 res.HasTrailers = trailers != "" return res, nil } // IsRequestGzip reports whether we should add an Accept-Encoding: gzip header // for a request. -func IsRequestGzip(req *http.Request, disableCompression bool) bool { +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? if !disableCompression && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. // See: https://zlib.net/zlib_faq.html#faq39 @@ -280,22 +295,22 @@ func IsRequestGzip(req *http.Request, disableCompression bool) bool { // // Certain headers are special-cased as okay but not transmitted later. // For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. 
-func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"]) +func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { return fmt.Errorf("invalid Connection request header: %q", vv) } return nil } -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { k = CanonicalHeader(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": @@ -310,19 +325,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { return "", nil } -// ActualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. -func ActualContentLength(req *http.Request) int64 { - if req.Body == nil || req.Body == http.NoBody { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // @@ -340,7 +342,7 @@ func validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/') || v == "*" } -func validateHeaders(hdrs http.Header) string { +func validateHeaders(hdrs map[string][]string) string { for k, vv := range hdrs { if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) @@ -377,3 +379,89 @@ func shouldSendReqContentLength(method string, contentLength int64) bool { return false } } + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returning an error, + // but this ensures we don't forget to include a CountError reason. + InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. 
+ if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." + // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index c646a695..3aaffdd1 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -508,7 +508,7 @@ const eventsHTML = ` {{$el.When}} {{$el.ElapsedTime}} - {{$el.Title}} + {{$el.Title}} {{if $.Expanded}} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go index 51121a3d..e86346e8 100644 --- a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -55,7 +55,7 @@ type Config struct { // Token uses client credentials to retrieve a token. // -// The provided context optionally controls which HTTP client is used. See the oauth2.HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [oauth2.HTTPClient] variable. func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { return c.TokenSource(ctx).Token() } @@ -64,18 +64,18 @@ func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { // The token will auto-refresh as necessary. // // The provided context optionally controls which HTTP client -// is returned. See the oauth2.HTTPClient variable. +// is returned. See the [oauth2.HTTPClient] variable. // -// The returned Client and its Transport should not be modified. +// The returned [http.Client] and its Transport should not be modified. func (c *Config) Client(ctx context.Context) *http.Client { return oauth2.NewClient(ctx, c.TokenSource(ctx)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [oauth2.TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context and the // client ID and client secret. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. 
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { source := &tokenSource{ ctx: ctx, diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go index 03265e88..8c7c475f 100644 --- a/vendor/golang.org/x/oauth2/internal/doc.go +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -2,5 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. +// Package internal contains support packages for [golang.org/x/oauth2]. package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index 14989bea..71ea6ad1 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -13,7 +13,7 @@ import ( ) // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an [*rsa.PrivateKey]. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index e83ddeef..8389f246 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "mime" "net/http" @@ -26,9 +25,9 @@ import ( // the requests to access protected resources on the OAuth 2.0 // provider's backend. // -// This type is a mirror of oauth2.Token and exists to break +// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break // an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. +// should convert this Token into an [golang.org/x/oauth2.Token] before use. type Token struct { // AccessToken is the token that authorizes and authenticates // the requests. @@ -50,9 +49,16 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // Raw optionally contains extra metadata from the server // when updating a token. - Raw interface{} + Raw any } // tokenJSON is the struct representing the HTTP response from OAuth2 @@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { return nil } -// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. -// -// Deprecated: this function no longer does anything. Caller code that -// wants to avoid potential extra HTTP requests made during -// auto-probing of the provider's auth style should set -// Endpoint.AuthStyle. -func RegisterBrokenAuthHeaderProvider(tokenURL string) {} - // AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. 
type AuthStyle int @@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { return c } +type authStyleCacheKey struct { + url string + clientID string +} + // AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that @@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { // small. type AuthStyleCache struct { mu sync.Mutex - m map[string]AuthStyle // keyed by tokenURL + m map[authStyleCacheKey]AuthStyle } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { +func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) { c.mu.Lock() defer c.mu.Unlock() - style, ok = c.m[tokenURL] + style, ok = c.m[authStyleCacheKey{tokenURL, clientID}] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { +func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) { c.mu.Lock() defer c.mu.Unlock() if c.m == nil { - c.m = make(map[string]AuthStyle) + c.m = make(map[authStyleCacheKey]AuthStyle) } - c.m[tokenURL] = v + c.m[authStyleCacheKey{tokenURL, clientID}] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values { } func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { - needsAuthStyleProbe := authStyle == 0 + needsAuthStyleProbe := authStyle == AuthStyleUnknown if needsAuthStyleProbe { - if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - styleCache.setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, clientID, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. 
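// ---- Illustrative sketch (not part of the vendored diff) ----
// The ExpiresIn field added to the internal Token above carries the
// wire-format "expires_in" value in seconds, and its comment notes it is the
// application's responsibility to derive an absolute Expiry from it when
// required. A minimal way to do that, using a hypothetical local struct
// since the vendored internal package is not importable:
package main

import (
	"fmt"
	"time"
)

// token mirrors just the two fields of interest.
type token struct {
	ExpiresIn int64     // relative lifetime in seconds, 0 if absent
	Expiry    time.Time // absolute expiry, zero if not yet derived
}

func main() {
	t := token{ExpiresIn: 3600}
	// Anchor the relative lifetime to the time the token was received.
	if t.ExpiresIn > 0 && t.Expiry.IsZero() {
		t.Expiry = time.Now().Add(time.Duration(t.ExpiresIn) * time.Second)
	}
	fmt.Println("token expires at", t.Expiry)
}
// ---- end sketch ----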
@@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { TokenType: tj.TokenType, RefreshToken: tj.RefreshToken, Expiry: tj.expiry(), - Raw: make(map[string]interface{}), + ExpiresIn: int64(tj.ExpiresIn), + Raw: make(map[string]any), } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index b9db01dd..afc0aeb2 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -9,8 +9,8 @@ import ( "net/http" ) -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate an [*http.Client] value with a context. var HTTPClient ContextKey // ContextKey is just an empty struct. It exists so HTTPClient can be diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 27ab0613..9bc48440 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -4,7 +4,7 @@ // Package jws provides a partial implementation // of JSON Web Signature encoding and decoding. -// It exists to support the golang.org/x/oauth2 package. +// It exists to support the [golang.org/x/oauth2] package. // // See RFC 7515. // @@ -48,7 +48,7 @@ type ClaimSet struct { // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` + PrivateClaims map[string]any `json:"-"` } func (c *ClaimSet) encode() (string, error) { @@ -152,7 +152,7 @@ func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { } // Encode encodes a signed JWS with provided header and claim set. -// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +// This invokes [EncodeWithSigner] using [crypto/rsa.SignPKCS1v15] with the given RSA private key. func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { sg := func(data []byte) (sig []byte, err error) { h := sha256.New() diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go index b2bf1829..38a92dac 100644 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -13,7 +13,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -69,7 +68,7 @@ type Config struct { // PrivateClaims optionally specifies custom private claims in the JWT. // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - PrivateClaims map[string]interface{} + PrivateClaims map[string]any // UseIDToken optionally specifies whether ID token should be used instead // of access token when the server returns both. 
@@ -136,7 +135,7 @@ func (js jwtSource) Token() (*oauth2.Token, error) { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } @@ -148,10 +147,8 @@ func (js jwtSource) Token() (*oauth2.Token, error) { } // tokenRes is the JSON response body. var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now + oauth2.Token + IDToken string `json:"id_token"` } if err := json.Unmarshal(body, &tokenRes); err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) @@ -160,7 +157,7 @@ func (js jwtSource) Token() (*oauth2.Token, error) { AccessToken: tokenRes.AccessToken, TokenType: tokenRes.TokenType, } - raw := make(map[string]interface{}) + raw := make(map[string]any) json.Unmarshal(body, &raw) // no error checks for optional fields token = token.WithExtra(raw) diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index eacdd7fd..3e3b6306 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -9,7 +9,6 @@ package oauth2 // import "golang.org/x/oauth2" import ( - "bytes" "context" "errors" "net/http" @@ -22,9 +21,9 @@ import ( ) // NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). +// your own [context.Context]. // -// Deprecated: Use context.Background() or context.TODO() instead. +// Deprecated: Use [context.Background] or [context.TODO] instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. @@ -37,8 +36,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. -// For the client credentials 2-legged OAuth2 flow, see the clientcredentials -// package (https://golang.org/x/oauth2/clientcredentials). +// For the client credentials 2-legged OAuth2 flow, see the +// [golang.org/x/oauth2/clientcredentials] package. type Config struct { // ClientID is the application's ID. ClientID string @@ -46,7 +45,7 @@ type Config struct { // ClientSecret is the application's secret. ClientSecret string - // Endpoint contains the resource server's token endpoint + // Endpoint contains the authorization server's token endpoint // URLs. These are constants specific to each server and are // often available via site-specific packages, such as // google.Endpoint or github.Endpoint. @@ -135,7 +134,7 @@ type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters // to a provider's authorization endpoint. func SetAuthURLParam(key, value string) AuthCodeOption { return setParam{key, value} @@ -148,8 +147,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // request and callback. The authorization server includes this value when // redirecting the user agent back to the client. // -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. 
+// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well +// as [ApprovalForce]. // // To protect against CSRF attacks, opts should include a PKCE challenge // (S256ChallengeOption). Not all servers support PKCE. An alternative is to @@ -158,7 +157,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // PKCE), https://www.oauth.com/oauth2-servers/pkce/ and // https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer + var buf strings.Builder buf.WriteString(c.Endpoint.AuthURL) v := url.Values{ "response_type": {"code"}, @@ -194,7 +193,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { // and when other authorization grant types are not available." // See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { v := url.Values{ "grant_type": {"password"}, @@ -212,10 +211,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable. // -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state") if you are +// The code will be in the [http.Request.FormValue]("code"). Before +// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are // using it to protect against CSRF attacks. // // If using PKCE to protect against CSRF attacks, opts should include a @@ -242,10 +241,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } -// TokenSource returns a TokenSource that returns t until t expires, +// TokenSource returns a [TokenSource] that returns t until t expires, // automatically refreshing it as necessary using the provided context. // -// Most users will use Config.Client instead. +// Most users will use [Config.Client] instead. func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { tkr := &tokenRefresher{ ctx: ctx, @@ -260,7 +259,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { } } -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// tokenRefresher is a TokenSource that makes "grant_type=refresh_token" // HTTP requests to renew a token using a RefreshToken. type tokenRefresher struct { ctx context.Context // used to get HTTP requests @@ -305,8 +304,7 @@ type reuseTokenSource struct { } // Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. +// refresh the current token and return the new one. 
func (s *reuseTokenSource) Token() (*Token, error) { s.mu.Lock() defer s.mu.Unlock() @@ -322,7 +320,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { return t, nil } -// StaticTokenSource returns a TokenSource that always returns the same token. +// StaticTokenSource returns a [TokenSource] that always returns the same token. // Because the provided token t is never refreshed, StaticTokenSource is only // useful for tokens that never expire. func StaticTokenSource(t *Token) TokenSource { @@ -338,16 +336,16 @@ func (s staticTokenSource) Token() (*Token, error) { return s.t, nil } -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. +// HTTPClient is the context key to use with [context.WithValue] +// to associate a [*http.Client] value with a context. var HTTPClient internal.ContextKey -// NewClient creates an *http.Client from a Context and TokenSource. +// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource]. // The returned client is not valid beyond the lifetime of the context. // -// Note that if a custom *http.Client is provided via the Context it +// Note that if a custom [*http.Client] is provided via the [context.Context] it // is used only for token acquisition and is not used to configure the -// *http.Client returned from NewClient. +// [*http.Client] returned from NewClient. // // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. This exists to support related OAuth2 @@ -368,7 +366,7 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { } } -// ReuseTokenSource returns a TokenSource which repeatedly returns the +// ReuseTokenSource returns a [TokenSource] which repeatedly returns the // same token as long as it's valid, starting with t. // When its cached token is invalid, a new token is obtained from src. // @@ -376,10 +374,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { // (such as a file on disk) between runs of a program, rather than // obtaining new tokens unnecessarily. // -// The initial token t may be nil, in which case the TokenSource is +// The initial token t may be nil, in which case the [TokenSource] is // wrapped in a caching version if it isn't one already. This also // means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. +// [TokenSource] without adverse effects. func ReuseTokenSource(t *Token, src TokenSource) TokenSource { // Don't wrap a reuseTokenSource in itself. That would work, // but cause an unnecessary number of mutex operations. @@ -397,8 +395,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { } } -// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the -// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the +// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is // configurable. The expiration time of a token is calculated as // t.Expiry.Add(-earlyExpiry). func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index 6a95da97..cea8374d 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -1,6 +1,7 @@ // Copyright 2023 The Go Authors. 
All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package oauth2 import ( @@ -20,9 +21,9 @@ const ( // This follows recommendations in RFC 7636. // // A fresh verifier should be generated for each authorization. -// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange -// (or Config.DeviceAccessToken). +// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] +// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken] +// with [VerifierOption]. func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be // used to create a 32-octet sequence. The octet sequence is then @@ -36,22 +37,22 @@ func GenerateVerifier() string { return base64.RawURLEncoding.EncodeToString(data) } -// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be -// passed to Config.Exchange or Config.DeviceAccessToken only. +// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be +// passed to [Config.Exchange] or [Config.DeviceAccessToken]. func VerifierOption(verifier string) AuthCodeOption { return setParam{k: codeVerifierKey, v: verifier} } // S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. // -// Prefer to use S256ChallengeOption where possible. +// Prefer to use [S256ChallengeOption] where possible. func S256ChallengeFromVerifier(verifier string) string { sha := sha256.Sum256([]byte(verifier)) return base64.RawURLEncoding.EncodeToString(sha[:]) } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth +// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 8c31136c..239ec329 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -44,7 +44,7 @@ type Token struct { // Expiry is the optional expiration time of the access token. // - // If zero, TokenSource implementations will reuse the same + // If zero, [TokenSource] implementations will reuse the same // token forever and RefreshToken or equivalent // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` @@ -58,7 +58,7 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. - raw interface{} + raw any // expiryDelta is used to calculate when a token is considered // expired, by subtracting from Expiry. If zero, defaultExpiryDelta @@ -86,16 +86,16 @@ func (t *Token) Type() string { // SetAuthHeader sets the Authorization header to r using the access // token in t. // -// This method is unnecessary when using Transport or an HTTP Client +// This method is unnecessary when using [Transport] or an HTTP Client // returned by this package. func (t *Token) SetAuthHeader(r *http.Request) { r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) } -// WithExtra returns a new Token that's a clone of t, but using the +// WithExtra returns a new [Token] that's a clone of t, but using the // provided raw extra map. 
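The revised pkce.go comments spell out the pairing: one fresh verifier per authorization, S256ChallengeOption on the authorization URL, VerifierOption on the exchange. A minimal sketch of that wiring, assuming placeholder endpoint URLs and a hard-coded state and authorization code that a real handler would receive from the redirect:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "client-id", // placeholder
		RedirectURL: "https://example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth2/auth",
			TokenURL: "https://provider.example.com/oauth2/token",
		},
	}

	// One fresh verifier per authorization attempt.
	verifier := oauth2.GenerateVerifier()

	// The derived challenge goes on the authorization URL. A real handler
	// would also use a random state value, per the CSRF guidance above.
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// The verifier itself is presented only when exchanging the code.
	code := "code-from-redirect" // placeholder
	tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token acquired, expires:", tok.Expiry)
}
```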
This is only intended for use by packages // implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { +func (t *Token) WithExtra(extra any) *Token { t2 := new(Token) *t2 = *t t2.raw = extra @@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra returns an extra field. // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { +func (t *Token) Extra(key string) any { + if raw, ok := t.raw.(map[string]any); ok { return raw[key] } @@ -163,6 +163,7 @@ func tokenFromInternal(t *internal.Token) *Token { TokenType: t.TokenType, RefreshToken: t.RefreshToken, Expiry: t.Expiry, + ExpiresIn: t.ExpiresIn, raw: t.Raw, } } diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 90657915..8bbebbac 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -11,12 +11,12 @@ import ( "sync" ) -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. +// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests, +// wrapping a base [http.RoundTripper] and adding an Authorization header +// with a token from the supplied [TokenSource]. // // Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. +// higher-level [Config.Client] method instead. type Transport struct { // Source supplies the token to add to outgoing requests' // Authorization headers. @@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } - req2 := cloneRequest(req) // per RoundTripper contract + req2 := req.Clone(req.Context()) token.SetAuthHeader(req2) // req.Body is assumed to be closed by the base RoundTripper. @@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper { } return http.DefaultTransport } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - return r2 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 34c9ae76..63541994 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -92,9 +92,6 @@ var ARM64 struct { HasSHA2 bool // SHA2 hardware implementation HasCRC32 bool // CRC32 hardware implementation HasATOMICS bool // Atomic memory operation instruction set - HasHPDS bool // Hierarchical permission disables in translations tables - HasLOR bool // Limited ordering regions - HasPAN bool // Privileged access never HasFPHP bool // Half precision floating-point instruction set HasASIMDHP bool // Advanced SIMD half precision instruction set HasCPUID bool // CPUID identification scheme registers diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f449c679..af2aa99f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -65,10 +65,10 @@ func setMinimalFeatures() { func readARM64Registers() { Initialized = true - parseARM64SystemRegisters(getisar0(), getisar1(), getmmfr1(), getpfr0()) + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) } -func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { // ID_AA64ISAR0_EL1 switch extractBits(isar0, 4, 7) { case 1: @@ -152,22 +152,6 @@ func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { ARM64.HasI8MM = true } - // ID_AA64MMFR1_EL1 - switch extractBits(mmfr1, 12, 15) { - case 1, 2: - ARM64.HasHPDS = true - } - - switch extractBits(mmfr1, 16, 19) { - case 1: - ARM64.HasLOR = true - } - - switch extractBits(mmfr1, 20, 23) { - case 1, 2, 3: - ARM64.HasPAN = true - } - // ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index a4f24b3b..3b0450a0 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -20,13 +20,6 @@ TEXT ·getisar1(SB),NOSPLIT,$0-8 MOVD R0, ret+0(FP) RET -// func getmmfr1() uint64 -TEXT ·getmmfr1(SB),NOSPLIT,$0-8 - // get Memory Model Feature Register 1 into x0 - MRS ID_AA64MMFR1_EL1, R0 - MOVD R0, ret+0(FP) - RET - // func getpfr0() uint64 TEXT ·getpfr0(SB),NOSPLIT,$0-8 // get Processor Feature Register 0 into x0 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index e3fc5a8d..6ac6e1ef 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -8,6 +8,5 @@ package cpu func getisar0() uint64 func getisar1() uint64 -func getmmfr1() uint64 func getpfr0() uint64 func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 8df2079e..7f194678 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -8,5 +8,4 @@ package cpu func getisar0() uint64 { return 0 } func getisar1() uint64 { return 0 } -func getmmfr1() uint64 { return 0 } func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go index 19aea063..ebfb3fc8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -167,7 +167,7 @@ func doinit() { setMinimalFeatures() return } - parseARM64SystemRegisters(cpuid.aa64isar0, 
cpuid.aa64isar1, cpuid.aa64mmfr1, cpuid.aa64pfr0) + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) Initialized = true } diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go index 87fd3a77..85b64d5c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -59,7 +59,7 @@ func doinit() { if !ok { return } - parseARM64SystemRegisters(isar0, isar1, 0, 0) + parseARM64SystemRegisters(isar0, isar1, 0) Initialized = true } diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 42517077..fd39be4e 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -256,6 +256,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -613,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index d0a75da5..120a7b35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1615,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 1c37f9fb..97a61fc5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6f54d34a..a0d6d498 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 783ec5c1..dd9c903f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ca83d3ba..384c61ca 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -120,6 +120,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + 
IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 607e611c..6384c983 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9cb5bd3..553c1c6f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 65b078a6..b3339f20 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5298a303..177091d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7bc557c8..c5abf156 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 152399bb..f1f3fadf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 1a1ce240..203ad9c5 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 4231a1fb..4b9abcb2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 21c0e952..f8798303 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f00d1cd7..64347eb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index bc8d539e..7d719117 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec..50e8e644 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index df6bf948..0ddd81c0 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -20,12 +20,14 @@ func isTerminal(fd int) bool { return err == nil } +// This is intended to be used on a console input handle. 
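The comment being added here documents that the raw-mode switch applies to the console *input* handle. From the caller's side, the exported API is term.MakeRaw on the input file descriptor, paired with term.Restore. A minimal sketch, assuming the program's own stdin is the terminal:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd()) // the console input handle / file descriptor

	oldState, err := term.MakeRaw(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, "not a terminal:", err)
		return
	}
	// Always restore the previous mode, or the shell is left in raw mode.
	defer term.Restore(fd, oldState)

	// Read a single byte in raw mode (no line buffering, no echo).
	buf := make([]byte, 1)
	if _, err := os.Stdin.Read(buf); err == nil {
		fmt.Printf("\r\nread byte: %q\r\n", buf[0])
	}
}
```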
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode func makeRaw(fd int) (*State, error) { var st uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { return nil, err } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT) raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index 13e9a64a..9255449b 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -146,6 +146,7 @@ const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' + keyLF = '\n' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota @@ -412,7 +413,7 @@ func (t *Terminal) eraseNPreviousChars(n int) { } } -// countToLeftWord returns then number of characters from the cursor to the +// countToLeftWord returns the number of characters from the cursor to the // start of the previous word. func (t *Terminal) countToLeftWord() int { if t.pos == 0 { @@ -437,7 +438,7 @@ func (t *Terminal) countToLeftWord() int { return t.pos - pos } -// countToRightWord returns then number of characters from the cursor to the +// countToRightWord returns the number of characters from the cursor to the // start of the next word. func (t *Terminal) countToRightWord() int { pos := t.pos @@ -477,7 +478,7 @@ func visualLength(runes []rune) int { return length } -// histroryAt unlocks the terminal and relocks it while calling History.At. +// historyAt unlocks the terminal and relocks it while calling History.At. func (t *Terminal) historyAt(idx int) (string, bool) { t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. defer t.lock.Lock() // panic in At (or Len) protection. @@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) { // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { + if t.pasteActive && key != keyEnter && key != keyLF { t.addKeyToLine(key) return } @@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.setLine(runes, len(runes)) } } - case keyEnter: + case keyEnter, keyLF: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) @@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) { if !t.pasteActive { lineIsPasted = false } + // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line. 
+ if key == keyEnter && len(rest) > 0 && rest[0] == keyLF { + rest = rest[1:] + } line, lineOk = t.handleKey(key) } if len(rest) > 0 { diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547..fb827323 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index f388426b..d083dde3 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3cd9a5bb..e017ef07 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -703,6 +703,65 @@ type QuotaFailure_Violation struct { // For example: "Service disabled" or "Daily Limit for read operations // exceeded". Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The API Service from which the `QuotaFailure.Violation` orginates. In + // some cases, Quota issues originate from an API Service other than the one + // that was called. In other words, a dependency of the called API Service + // could be the cause of the `QuotaFailure`, and this field would have the + // dependency API service name. + // + // For example, if the called API is Kubernetes Engine API + // (container.googleapis.com), and a quota violation occurs in the + // Kubernetes Engine API itself, this field would be + // "container.googleapis.com". On the other hand, if the quota violation + // occurs when the Kubernetes Engine API creates VMs in the Compute Engine + // API (compute.googleapis.com), this field would be + // "compute.googleapis.com". + ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"` + // The metric of the violated quota. A quota metric is a named counter to + // measure usage, such as API requests or CPUs. 
When an activity occurs in a + // service, such as Virtual Machine allocation, one or more quota metrics + // may be affected. + // + // For example, "compute.googleapis.com/cpus_per_vm_family", + // "storage.googleapis.com/internet_egress_bandwidth". + QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"` + // The id of the violated quota. Also know as "limit name", this is the + // unique identifier of a quota in the context of an API service. + // + // For example, "CPUS-PER-VM-FAMILY-per-project-region". + QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"` + // The dimensions of the violated quota. Every non-global quota is enforced + // on a set of dimensions. While quota metric defines what to count, the + // dimensions specify for what aspects the counter should be increased. + // + // For example, the quota "CPUs per region per VM family" enforces a limit + // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions + // "region" and "vm_family". And if the violation occurred in region + // "us-central1" and for VM family "n1", the quota_dimensions would be, + // + // { + // "region": "us-central1", + // "vm_family": "n1", + // } + // + // When a quota is enforced globally, the quota_dimensions would always be + // empty. + QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The enforced quota value at the time of the `QuotaFailure`. + // + // For example, if the enforced quota value at the time of the + // `QuotaFailure` on the number of CPUs is "10", then the value of this + // field would reflect this quantity. + QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"` + // The new quota value being rolled out at the time of the violation. At the + // completion of the rollout, this value will be enforced in place of + // quota_value. If no rollout is in progress at the time of the violation, + // this field is not set. + // + // For example, if at the time of the violation a rollout is in progress + // changing the number of CPUs quota from 10 to 20, 20 would be the value of + // this field. + FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"` } func (x *QuotaFailure_Violation) Reset() { @@ -751,6 +810,48 @@ func (x *QuotaFailure_Violation) GetDescription() string { return "" } +func (x *QuotaFailure_Violation) GetApiService() string { + if x != nil { + return x.ApiService + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaMetric() string { + if x != nil { + return x.QuotaMetric + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaId() string { + if x != nil { + return x.QuotaId + } + return "" +} + +func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string { + if x != nil { + return x.QuotaDimensions + } + return nil +} + +func (x *QuotaFailure_Violation) GetQuotaValue() int64 { + if x != nil { + return x.QuotaValue + } + return 0 +} + +func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 { + if x != nil && x.FutureQuotaValue != nil { + return *x.FutureQuotaValue + } + return 0 +} + // A message type used to describe a single precondition failure. 
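The new QuotaFailure.Violation fields above (api_service, quota_metric, quota_id, quota_dimensions, quota_value, future_quota_value) are only useful if callers unpack them from a failed RPC. A minimal sketch of that, assuming the updated errdetails package from this vendor bump and a placeholder error in place of a real RPC failure:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// reportQuotaFailure prints any QuotaFailure details attached to err.
func reportQuotaFailure(err error) {
	st, ok := status.FromError(err)
	if !ok {
		return // not a gRPC status error
	}
	for _, d := range st.Details() {
		qf, ok := d.(*errdetails.QuotaFailure)
		if !ok {
			continue
		}
		for _, v := range qf.GetViolations() {
			fmt.Printf("quota %q (metric %q) exceeded on %s\n",
				v.GetQuotaId(), v.GetQuotaMetric(), v.GetApiService())
			for dim, val := range v.GetQuotaDimensions() {
				fmt.Printf("  dimension %s=%s\n", dim, val)
			}
			fmt.Printf("  enforced value: %d (future: %d)\n",
				v.GetQuotaValue(), v.GetFutureQuotaValue())
		}
	}
}

func main() {
	// Placeholder: in real code err would come from a failed RPC and would
	// carry QuotaFailure details attached by the server.
	err := status.Error(codes.ResourceExhausted, "quota exceeded")
	reportQuotaFailure(err)
}
```

Servers that populate these fields attach the QuotaFailure message via status details; clients that do not unpack details still see only the top-level code and message.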
type PreconditionFailure_Violation struct { state protoimpl.MessageState @@ -775,7 +876,7 @@ type PreconditionFailure_Violation struct { func (x *PreconditionFailure_Violation) Reset() { *x = PreconditionFailure_Violation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -788,7 +889,7 @@ func (x *PreconditionFailure_Violation) String() string { func (*PreconditionFailure_Violation) ProtoMessage() {} func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[12] + mi := &file_google_rpc_error_details_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -886,7 +987,7 @@ type BadRequest_FieldViolation struct { func (x *BadRequest_FieldViolation) Reset() { *x = BadRequest_FieldViolation{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -899,7 +1000,7 @@ func (x *BadRequest_FieldViolation) String() string { func (*BadRequest_FieldViolation) ProtoMessage() {} func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[13] + mi := &file_google_rpc_error_details_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -958,7 +1059,7 @@ type Help_Link struct { func (x *Help_Link) Reset() { *x = Help_Link{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -971,7 +1072,7 @@ func (x *Help_Link) String() string { func (*Help_Link) ProtoMessage() {} func (x *Help_Link) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[14] + mi := &file_google_rpc_error_details_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1029,79 +1130,102 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, - 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, - 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 
0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, - 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, - 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, - 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, - 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, - 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, - 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0x12, 0x21, 0x0a, 0x0c, 0x71, + 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19, + 0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f, + 0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, + 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75, + 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, + 0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, + 0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, + 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a, + 0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, + 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x72, + 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, + 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, + 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 
0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, + 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1116,7 +1240,7 @@ func file_google_rpc_error_details_proto_rawDescGZIP() []byte { return file_google_rpc_error_details_proto_rawDescData } -var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo (*RetryInfo)(nil), // 1: google.rpc.RetryInfo @@ -1130,24 +1254,26 @@ var file_google_rpc_error_details_proto_goTypes = []interface{}{ (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage nil, // 10: google.rpc.ErrorInfo.MetadataEntry (*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation - (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation - (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation - (*Help_Link)(nil), // 14: google.rpc.Help.Link - (*durationpb.Duration)(nil), // 15: google.protobuf.Duration + nil, // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + (*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 14: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 15: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 16: google.protobuf.Duration } var file_google_rpc_error_details_proto_depIdxs = []int32{ 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry - 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation - 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation - 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 13, // 3: google.rpc.PreconditionFailure.violations:type_name -> 
google.rpc.PreconditionFailure.Violation + 14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry + 9, // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } @@ -1288,7 +1414,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PreconditionFailure_Violation); i { case 0: return &v.state @@ -1300,7 +1426,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BadRequest_FieldViolation); i { case 0: return &v.state @@ -1312,7 +1438,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Help_Link); i { case 0: return &v.state @@ -1325,13 +1451,14 @@ func file_google_rpc_error_details_proto_init() { } } } + file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_rpc_error_details_proto_rawDesc, NumEnums: 0, - NumMessages: 15, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 6ad1b1c1..06a3f710 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index d9bfa6e1..2079de7b 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,73 +1,159 @@ # How to contribute -We definitely welcome your patches and contributions to gRPC! Please read the gRPC -organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) -and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. +We welcome your patches and contributions to gRPC! 
Please read the gRPC +organization's [governance +rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before +proceeding. If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). +[Contributor License +Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create +your first PR, a link will be added as a comment that contains the steps needed +to complete this process. + +## Getting Started + +A great way to start is by searching through our open issues. [Unassigned issues +labeled as "help +wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee) +are especially nice for first-time contributors, as they should be well-defined +problems that already have agreed-upon solutions. + +## Code Style + +We follow [Google's published Go style +guide](https://google.github.io/styleguide/go/). Note that there are three +primary documents that make up this style guide; please follow them as closely +as possible. If a reviewer recommends something that contradicts those +guidelines, there may be valid reasons to do so, but it should be rare. ## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly. + +Please read the following carefully to ensure your contributions can be merged +smoothly and quickly. + +### PR Contents - Create **small PRs** that are narrowly focused on **addressing a single - concern**. We often times receive PRs that are trying to fix several things at - a time, but only one fix is considered acceptable, nothing gets merged and - both author's & review's time is wasted. Create more PRs to address different - concerns and everyone will be happy. + concern**. We often receive PRs that attempt to fix several things at the same + time, and if one part of the PR has a problem, that will hold up the entire + PR. + +- If your change does not address an **open issue** with an **agreed + resolution**, consider opening an issue and discussing it first. If you are + suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). Many new features that are not + bug fixes will require cross-language agreement. + +- If you want to fix **formatting or style**, consider whether your changes are + an obvious improvement or might be considered a personal preference. If a + style change is based on preference, it likely will not be accepted. If it + corrects widely agreed-upon anti-patterns, then please do create a PR and + explain the benefits of the change. + +- For correcting **misspellings**, please be aware that we use some terms that + are sometimes flagged by spell checkers. As an example, "if an only if" is + often written as "iff". Please do not make spelling correction changes unless + you are certain they are misspellings. + +- **All tests need to be passing** before your change can be merged. We + recommend you run tests locally before creating your PR to catch breakages + early on: -- If you are searching for features to work on, issues labeled [Status: Help - Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) - is a great place to start. 
These issues are well-documented and usually can be - resolved with a single pull request. + - `./scripts/vet.sh` to catch vet errors. + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests. + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file - and update the year. + Note that we have a multi-module repo, so `go test` commands may need to be + run from the root of each module in order to cause all tests to run. + + *Alternatively*, you may find it easier to push your changes to your fork on + GitHub, which will trigger a GitHub Actions run that you can use to verify + everything is passing. + +- Note that there are two GitHub actions checks that need not be green: + + 1. We test the freshness of the generated proto code we maintain via the + `vet-proto` check. If the source proto files are updated, but our repo is + not updated, an optional checker will fail. This will be fixed by our team + in a separate PR and will not prevent the merge of your PR. + + 2. We run a checker that will fail if there is any change in dependencies of + an exported package via the `dependencies` check. If new dependencies are + added that are not appropriate, we may not accept your PR (see below). + +- If you are adding a **new file**, make sure it has the **copyright message** + template at the top as a comment. You can copy the message from an existing + file and update the year. - The grpc package should only depend on standard Go packages and a small number - of exceptions. If your contribution introduces new dependencies which are NOT - in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a - discussion with gRPC-Go authors and consultants. + of exceptions. **If your contribution introduces new dependencies**, you will + need a discussion with gRPC-Go maintainers. -- For speculative changes, consider opening an issue and discussing it first. If - you are suggesting a behavioral or API change, consider starting with a [gRFC - proposal](https://github.com/grpc/proposal). +### PR Descriptions -- Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a GitHub issue if it exists. +- **PR titles** should start with the name of the component being addressed, or + the type of change. Examples: transport, client, server, round_robin, xds, + cleanup, deps. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the - benefits of the change. +- Read and follow the **guidelines for PR titles and descriptions** here: + https://google.github.io/eng-practices/review/developer/cl-descriptions.html -- Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We'll mark it as `Status: Requires - Reporter Clarification` if we expect you to respond to these comments in a - timely manner. If the PR remains inactive for 6 days, it will be marked as - `stale` and automatically close 7 days after that if we don't hear back from - you. + *particularly* the sections "First Line" and "Body is Informative". 
-- Maintain **clean commit history** and use **meaningful commit messages**. PRs - with messy commit history are difficult to review and won't be merged. Use - `rebase -i upstream/master` to curate your commit history and/or to bring in - latest changes from master (but avoid rebasing in the middle of a code - review). + Note: your PR description will be used as the git commit message in a + squash-and-merge if your PR is approved. We may make changes to this as + necessary. -- Keep your PR up to date with upstream/master (if there are merge conflicts, we - can't really merge your change). +- **Does this PR relate to an open issue?** On the first line, please use the + tag `Fixes #` to ensure the issue is closed when the PR is merged. Or + use `Updates #` if the PR is related to an open issue, but does not fix + it. Consider filing an issue if one does not already exist. -- **All tests need to be passing** before your change can be merged. We - recommend you **run tests locally** before creating your PR to catch breakages - early on. - - `./scripts/vet.sh` to catch vet errors - - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode +- PR descriptions *must* conclude with **release notes** as follows: + + ``` + RELEASE NOTES: + * : + ``` + + This need not match the PR title. + + The summary must: + + * be something that gRPC users will understand. + + * clearly explain the feature being added, the issue being fixed, or the + behavior being changed, etc. If fixing a bug, be clear about how the bug + can be triggered by an end-user. + + * begin with a capital letter and use complete sentences. -- Exceptions to the rules can be made if there's a compelling reason for doing so. + * be as short as possible to describe the change being made. + + If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or + GitHub-related, use `RELEASE NOTES: n/a`. + +### PR Process + +- Please **self-review** your code changes before sending your PR. This will + prevent simple, obvious errors from causing delays. + +- Maintain a **clean commit history** and use **meaningful commit messages**. + PRs with messy commit histories are difficult to review and won't be merged. + Before sending your PR, ensure your changes are based on top of the latest + `upstream/master` commits, and avoid rebasing in the middle of a code review. + You should **never use `git push -f`** unless absolutely necessary during a + review, as it can interfere with GitHub's tracking of comments. + +- Unless your PR is trivial, you should **expect reviewer comments** that you + will need to address before merging. We'll label the PR as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale`, and we will automatically close it after 7 days if we don't hear back + from you. Please feel free to ping issues or bugs if you do not get a response + within a week. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 5d4096d4..df35bb9a 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,19 @@ for general contribution guidelines. 
## Maintainers (in alphabetical order) -- [aranjans](https://github.com/aranjans), Google LLC - [arjan-bal](https://github.com/arjan-bal), Google LLC - [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC - [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez) +- [aranjans](https://github.com/aranjans) - [canguler](https://github.com/canguler) - [cesarghali](https://github.com/cesarghali) +- [erm-g](https://github.com/erm-g) - [iamqizhao](https://github.com/iamqizhao) - [jeanbza](https://github.com/jeanbza) - [jtattermusch](https://github.com/jtattermusch) @@ -32,5 +30,7 @@ for general contribution guidelines. - [matt-kwong](https://github.com/matt-kwong) - [menghanl](https://github.com/menghanl) - [nicolasnoble](https://github.com/nicolasnoble) +- [purnesh42h](https://github.com/purnesh42h) - [srini100](https://github.com/srini100) - [yongni](https://github.com/yongni) +- [zasweq](https://github.com/zasweq) diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index b572707c..f9a88d59 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -32,6 +32,7 @@ import "google.golang.org/grpc" - [Low-level technical docs](Documentation) from this repository - [Performance benchmark][] - [Examples](examples) +- [Contribution guidelines](CONTRIBUTING.md) ## FAQ diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index c9b343c7..b1264017 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -360,6 +360,10 @@ type Balancer interface { // call SubConn.Shutdown for its existing SubConns; however, this will be // required in a future release, so it is recommended. Close() + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() } // ExitIdler is an optional interface for balancers to implement. If @@ -367,8 +371,8 @@ type Balancer interface { // the ClientConn is idle. If unimplemented, ClientConn.Connect will cause // all SubConns to connect. // -// Notice: it will be required for all balancers to implement this in a future -// release. +// Deprecated: All balancers must implement this interface. This interface will +// be removed in a future release. type ExitIdler interface { // ExitIdle instructs the LB policy to reconnect to backends / exit the // IDLE state, if appropriate and possible. 
Note that SubConns that enter diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index d5ed172a..4d576876 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: resolver.NewAddressMap(), + subConns: resolver.NewAddressMapV2[balancer.SubConn](), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -65,7 +65,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns *resolver.AddressMap + subConns *resolver.AddressMapV2[balancer.SubConn] scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := resolver.NewAddressMap() + addrsSet := resolver.NewAddressMapV2[any]() for _, a := range s.ResolverState.Addresses { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { @@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } for _, a := range b.subConns.Keys() { - sci, _ := b.subConns.Get(a) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(a) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { sc.Shutdown() @@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() { // Filter out all ready SCs from full subConn map. for _, addr := range b.subConns.Keys() { - sci, _ := b.subConns.Get(addr) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(addr) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 421c4fec..360db08e 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -37,6 +37,8 @@ import ( "google.golang.org/grpc/resolver" ) +var randIntN = rand.IntN + // ChildState is the balancer state of a child along with the endpoint which // identifies the child balancer. type ChildState struct { @@ -45,7 +47,15 @@ type ChildState struct { // Balancer exposes only the ExitIdler interface of the child LB policy. // Other methods of the child policy are called only by endpointsharding. - Balancer balancer.ExitIdler + Balancer ExitIdler +} + +// ExitIdler provides access to only the ExitIdle method of the child balancer. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. 
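The hunks above replace the untyped resolver.NewAddressMap()/NewEndpointMap() with their generic counterparts, so Get returns the concrete element type and the old val.(balancer.SubConn) / val.(*balancerWrapper) assertions disappear. A minimal standalone sketch of that pattern, assuming a simplified string key instead of resolver.Address (not the grpc-go implementation):

```go
package main

import "fmt"

// typedMap is a toy stand-in for resolver.AddressMapV2[T] / EndpointMap[T]:
// the value type is a type parameter, so lookups need no type assertion.
type typedMap[T any] struct{ m map[string]T }

func newTypedMap[T any]() *typedMap[T] { return &typedMap[T]{m: make(map[string]T)} }

func (tm *typedMap[T]) Set(k string, v T) { tm.m[k] = v }

func (tm *typedMap[T]) Get(k string) (T, bool) {
	v, ok := tm.m[k]
	return v, ok
}

type fakeSubConn struct{ id int } // illustrative stand-in for balancer.SubConn

func main() {
	scs := newTypedMap[*fakeSubConn]()
	scs.Set("10.0.0.1:443", &fakeSubConn{id: 1})
	if sc, ok := scs.Get("10.0.0.1:443"); ok {
		fmt.Println("found SubConn", sc.id) // no interface{} round trip needed
	}
}
```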
+ ExitIdle() } // Options are the options to configure the behaviour of the @@ -73,7 +83,7 @@ func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilde esOpts: esOpts, childBuilder: childBuilder, } - es.children.Store(resolver.NewEndpointMap()) + es.children.Store(resolver.NewEndpointMap[*balancerWrapper]()) return es } @@ -90,7 +100,7 @@ type endpointSharding struct { // calls into a child. To avoid deadlocks, do not acquire childMu while // holding mu. childMu sync.Mutex - children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper + children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]] // inhibitChildUpdates is set during UpdateClientConnState/ResolverError // calls (calls to children will each produce an update, only want one @@ -104,6 +114,21 @@ type endpointSharding struct { mu sync.Mutex } +// rotateEndpoints returns a slice of all the input endpoints rotated a random +// amount. +func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint { + les := len(es) + if les == 0 { + return es + } + r := randIntN(les) + // Make a copy to avoid mutating data beyond the end of es. + ret := make([]resolver.Endpoint, les) + copy(ret, es[r:]) + copy(ret[les-r:], es[:r]) + return ret +} + // UpdateClientConnState creates a child for new endpoints and deletes children // for endpoints that are no longer present. It also updates all the children, // and sends a single synchronous update of the childrens' aggregated state at @@ -122,18 +147,17 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState var ret error children := es.children.Load() - newChildren := resolver.NewEndpointMap() + newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. - for _, endpoint := range state.ResolverState.Endpoints { + for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) { if _, ok := newChildren.Get(endpoint); ok { // Endpoint child was already created, continue to avoid duplicate // update. continue } - var childBalancer *balancerWrapper - if val, ok := children.Get(endpoint); ok { - childBalancer = val.(*balancerWrapper) + childBalancer, ok := children.Get(endpoint) + if ok { // Endpoint attributes may have changed, update the stored endpoint. 
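rotateEndpoints above starts the endpoint list at a random offset while leaving the input slice untouched. A self-contained sketch of the same copy-based rotation, shown on plain strings rather than resolver.Endpoint values:

```go
package main

import (
	"fmt"
	"math/rand/v2"
)

// rotate returns a copy of in rotated by a random offset; in is not mutated.
func rotate[T any](in []T) []T {
	n := len(in)
	if n == 0 {
		return in
	}
	r := rand.IntN(n)
	out := make([]T, n)
	copy(out, in[r:])       // tail of the list first...
	copy(out[n-r:], in[:r]) // ...then the head wraps around
	return out
}

func main() {
	eps := []string{"ep-a", "ep-b", "ep-c", "ep-d"}
	fmt.Println(rotate(eps)) // e.g. [ep-c ep-d ep-a ep-b]; eps is unchanged
}
```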
es.mu.Lock() childBalancer.childState.Endpoint = endpoint @@ -166,7 +190,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState for _, e := range children.Keys() { child, _ := children.Get(e) if _, ok := newChildren.Get(e); !ok { - child.(*balancerWrapper).closeLocked() + child.closeLocked() } } es.children.Store(newChildren) @@ -189,7 +213,7 @@ func (es *endpointSharding) ResolverError(err error) { }() children := es.children.Load() for _, child := range children.Values() { - child.(*balancerWrapper).resolverErrorLocked(err) + child.resolverErrorLocked(err) } } @@ -202,7 +226,17 @@ func (es *endpointSharding) Close() { defer es.childMu.Unlock() children := es.children.Load() for _, child := range children.Values() { - child.(*balancerWrapper).closeLocked() + child.closeLocked() + } +} + +func (es *endpointSharding) ExitIdle() { + es.childMu.Lock() + defer es.childMu.Unlock() + for _, bw := range es.children.Load().Values() { + if !bw.isClosed { + bw.child.ExitIdle() + } } } @@ -222,8 +256,7 @@ func (es *endpointSharding) updateState() { childStates := make([]ChildState, 0, children.Len()) for _, child := range children.Values() { - bw := child.(*balancerWrapper) - childState := bw.childState + childState := child.childState childStates = append(childStates, childState) childPicker := childState.State.Picker switch childState.State.ConnectivityState { @@ -263,7 +296,7 @@ func (es *endpointSharding) updateState() { p := &pickerWithChildStates{ pickers: pickers, childStates: childStates, - next: uint32(rand.IntN(len(pickers))), + next: uint32(randIntN(len(pickers))), } es.cc.UpdateState(balancer.State{ ConnectivityState: aggState, @@ -328,15 +361,13 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) { // ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to // avoid deadlocks due to synchronous balancer state updates. func (bw *balancerWrapper) ExitIdle() { - if ei, ok := bw.child.(balancer.ExitIdler); ok { - go func() { - bw.es.childMu.Lock() - if !bw.isClosed { - ei.ExitIdle() - } - bw.es.childMu.Unlock() - }() - } + go func() { + bw.es.childMu.Lock() + if !bw.isClosed { + bw.child.ExitIdle() + } + bw.es.childMu.Unlock() + }() } // updateClientConnStateLocked delivers the ClientConnState to the child diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index ea889981..b4bc3a2b 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,55 +16,124 @@ * */ -// Package pickfirst contains the pick_first load balancing policy. +// Package pickfirst contains the pick_first load balancing policy which +// is the universal leaf policy. package pickfirst import ( "encoding/json" "errors" "fmt" - rand "math/rand/v2" + "net" + "net/netip" + "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - - _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. 
) func init() { - if envconfig.NewPickFirstEnabled { - return - } balancer.Register(pickfirstBuilder{}) } -var logger = grpclog.Component("pick-first-lb") +// Name is the name of the pick_first balancer. +const Name = "pick_first" + +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "{disconnection}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) +) const ( - // Name is the name of the pick_first balancer. - Name = "pick_first" - logPrefix = "[pick-first-lb %p] " + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{cc: cc} +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: cc.MetricsRecorder(), + + subConns: resolver.NewAddressMapV2[*scData](), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, + } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (pickfirstBuilder) Name() string { +func (b pickfirstBuilder) Name() string { return Name } +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. 
+func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -74,90 +143,129 @@ type pfConfig struct { ShuffleAddressList bool `json:"shuffleAddressList"` } -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. + effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } - return cfg, nil + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil } type pickfirstBalancer struct { - logger *internalgrpclog.PrefixLogger - state connectivity.State - cc balancer.ClientConn - subConn balancer.SubConn + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMapV2[*scData] + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { if b.logger.V(2) { b.logger.Infof("Received error from the name resolver: %v", err) } - if b.subConn == nil { - b.state = connectivity.TransientFailure - } - if b.state != connectivity.TransientFailure { - // The picker will not change since the balancer does not currently - // report an error. + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. 
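ParseConfig above unmarshals the LB policy's JSON config into pfConfig, whose only knob here is shuffleAddressList. A minimal sketch of that round trip with the struct reduced to the single field shown above (the embedded serviceconfig.LoadBalancingConfig is omitted to keep the example standalone):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pfConfig mirrors only the shuffleAddressList field from the balancer config.
type pfConfig struct {
	ShuffleAddressList bool `json:"shuffleAddressList"`
}

func main() {
	// What a service config's pick_first entry might carry as its config object.
	raw := json.RawMessage(`{"shuffleAddressList": true}`)
	var cfg pfConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {ShuffleAddressList:true}
}
```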
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } return } - b.cc.UpdateState(balancer.State{ + + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) } -// Shuffler is an interface for shuffling an address list. -type Shuffler interface { - ShuffleAddressListForTesting(n int, swap func(i, j int)) -} - -// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n -// is the number of elements. swap swaps the elements with indexes i and j. -func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } - func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // The resolver reported an empty address list. Treat it like an error by - // calling b.ResolverError. - if b.subConn != nil { - // Shut down the old subConn. All addresses were removed, so it is - // no longer valid. - b.subConn.Shutdown() - b.subConn = nil - } - b.ResolverError(errors.New("produced zero addresses")) + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig - // already does so. + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } - var addrs []resolver.Address + var newAddrs []resolver.Address if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling will - // change the order of endpoints but not touch the order of the addresses - // within each endpoint. - A61 + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } - // "Flatten the list by concatenating the ordered list of addresses for each - // of the endpoints, in order." - A61 + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8304 section 4." - A61 - // TODO: support the above language. - addrs = append(addrs, endpoint.Addresses...) 
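healthCheckingEnabled above is set from a resolver attribute that EnableHealthListener stamps onto the resolver state. A short usage sketch, assuming the vendored layout above where EnableHealthListener is exported from balancer/pickfirst; a real resolver would then hand the resulting state to resolver.ClientConn.UpdateState:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer/pickfirst"
	"google.golang.org/grpc/resolver"
)

func main() {
	// A name resolver would normally build this state inside its Build/ResolveNow
	// path; the addresses here are illustrative.
	state := resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}},
	}
	// Opt this channel into pickfirst's generic health listener.
	state = pickfirst.EnableHealthListener(state)
	fmt.Printf("addresses=%d attributes=%v\n", len(state.Addresses), state.Attributes)
}
```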
+ newAddrs = append(newAddrs, endpoint.Addresses...) } } else { // Endpoints not set, process addresses until we migrate resolver @@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. - addrs = state.ResolverState.Addresses + newAddrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] }) } } - if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. + // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } - var subConn balancer.SubConn - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(subConn, state) - }, - }) - if err != nil { - if b.logger.V(2) { - b.logger.Infof("Failed to create new SubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - return balancer.ErrBadResolverState + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. 
+ b.startFirstPassLocked() } - b.subConn = subConn - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.subConn.Connect() return nil } @@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) } -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if b.logger.V(2) { - b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + // Move the balancer into CONNECTING state immediately. This is done to + // avoid staying in IDLE if a resolver update arrives before the first + // SubConn reports CONNECTING. + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.subConn.Shutdown() + } + b.subConns = resolver.NewAddressMapV2[*scData]() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMapV2[bool]() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + seenAddrs.Set(addr, true) + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. 
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMapV2[bool]() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, sd := range b.subConns.Values() { + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMapV2[*scData]() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. 
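interleaveAddresses and addressFamily above implement the RFC 8305 interleaving described in the doc comments. A standalone re-illustration of that behaviour on plain host:port strings (not the grpc-go code), showing the ordering produced for a mixed IPv6/IPv4 list:

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

// family classifies a host:port string as v4, v6, or unknown (unparseable).
func family(hostport string) string {
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		return "unknown"
	}
	ip, err := netip.ParseAddr(host)
	switch {
	case err != nil:
		return "unknown"
	case ip.Is4() || ip.Is4In6():
		return "v4"
	default:
		return "v6"
	}
}

// interleave keeps the first family first, then alternates families in the
// order they were first seen, draining whichever families still have members.
func interleave(addrs []string) []string {
	byFam := map[string][]string{}
	var order []string
	for _, a := range addrs {
		f := family(a)
		if _, seen := byFam[f]; !seen {
			order = append(order, f)
		}
		byFam[f] = append(byFam[f], a)
	}
	out := make([]string, 0, len(addrs))
	for i := 0; len(out) < len(addrs); i = (i + 1) % len(order) {
		f := order[i]
		if rem := byFam[f]; len(rem) > 0 {
			out = append(out, rem[0])
			byFam[f] = rem[1:]
		}
	}
	return out
}

func main() {
	in := []string{"[2001:db8::1]:443", "[2001:db8::2]:443", "192.0.2.1:443", "192.0.2.2:443"}
	fmt.Println(interleave(in))
	// [[2001:db8::1]:443 192.0.2.1:443 [2001:db8::2]:443 192.0.2.2:443]
}
```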
+func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + switch sd.rawConnectivityState { + case connectivity.Idle: + sd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return } - if b.subConn != subConn { + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } if b.logger.V(2) { - b.logger.Infof("Ignored state change because subConn is not recognized") + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. 
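scheduleNextConnectionLocked above arms a 250ms Happy Eyeballs timer whose callback is ignored once cancelConnectionTimer has run. A standalone sketch of that pattern using time.AfterFunc and sync.OnceFunc; the internal TimeAfterFunc helper and the balancer's state are replaced by illustrative stand-ins:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

const connectionDelay = 250 * time.Millisecond

func main() {
	var mu sync.Mutex
	cancelled := false // guarded by mu, like the balancer's flag

	timer := time.AfterFunc(connectionDelay, func() {
		mu.Lock()
		defer mu.Unlock()
		if cancelled {
			return // cancelled while this callback waited for the lock
		}
		fmt.Println("delay expired: try the next address")
	})

	// Safe to call more than once, mirroring cancelConnectionTimer.
	cancel := sync.OnceFunc(func() {
		mu.Lock()
		cancelled = true
		mu.Unlock()
		timer.Stop()
	})

	time.Sleep(100 * time.Millisecond) // simulate the attempt finishing early
	cancel()
	time.Sleep(300 * time.Millisecond) // the timer callback never prints
}
```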
+ if !b.isActiveSCData(sd) { return } - if state.ConnectivityState == connectivity.Shutdown { - b.subConn = nil + if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown return } - switch state.ConnectivityState { - case connectivity.Ready: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, - }) - case connectivity.Connecting: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. See A62. + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) return } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. 
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. - b.subConn.Connect() + sd.subConn.Connect() + } +} + +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, sd := range b.subConns.Values() { + if !sd.connectionFailedInFirstPass { return } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &idlePicker{subConn: subConn}, + } + b.firstPass = false + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. 
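After the first pass, the TRANSIENT_FAILURE branch above only publishes a new failing picker once every SubConn in the set has failed again, by counting failures modulo b.subConns.Len(). A tiny sketch of that counting rule with plain integers standing in for SubConns:

```go
package main

import "fmt"

func main() {
	numSubConns := 3
	numTF := 0
	for failure := 1; failure <= 6; failure++ {
		numTF = (numTF + 1) % numSubConns
		if numTF%numSubConns == 0 {
			fmt.Printf("failure %d: full round, publish TRANSIENT_FAILURE picker\n", failure)
		} else {
			fmt.Printf("failure %d: keep the current picker\n", failure)
		}
	}
	// Only failures 3 and 6 publish a new picker.
}
```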
+ for _, sd := range b.subConns.Values() { + if sd.rawConnectivityState == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. + if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, }) case connectivity.TransientFailure: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{err: state.ConnectionError}, + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, + }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) } - b.state = state.ConnectivityState } -func (b *pickfirstBalancer) Close() { +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailures allow the picker to be updated to update + // the connectivity error, in all other cases don't send duplicate state + // updates. + if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) } -func (b *pickfirstBalancer) ExitIdle() { - if b.subConn != nil && b.state == connectivity.Idle { - b.subConn.Connect() - } +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). +// A separate function is defined to force update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) } type picker struct { @@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - subConn balancer.SubConn + exitIdle func() } func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.subConn.Connect() + i.exitIdle() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. 
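The idlePicker above no longer holds a SubConn; it carries an exitIdle callback that the balancer wraps in sync.OnceFunc, so repeated Picks trigger at most one ExitIdle. A simplified standalone sketch of that design (the Pick signature here is illustrative, not the balancer.Picker interface):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNoSubConnAvailable = errors.New("no SubConn available") // stand-in for balancer.ErrNoSubConnAvailable

type idlePicker struct{ exitIdle func() }

func (p *idlePicker) Pick() error {
	p.exitIdle() // kick the balancer out of IDLE instead of dialing a SubConn directly
	return errNoSubConnAvailable
}

func main() {
	exit := sync.OnceFunc(func() { fmt.Println("ExitIdle called") })
	p := &idlePicker{exitIdle: exit}
	_ = p.Pick() // prints once
	_ = p.Pick() // no-op: OnceFunc prevents a duplicate ExitIdle
}
```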
+type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go deleted file mode 100644 index 113181e6..00000000 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ /dev/null @@ -1,932 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package pickfirstleaf contains the pick_first load balancing policy which -// will be the universal leaf policy after dualstack changes are implemented. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. 
-package pickfirstleaf - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "net/netip" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/pickfirst/internal" - "google.golang.org/grpc/connectivity" - expstats "google.golang.org/grpc/experimental/stats" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" - internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -func init() { - if envconfig.NewPickFirstEnabled { - // Register as the default pick_first balancer. - Name = "pick_first" - } - balancer.Register(pickfirstBuilder{}) -} - -type ( - // enableHealthListenerKeyType is a unique key type used in resolver - // attributes to indicate whether the health listener usage is enabled. - enableHealthListenerKeyType struct{} - // managedByPickfirstKeyType is an attribute key type to inform Outlier - // Detection that the generic health listener is being used. - // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when - // implementing the dualstack design. This is a hack. Once Dualstack is - // completed, outlier detection will stop sending ejection updates through - // the connectivity listener. - managedByPickfirstKeyType struct{} -) - -var ( - logger = grpclog.Component("pick-first-leaf-lb") - // Name is the name of the pick_first_leaf balancer. - // It is changed to "pick_first" in init() if this balancer is to be - // registered as the default pickfirst. - Name = "pick_first_leaf" - disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.disconnections", - Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "disconnection", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_succeeded", - Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "attempt", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_failed", - Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "attempt", - Labels: []string{"grpc.target"}, - Default: false, - }) -) - -const ( - // TODO: change to pick-first when this becomes the default pick_first policy. - logPrefix = "[pick-first-leaf-lb %p] " - // connectionDelayInterval is the time to wait for during the happy eyeballs - // pass before starting the next connection attempt. - connectionDelayInterval = 250 * time.Millisecond -) - -type ipAddrFamily int - -const ( - // ipAddrFamilyUnknown represents strings that can't be parsed as an IP - // address. 
- ipAddrFamilyUnknown ipAddrFamily = iota - ipAddrFamilyV4 - ipAddrFamilyV6 -) - -type pickfirstBuilder struct{} - -func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{ - cc: cc, - target: bo.Target.String(), - metricsRecorder: cc.MetricsRecorder(), - - subConns: resolver.NewAddressMap(), - state: connectivity.Connecting, - cancelConnectionTimer: func() {}, - } - b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) - return b -} - -func (b pickfirstBuilder) Name() string { - return Name -} - -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - -// EnableHealthListener updates the state to configure pickfirst for using a -// generic health listener. -func EnableHealthListener(state resolver.State) resolver.State { - state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) - return state -} - -// IsManagedByPickfirst returns whether an address belongs to a SubConn -// managed by the pickfirst LB policy. -// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable -// outlier_detection via the with connectivity listener when using pick_first. -// Once Dualstack changes are complete, all SubConns will be created by -// pick_first and outlier detection will only use the health listener for -// ejection. This hack can then be removed. -func IsManagedByPickfirst(addr resolver.Address) bool { - return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil -} - -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of endpoints received from the name resolver before attempting to - // connect to them. - ShuffleAddressList bool `json:"shuffleAddressList"` -} - -// scData keeps track of the current state of the subConn. -// It is not safe for concurrent access. -type scData struct { - // The following fields are initialized at build time and read-only after - // that. - subConn balancer.SubConn - addr resolver.Address - - rawConnectivityState connectivity.State - // The effective connectivity state based on raw connectivity, health state - // and after following sticky TransientFailure behaviour defined in A62. - effectiveState connectivity.State - lastErr error - connectionFailedInFirstPass bool -} - -func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true) - sd := &scData{ - rawConnectivityState: connectivity.Idle, - effectiveState: connectivity.Idle, - addr: addr, - } - sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(sd, state) - }, - }) - if err != nil { - return nil, err - } - sd.subConn = sc - return sd, nil -} - -type pickfirstBalancer struct { - // The following fields are initialized at build time and read-only after - // that and therefore do not need to be guarded by a mutex. 
- logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn - target string - metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil - - // The mutex is used to ensure synchronization of updates triggered - // from the idle picker and the already serialized resolver, - // SubConn state updates. - mu sync.Mutex - // State reported to the channel based on SubConn states and resolver - // updates. - state connectivity.State - // scData for active subonns mapped by address. - subConns *resolver.AddressMap - addressList addressList - firstPass bool - numTF int - cancelConnectionTimer func() - healthCheckingEnabled bool -} - -// ResolverError is called by the ClientConn when the name resolver produces -// an error or when pickfirst determined the resolver update to be invalid. -func (b *pickfirstBalancer) ResolverError(err error) { - b.mu.Lock() - defer b.mu.Unlock() - b.resolverErrorLocked(err) -} - -func (b *pickfirstBalancer) resolverErrorLocked(err error) { - if b.logger.V(2) { - b.logger.Infof("Received error from the name resolver: %v", err) - } - - // The picker will not change since the balancer does not currently - // report an error. If the balancer hasn't received a single good resolver - // update yet, transition to TRANSIENT_FAILURE. - if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { - if b.logger.V(2) { - b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") - } - return - } - - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) -} - -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - b.mu.Lock() - defer b.mu.Unlock() - b.cancelConnectionTimer() - if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // Cleanup state pertaining to the previous resolver state. - // Treat an empty address list like an error by calling b.ResolverError. - b.closeSubConnsLocked() - b.addressList.updateAddrs(nil) - b.resolverErrorLocked(errors.New("produced zero addresses")) - return balancer.ErrBadResolverState - } - b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil - cfg, ok := state.BalancerConfig.(pfConfig) - if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) - } - - if b.logger.V(2) { - b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) - } - - var newAddrs []resolver.Address - if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling - // will change the order of endpoints but not touch the order of the - // addresses within each endpoint. - A61 - if cfg.ShuffleAddressList { - endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - - // "Flatten the list by concatenating the ordered list of addresses for - // each of the endpoints, in order." - A61 - for _, endpoint := range endpoints { - newAddrs = append(newAddrs, endpoint.Addresses...) - } - } else { - // Endpoints not set, process addresses until we migrate resolver - // emissions fully to Endpoints. 
The top channel does wrap emitted - // addresses with endpoints, however some balancers such as weighted - // target do not forward the corresponding correct endpoints down/split - // endpoints properly. Once all balancers correctly forward endpoints - // down, can delete this else conditional. - newAddrs = state.ResolverState.Addresses - if cfg.ShuffleAddressList { - newAddrs = append([]resolver.Address{}, newAddrs...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - } - - // If an address appears in multiple endpoints or in the same endpoint - // multiple times, we keep it only once. We will create only one SubConn - // for the address because an AddressMap is used to store SubConns. - // Not de-duplicating would result in attempting to connect to the same - // SubConn multiple times in the same pass. We don't want this. - newAddrs = deDupAddresses(newAddrs) - newAddrs = interleaveAddresses(newAddrs) - - prevAddr := b.addressList.currentAddress() - prevSCData, found := b.subConns.Get(prevAddr) - prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready - b.addressList.updateAddrs(newAddrs) - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. - if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { - return nil - } - - b.reconcileSubConnsLocked(newAddrs) - // If it's the first resolver update or the balancer was already READY - // (but the new address list does not contain the ready SubConn) or - // CONNECTING, enter CONNECTING. - // We may be in TRANSIENT_FAILURE due to a previous empty address list, - // we should still enter CONNECTING because the sticky TF behaviour - // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported - // due to connectivity failures. - if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { - // Start connection attempt at first address. - b.forceUpdateConcludedStateLocked(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.startFirstPassLocked() - } else if b.state == connectivity.TransientFailure { - // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until - // we're READY. See A62. - b.startFirstPassLocked() - } - return nil -} - -// UpdateSubConnState is unused as a StateListener is always registered when -// creating SubConns. -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) -} - -func (b *pickfirstBalancer) Close() { - b.mu.Lock() - defer b.mu.Unlock() - b.closeSubConnsLocked() - b.cancelConnectionTimer() - b.state = connectivity.Shutdown -} - -// ExitIdle moves the balancer out of idle state. It can be called concurrently -// by the idlePicker and clientConn so access to variables should be -// synchronized. -func (b *pickfirstBalancer) ExitIdle() { - b.mu.Lock() - defer b.mu.Unlock() - if b.state == connectivity.Idle { - b.startFirstPassLocked() - } -} - -func (b *pickfirstBalancer) startFirstPassLocked() { - b.firstPass = true - b.numTF = 0 - // Reset the connection attempt record for existing SubConns. 
- for _, sd := range b.subConns.Values() { - sd.(*scData).connectionFailedInFirstPass = false - } - b.requestConnectionLocked() -} - -func (b *pickfirstBalancer) closeSubConnsLocked() { - for _, sd := range b.subConns.Values() { - sd.(*scData).subConn.Shutdown() - } - b.subConns = resolver.NewAddressMap() -} - -// deDupAddresses ensures that each address appears only once in the slice. -func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMap() - retAddrs := []resolver.Address{} - - for _, addr := range addrs { - if _, ok := seenAddrs.Get(addr); ok { - continue - } - retAddrs = append(retAddrs, addr) - } - return retAddrs -} - -// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) -// as per RFC-8305 section 4. -// Whichever address family is first in the list is followed by an address of -// the other address family; that is, if the first address in the list is IPv6, -// then the first IPv4 address should be moved up in the list to be second in -// the list. It doesn't support configuring "First Address Family Count", i.e. -// there will always be a single member of the first address family at the -// beginning of the interleaved list. -// Addresses that are neither IPv4 nor IPv6 are treated as part of a third -// "unknown" family for interleaving. -// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 -func interleaveAddresses(addrs []resolver.Address) []resolver.Address { - familyAddrsMap := map[ipAddrFamily][]resolver.Address{} - interleavingOrder := []ipAddrFamily{} - for _, addr := range addrs { - family := addressFamily(addr.Addr) - if _, found := familyAddrsMap[family]; !found { - interleavingOrder = append(interleavingOrder, family) - } - familyAddrsMap[family] = append(familyAddrsMap[family], addr) - } - - interleavedAddrs := make([]resolver.Address, 0, len(addrs)) - - for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { - // Some IP types may have fewer addresses than others, so we look for - // the next type that has a remaining member to add to the interleaved - // list. - family := interleavingOrder[curFamilyIdx] - remainingMembers := familyAddrsMap[family] - if len(remainingMembers) > 0 { - interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) - familyAddrsMap[family] = remainingMembers[1:] - } - } - - return interleavedAddrs -} - -// addressFamily returns the ipAddrFamily after parsing the address string. -// If the address isn't of the format "ip-address:port", it returns -// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when -// using a resolver like passthrough where the address may be a hostname in -// some format that the dialer can resolve. -func addressFamily(address string) ipAddrFamily { - // Parse the IP after removing the port. - host, _, err := net.SplitHostPort(address) - if err != nil { - return ipAddrFamilyUnknown - } - ip, err := netip.ParseAddr(host) - if err != nil { - return ipAddrFamilyUnknown - } - switch { - case ip.Is4() || ip.Is4In6(): - return ipAddrFamilyV4 - case ip.Is6(): - return ipAddrFamilyV6 - default: - return ipAddrFamilyUnknown - } -} - -// reconcileSubConnsLocked updates the active subchannels based on a new address -// list from the resolver. It does this by: -// - closing subchannels: any existing subchannels associated with addresses -// that are no longer in the updated list are shut down. 
-// - removing subchannels: entries for these closed subchannels are removed -// from the subchannel map. -// -// This ensures that the subchannel map accurately reflects the current set of -// addresses received from the name resolver. -func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMap() - for _, addr := range newAddrs { - newAddrsMap.Set(addr, true) - } - - for _, oldAddr := range b.subConns.Keys() { - if _, ok := newAddrsMap.Get(oldAddr); ok { - continue - } - val, _ := b.subConns.Get(oldAddr) - val.(*scData).subConn.Shutdown() - b.subConns.Delete(oldAddr) - } -} - -// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn -// becomes ready, which means that all other subConn must be shutdown. -func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { - b.cancelConnectionTimer() - for _, v := range b.subConns.Values() { - sd := v.(*scData) - if sd.subConn != selected.subConn { - sd.subConn.Shutdown() - } - } - b.subConns = resolver.NewAddressMap() - b.subConns.Set(selected.addr, selected) -} - -// requestConnectionLocked starts connecting on the subchannel corresponding to -// the current address. If no subchannel exists, one is created. If the current -// subchannel is in TransientFailure, a connection to the next address is -// attempted until a subchannel is found. -func (b *pickfirstBalancer) requestConnectionLocked() { - if !b.addressList.isValid() { - return - } - var lastErr error - for valid := true; valid; valid = b.addressList.increment() { - curAddr := b.addressList.currentAddress() - sd, ok := b.subConns.Get(curAddr) - if !ok { - var err error - // We want to assign the new scData to sd from the outer scope, - // hence we can't use := below. - sd, err = b.newSCData(curAddr) - if err != nil { - // This should never happen, unless the clientConn is being shut - // down. - if b.logger.V(2) { - b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) - } - // Do nothing, the LB policy will be closed soon. - return - } - b.subConns.Set(curAddr, sd) - } - - scd := sd.(*scData) - switch scd.rawConnectivityState { - case connectivity.Idle: - scd.subConn.Connect() - b.scheduleNextConnectionLocked() - return - case connectivity.TransientFailure: - // The SubConn is being re-used and failed during a previous pass - // over the addressList. It has not completed backoff yet. - // Mark it as having failed and try the next address. - scd.connectionFailedInFirstPass = true - lastErr = scd.lastErr - continue - case connectivity.Connecting: - // Wait for the connection attempt to complete or the timer to fire - // before attempting the next address. - b.scheduleNextConnectionLocked() - return - default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) - return - - } - } - - // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass if possible. - b.endFirstPassIfPossibleLocked(lastErr) -} - -func (b *pickfirstBalancer) scheduleNextConnectionLocked() { - b.cancelConnectionTimer() - if !b.addressList.hasNext() { - return - } - curAddr := b.addressList.currentAddress() - cancelled := false // Access to this is protected by the balancer's mutex. - closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { - b.mu.Lock() - defer b.mu.Unlock() - // If the scheduled task is cancelled while acquiring the mutex, return. 
- if cancelled { - return - } - if b.logger.V(2) { - b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) - } - if b.addressList.increment() { - b.requestConnectionLocked() - } - }) - // Access to the cancellation callback held by the balancer is guarded by - // the balancer's mutex, so it's safe to set the boolean from the callback. - b.cancelConnectionTimer = sync.OnceFunc(func() { - cancelled = true - closeFn() - }) -} - -func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - oldState := sd.rawConnectivityState - sd.rawConnectivityState = newState.ConnectivityState - // Previously relevant SubConns can still callback with state updates. - // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes this - // SubConn. - if !b.isActiveSCData(sd) { - return - } - if newState.ConnectivityState == connectivity.Shutdown { - sd.effectiveState = connectivity.Shutdown - return - } - - // Record a connection attempt when exiting CONNECTING. - if newState.ConnectivityState == connectivity.TransientFailure { - sd.connectionFailedInFirstPass = true - connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) - } - - if newState.ConnectivityState == connectivity.Ready { - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - b.shutdownRemainingLocked(sd) - if !b.addressList.seekTo(sd.addr) { - // This should not fail as we should have only one SubConn after - // entering READY. The SubConn should be present in the addressList. - b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) - return - } - if !b.healthCheckingEnabled { - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) - } - - sd.effectiveState = connectivity.Ready - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - return - } - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) - } - // Send a CONNECTING update to take the SubConn out of sticky-TF if - // required. - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { - b.updateSubConnHealthState(sd, scs) - }) - return - } - - // If the LB policy is READY, and it receives a subchannel state change, - // it means that the READY subchannel has failed. - // A SubConn can also transition from CONNECTING directly to IDLE when - // a transport is successfully created, but the connection fails - // before the SubConn can send the notification for READY. We treat - // this as a successful connection and transition to IDLE. - // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second - // part of the if condition below once the issue is fixed. - if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { - // Once a transport fails, the balancer enters IDLE and starts from - // the first address when the picker is used. 
- b.shutdownRemainingLocked(sd) - sd.effectiveState = newState.ConnectivityState - // READY SubConn interspliced in between CONNECTING and IDLE, need to - // account for that. - if oldState == connectivity.Connecting { - // A known issue (https://github.com/grpc/grpc-go/issues/7862) - // causes a race that prevents the READY state change notification. - // This works around it. - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - } - disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) - b.addressList.reset() - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, - }) - return - } - - if b.firstPass { - switch newState.ConnectivityState { - case connectivity.Connecting: - // The effective state can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in - // TRANSIENT_FAILURE until it's READY. See A62. - if sd.effectiveState != connectivity.TransientFailure { - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - } - case connectivity.TransientFailure: - sd.lastErr = newState.ConnectionError - sd.effectiveState = connectivity.TransientFailure - // Since we're re-using common SubConns while handling resolver - // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. Happy Eyeballs will also - // cause out of order updates to arrive. - - if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - b.cancelConnectionTimer() - if b.addressList.increment() { - b.requestConnectionLocked() - return - } - } - - // End the first pass if we've seen a TRANSIENT_FAILURE from all - // SubConns once. - b.endFirstPassIfPossibleLocked(newState.ConnectionError) - } - return - } - - // We have finished the first pass, keep re-connecting failing SubConns. - switch newState.ConnectivityState { - case connectivity.TransientFailure: - b.numTF = (b.numTF + 1) % b.subConns.Len() - sd.lastErr = newState.ConnectionError - if b.numTF%b.subConns.Len() == 0 { - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: newState.ConnectionError}, - }) - } - // We don't need to request re-resolution since the SubConn already - // does that before reporting TRANSIENT_FAILURE. - // TODO: #7534 - Move re-resolution requests from SubConn into - // pick_first. - case connectivity.Idle: - sd.subConn.Connect() - } -} - -// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the -// addresses are tried and their SubConns have reported a failure. -func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { - // An optimization to avoid iterating over the entire SubConn map. - if b.addressList.isValid() { - return - } - // Connect() has been called on all the SubConns. The first pass can be - // ended if all the SubConns have reported a failure. - for _, v := range b.subConns.Values() { - sd := v.(*scData) - if !sd.connectionFailedInFirstPass { - return - } - } - b.firstPass = false - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: lastErr}, - }) - // Start re-connecting all the SubConns that are already in IDLE. 
- for _, v := range b.subConns.Values() { - sd := v.(*scData) - if sd.rawConnectivityState == connectivity.Idle { - sd.subConn.Connect() - } - } -} - -func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { - activeSD, found := b.subConns.Get(sd.addr) - return found && activeSD == sd -} - -func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - // Previously relevant SubConns can still callback with state updates. - // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes - // this SubConn. - if !b.isActiveSCData(sd) { - return - } - sd.effectiveState = state.ConnectivityState - switch state.ConnectivityState { - case connectivity.Ready: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - case connectivity.TransientFailure: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, - }) - case connectivity.Connecting: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - default: - b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) - } -} - -// updateBalancerState stores the state reported to the channel and calls -// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate -// updates to the channel. -func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { - // In case of TransientFailures allow the picker to be updated to update - // the connectivity error, in all other cases don't send duplicate state - // updates. - if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { - return - } - b.forceUpdateConcludedStateLocked(newState) -} - -// forceUpdateConcludedStateLocked stores the state reported to the channel and -// calls ClientConn.UpdateState(). -// A separate function is defined to force update the ClientConn state since the -// channel doesn't correctly assume that LB policies start in CONNECTING and -// relies on LB policy to send an initial CONNECTING update. -func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { - b.state = newState.ConnectivityState - b.cc.UpdateState(newState) -} - -type picker struct { - result balancer.PickResult - err error -} - -func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return p.result, p.err -} - -// idlePicker is used when the SubConn is IDLE and kicks the SubConn into -// CONNECTING when Pick is called. -type idlePicker struct { - exitIdle func() -} - -func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.exitIdle() - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} - -// addressList manages sequentially iterating over addresses present in a list -// of endpoints. It provides a 1 dimensional view of the addresses present in -// the endpoints. -// This type is not safe for concurrent access. 
-type addressList struct { - addresses []resolver.Address - idx int -} - -func (al *addressList) isValid() bool { - return al.idx < len(al.addresses) -} - -func (al *addressList) size() int { - return len(al.addresses) -} - -// increment moves to the next index in the address list. -// This method returns false if it went off the list, true otherwise. -func (al *addressList) increment() bool { - if !al.isValid() { - return false - } - al.idx++ - return al.idx < len(al.addresses) -} - -// currentAddress returns the current address pointed to in the addressList. -// If the list is in an invalid state, it returns an empty address instead. -func (al *addressList) currentAddress() resolver.Address { - if !al.isValid() { - return resolver.Address{} - } - return al.addresses[al.idx] -} - -func (al *addressList) reset() { - al.idx = 0 -} - -func (al *addressList) updateAddrs(addrs []resolver.Address) { - al.addresses = addrs - al.reset() -} - -// seekTo returns false if the needle was not found and the current index was -// left unchanged. -func (al *addressList) seekTo(needle resolver.Address) bool { - for ai, addr := range al.addresses { - if !equalAddressIgnoringBalAttributes(&addr, &needle) { - continue - } - al.idx = ai - return true - } - return false -} - -// hasNext returns whether incrementing the addressList will result in moving -// past the end of the list. If the list has already moved past the end, it -// returns false. -func (al *addressList) hasNext() bool { - if !al.isValid() { - return false - } - return al.idx+1 < len(al.addresses) -} - -// equalAddressIgnoringBalAttributes returns true is a and b are considered -// equal. This is different from the Equal method on the resolver.Address type -// which considers all fields to determine equality. Here, we only consider -// fields that are meaningful to the SubConn. -func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { - return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) && - a.Metadata == b.Metadata -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 35da5d1e..22e6e326 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/endpointsharding" - "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" ) @@ -47,7 +47,7 @@ func (bb builder) Name() string { } func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - childBuilder := balancer.Get(pickfirstleaf.Name).Build + childBuilder := balancer.Get(pickfirst.Name).Build bal := &rrBalancer{ cc: cc, Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}), @@ -67,13 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ // Enable the health listener in pickfirst children for client side health // checks and outlier detection, if configured. 
- ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), + ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState), }) } - -func (b *rrBalancer) ExitIdle() { - // Should always be ok, as child is endpoint sharding. - if ei, ok := b.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - } -} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 948a21ef..2c760e62 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func( if acbw.ccb.cc.dopts.disableHealthCheck { return noOpRegisterHealthListenerFn } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } regHealthLisFn := internal.RegisterClientHealthCheckListener if regHealthLisFn == nil { // The health package is not imported. - return noOpRegisterHealthListenerFn - } - cfg := acbw.ac.cc.healthCheckConfig() - if cfg == nil { + channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.") return noOpRegisterHealthListenerFn } return func(ctx context.Context, listener func(balancer.SubConnState)) func() { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index b2f8fc7f..42c61cf9 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -858,133 +858,68 @@ func (x *Address) GetIpPort() uint32 { var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor -var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, - 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, - 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, - 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, - 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, - 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, - 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, - 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, - 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 
0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, - 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, - 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, - 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, - 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, - 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, - 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, - 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, - 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, - 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, - 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 
0x74, 0x61, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, - 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, - 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, - 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, - 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, - 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, - 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, - 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 
0x6f, 0x33, -}) +const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" + + "\n" + + "\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" + + "\fGrpcLogEntry\x128\n" + + "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" + + "\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" + + "\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" + + "\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" + + "\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" + + "\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" + + "\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" + + "\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" + + "\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" + + "\x11payload_truncated\x18\n" + + " \x01(\bR\x10payloadTruncated\x12.\n" + + "\x04peer\x18\v \x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" + + "\tEventType\x12\x16\n" + + "\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" + + "\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" + + "\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" + + "\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" + + "\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" + + "\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" + + "\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" + + "\x11EVENT_TYPE_CANCEL\x10\a\"B\n" + + "\x06Logger\x12\x12\n" + + "\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" + + "\rLOGGER_CLIENT\x10\x01\x12\x11\n" + + "\rLOGGER_SERVER\x10\x02B\t\n" + + "\apayload\"\xbb\x01\n" + + "\fClientHeader\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" + + "\vmethod_name\x18\x02 \x01(\tR\n" + + "methodName\x12\x1c\n" + + "\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" + + "\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" + + "\fServerHeader\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" + + "\aTrailer\x127\n" + + "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" + + "\vstatus_code\x18\x02 \x01(\rR\n" + + "statusCode\x12%\n" + + "\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" + + "\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" + + "\aMessage\x12\x16\n" + + "\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" + + "\x04data\x18\x02 \x01(\fR\x04data\"B\n" + + "\bMetadata\x126\n" + + "\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" + + "\aAddress\x123\n" + + "\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" + + "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" + + "\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" + + "\x04Type\x12\x10\n" + + "\fTYPE_UNKNOWN\x10\x00\x12\r\n" + + "\tTYPE_IPV4\x10\x01\x12\r\n" + + "\tTYPE_IPV6\x10\x02\x12\r\n" + + "\tTYPE_UNIX\x10\x03B\\\n" + + "\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3" var ( file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 
a319ef97..b767d3e3 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -35,16 +35,19 @@ import ( "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/stats" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. @@ -97,6 +100,41 @@ var ( errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") ) +var ( + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.subchannel.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "{disconnection}", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality", "grpc.disconnect_error"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.subchannel.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.subchannel.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"}, + Default: false, + }) + openConnectionsMetric = expstats.RegisterInt64UpDownCount(expstats.MetricDescriptor{ + Name: "grpc.subchannel.open_connections", + Description: "EXPERIMENTAL. Number of open connections.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.backend_service", "grpc.security_level", "grpc.lb.locality"}, + Default: false, + }) +) + const ( defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 defaultClientMaxSendMessageSize = math.MaxInt32 @@ -208,9 +246,10 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.pickerWrapper = newPickerWrapper() - cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...) cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. 
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) @@ -260,9 +299,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * }() // This creates the name resolver, load balancer, etc. - if err := cc.idlenessMgr.ExitIdleMode(); err != nil { - return nil, err + if err := cc.exitIdleMode(); err != nil { + return nil, fmt.Errorf("failed to exit idle mode: %w", err) } + cc.idlenessMgr.UnsafeSetNotIdle() // Return now for non-blocking dials. if !cc.dopts.block { @@ -330,7 +370,7 @@ func (cc *ClientConn) addTraceEvent(msg string) { Severity: channelz.CtInfo, } } - channelz.AddTraceEvent(logger, cc.channelz, 0, ted) + channelz.AddTraceEvent(logger, cc.channelz, 1, ted) } type idler ClientConn @@ -339,14 +379,17 @@ func (i *idler) EnterIdleMode() { (*ClientConn)(i).enterIdleMode() } -func (i *idler) ExitIdleMode() error { - return (*ClientConn)(i).exitIdleMode() +func (i *idler) ExitIdleMode() { + // Ignore the error returned from this method, because from the perspective + // of the caller (idleness manager), the channel would have always moved out + // of IDLE by the time this method returns. + (*ClientConn)(i).exitIdleMode() } // exitIdleMode moves the channel out of idle mode by recreating the name // resolver and load balancer. This should never be called directly; use // cc.idlenessMgr.ExitIdleMode instead. -func (cc *ClientConn) exitIdleMode() (err error) { +func (cc *ClientConn) exitIdleMode() error { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() @@ -354,11 +397,23 @@ func (cc *ClientConn) exitIdleMode() (err error) { } cc.mu.Unlock() + // Set state to CONNECTING before building the name resolver + // so the channel does not remain in IDLE. + cc.csMgr.updateState(connectivity.Connecting) + // This needs to be called without cc.mu because this builds a new resolver // which might update state or report error inline, which would then need to // acquire cc.mu. if err := cc.resolverWrapper.start(); err != nil { - return err + // If resolver creation fails, treat it like an error reported by the + // resolver before any valid updates. Set channel's state to + // TransientFailure, and set an erroring picker with the resolver build + // error, which will returned as part of any subsequent RPCs. + logger.Warningf("Failed to start resolver: %v", err) + cc.csMgr.updateState(connectivity.TransientFailure) + cc.mu.Lock() + cc.updateResolverStateAndUnlock(resolver.State{}, err) + return fmt.Errorf("failed to start resolver: %w", err) } cc.addTraceEvent("exiting idle mode") @@ -456,7 +511,7 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) cc.channelz = channelz.RegisterChannel(parentChannel, target) - cc.addTraceEvent("created") + cc.addTraceEvent(fmt.Sprintf("created for target %q", target)) } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -621,7 +676,8 @@ type ClientConn struct { channelz *channelz.Channel // Channelz object. resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager - metricsRecorderList *stats.MetricsRecorderList + metricsRecorderList *istats.MetricsRecorderList + statsHandler stats.Handler // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. 
@@ -678,10 +734,8 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - if err := cc.idlenessMgr.ExitIdleMode(); err != nil { - cc.addTraceEvent(err.Error()) - return - } + cc.idlenessMgr.ExitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. cc.mu.Lock() @@ -689,22 +743,31 @@ func (cc *ClientConn) Connect() { cc.mu.Unlock() } -// waitForResolvedAddrs blocks until the resolver has provided addresses or the -// context expires. Returns nil unless the context expires first; otherwise -// returns a status error based on the context. -func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { +// waitForResolvedAddrs blocks until the resolver provides addresses or the +// context expires, whichever happens first. +// +// Error is nil unless the context expires first; otherwise returns a status +// error based on the context. +// +// The returned boolean indicates whether it did block or not. If the +// resolution has already happened once before, it returns false without +// blocking. Otherwise, it wait for the resolution and return true if +// resolution has succeeded or return false along with error if resolution has +// failed. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) { // This is on the RPC path, so we use a fast path to avoid the // more-expensive "select" below after the resolver has returned once. if cc.firstResolveEvent.HasFired() { - return nil + return false, nil } + internal.NewStreamWaitingForResolver() select { case <-cc.firstResolveEvent.Done(): - return nil + return true, nil case <-ctx.Done(): - return status.FromContextError(ctx.Err()).Err() + return false, status.FromContextError(ctx.Err()).Err() case <-cc.ctx.Done(): - return ErrClientConnClosing + return false, ErrClientConnClosing } } @@ -723,8 +786,8 @@ func init() { internal.EnterIdleModeForTesting = func(cc *ClientConn) { cc.idlenessMgr.EnterIdleModeForTesting() } - internal.ExitIdleModeForTesting = func(cc *ClientConn) error { - return cc.idlenessMgr.ExitIdleMode() + internal.ExitIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.ExitIdleMode() } } @@ -849,6 +912,7 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. channelz: channelz.RegisterSubChannel(cc.channelz, ""), resetBackoff: make(chan struct{}), } + ac.updateTelemetryLabelsLocked() ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if // we connect to different addresses. @@ -965,7 +1029,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { } ac.addrs = addrs - + ac.updateTelemetryLabelsLocked() if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { @@ -1067,13 +1131,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. 
@@ -1211,6 +1268,9 @@ type addrConn struct { resetBackoff chan struct{} channelz *channelz.SubChannel + + localityLabel string + backendServiceLabel string } // Note: this requires a lock on ac.mu. @@ -1218,6 +1278,18 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + + // If we are transitioning out of Ready, it means there is a disconnection. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if ac.state == connectivity.Ready || (ac.state == connectivity.Connecting && s == connectivity.Idle) { + disconnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel, "unknown") + openConnectionsMetric.Record(ac.cc.metricsRecorderList, -1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel) + } ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1231,8 +1303,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) // adjustParams updates parameters used to create transports upon // receiving a GoAway. func (ac *addrConn) adjustParams(r transport.GoAwayReason) { - switch r { - case transport.GoAwayTooManyPings: + if r == transport.GoAwayTooManyPings { v := 2 * ac.dopts.copts.KeepaliveParams.Time ac.cc.mu.Lock() if v > ac.cc.keepaliveParams.Time { @@ -1276,6 +1347,15 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + if !errors.Is(err, context.Canceled) { + connectionAttemptsFailedMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel) + } else { + if logger.V(2) { + // This records cancelled connection attempts which can be later + // replaced by a metric. + logger.Infof("Context cancellation detected; not recording this as a failed connection attempt.") + } + } // TODO: #7534 - Move re-resolution requests into the pick_first LB policy // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) @@ -1315,10 +1395,50 @@ func (ac *addrConn) resetTransportAndUnlock() { } // Success; reset backoff. ac.mu.Lock() + connectionAttemptsSucceededMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel) + openConnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel) ac.backoffIdx = 0 ac.mu.Unlock() } +// updateTelemetryLabelsLocked calculates and caches the telemetry labels based on the +// first address in addrConn. 
+func (ac *addrConn) updateTelemetryLabelsLocked() { + labelsFunc, ok := internal.AddressToTelemetryLabels.(func(resolver.Address) map[string]string) + if !ok || len(ac.addrs) == 0 { + // Reset defaults + ac.localityLabel = "" + ac.backendServiceLabel = "" + return + } + labels := labelsFunc(ac.addrs[0]) + ac.localityLabel = labels["grpc.lb.locality"] + ac.backendServiceLabel = labels["grpc.lb.backend_service"] +} + +type securityLevelKey struct{} + +func (ac *addrConn) securityLevelLocked() string { + var secLevel string + // During disconnection, ac.transport is nil. Fall back to the security level + // stored in the current address during connection. + if ac.transport == nil { + secLevel, _ = ac.curAddr.Attributes.Value(securityLevelKey{}).(string) + return secLevel + } + authInfo := ac.transport.Peer().AuthInfo + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel = ci.GetCommonAuthInfo().SecurityLevel.String() + // Store the security level in the current address' attributes so + // that it remains available for disconnection metrics after the + // transport is closed. + ac.curAddr.Attributes = ac.curAddr.Attributes.WithValue(securityLevelKey{}, secLevel) + } + return secLevel +} + // tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. @@ -1823,7 +1943,7 @@ func (cc *ClientConn) initAuthority() error { } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { cc.authority = auth.OverrideAuthority(cc.parsedTarget) } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint + cc.authority = "localhost" + encodeAuthority(endpoint) } else { cc.authority = encodeAuthority(endpoint) } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 665e790b..06f6c6c7 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -44,8 +44,7 @@ type PerRPCCredentials interface { // A54). uri is the URI of the entry point for the request. When supported // by the underlying implementation, ctx can be used for timeout and // cancellation. Additionally, RequestInfo data will be available via ctx - // to this call. TODO(zhaoq): Define the set of the qualified keys instead - // of leaving it as an arbitrary string. + // to this call. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. @@ -96,10 +95,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. +// ProtocolInfo provides static information regarding transport credentials. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. + // + // Deprecated: this is unused by gRPC. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string @@ -109,7 +109,16 @@ type ProtocolInfo struct { // // Deprecated: please use Peer.AuthInfo. SecurityVersion string - // ServerName is the user-configured server name. + // ServerName is the user-configured server name. 
If set, this overrides + // the default :authority header used for all RPCs on the channel using the + // containing credentials, unless grpc.WithAuthority is set on the channel, + // in which case that setting will take precedence. + // + // This must be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: Users should use grpc.WithAuthority to override the authority + // on a channel instead of configuring the credentials. ServerName string } @@ -120,6 +129,20 @@ type AuthInfo interface { AuthType() string } +// AuthorityValidator validates the authority used to override the `:authority` +// header. This is an optional interface that implementations of AuthInfo can +// implement if they support per-RPC authority overrides. It is invoked when the +// application attempts to override the HTTP/2 `:authority` header using the +// CallAuthority call option. +type AuthorityValidator interface { + // ValidateAuthority checks the authority value used to override the + // `:authority` header. The authority parameter is the override value + // provided by the application via the CallAuthority option. This value + // typically corresponds to the server hostname or endpoint the RPC is + // targeting. It returns non-nil error if the validation fails. + ValidateAuthority(authority string) error +} + // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC // and the caller should not close rawConn. var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") @@ -159,12 +182,17 @@ type TransportCredentials interface { // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials // OverrideServerName specifies the value used for the following: + // // - verifying the hostname on the returned certificates // - as SNI in the client's handshake to support virtual hosting // - as the value for `:authority` header at stream creation time // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. + // The provided string should be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: this method is unused by gRPC. Users should use + // grpc.WithAuthority to override the authority on a channel instead of + // configuring the credentials. OverrideServerName(string) error } @@ -207,14 +235,32 @@ type RequestInfo struct { AuthInfo AuthInfo } +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) return ri, ok } +// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it. +// +// This RequestInfo will be accessible via RequestInfoFromContext. +// +// Intended to be used from tests for PerRPCCredentials implementations (that +// often need to check connection's SecurityLevel). Should not be used from +// non-test code: the gRPC client already prepares a context with the correct +// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata. +// +// This API is experimental. 
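// Illustrative sketch, not part of the vendored diff: a test-style use of
// NewContextWithRequestInfo, which is declared just below. tokenCreds and
// fakeAuthInfo are hypothetical names; the token value and method name are
// placeholders.

package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
)

// fakeAuthInfo carries only the security level, which is all the credentials
// under test care about.
type fakeAuthInfo struct{ credentials.CommonAuthInfo }

func (fakeAuthInfo) AuthType() string { return "fake" }

// tokenCreds is a toy PerRPCCredentials that refuses to attach its token on
// connections below IntegrityOnly.
type tokenCreds struct{}

func (tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	ri, ok := credentials.RequestInfoFromContext(ctx)
	if !ok {
		return nil, fmt.Errorf("no RequestInfo in context")
	}
	ci, ok := ri.AuthInfo.(interface{ GetCommonAuthInfo() credentials.CommonAuthInfo })
	if !ok || ci.GetCommonAuthInfo().SecurityLevel < credentials.IntegrityOnly {
		return nil, fmt.Errorf("connection is not secure enough to send the token")
	}
	return map[string]string{"authorization": "Bearer placeholder-token"}, nil
}

func (tokenCreds) RequireTransportSecurity() bool { return true }

// In a test, the context is prepared the same way the gRPC client would
// prepare it before calling GetRequestMetadata.
func exampleUsage() error {
	ri := credentials.RequestInfo{
		Method:   "/example.Service/Method",
		AuthInfo: fakeAuthInfo{credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}},
	}
	ctx := credentials.NewContextWithRequestInfo(context.Background(), ri)
	_, err := tokenCreds{}.GetRequestMetadata(ctx, "https://example.com/example.Service")
	return err
}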
+func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes // it possible to pass arbitrary data to the handshaker from gRPC, resolver, // balancer etc. Individual credential implementations control the actual diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 4c805c64..93156c0f 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -30,7 +30,7 @@ import ( // NewCredentials returns a credentials which disables transport security. // // Note that using this credentials with per-RPC credentials which require -// transport security is incompatible and will cause grpc.Dial() to fail. +// transport security is incompatible and will cause RPCs to fail. func NewCredentials() credentials.TransportCredentials { return insecureTC{} } @@ -71,6 +71,12 @@ func (info) AuthType() string { return "insecure" } +// ValidateAuthority allows any value to be overridden for the :authority +// header. +func (info) ValidateAuthority(string) error { + return nil +} + // insecureBundle implements an insecure bundle. // An insecure bundle provides a thin wrapper around insecureTC to support // the credentials.Bundle interface. diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index bd5fe22b..8277be7d 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -22,6 +22,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "net" "net/url" @@ -50,6 +51,21 @@ func (t TLSInfo) AuthType() string { return "tls" } +// ValidateAuthority validates the provided authority being used to override the +// :authority header by verifying it against the peer certificates. It returns a +// non-nil error if the validation fails. +func (t TLSInfo) ValidateAuthority(authority string) error { + var errs []error + for _, cert := range t.State.PeerCertificates { + var err error + if err = cert.VerifyHostname(authority); err == nil { + return nil + } + errs = append(errs, err) + } + return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...)) +} + // cipherSuiteLookup returns the string version of a TLS cipher suite ID. func cipherSuiteLookup(cipherSuiteID uint16) string { for _, s := range tls.CipherSuites() { @@ -94,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. - serverName = authority - } - cfg.ServerName = serverName + + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
+ serverName = authority } + cfg.ServerName = serverName + conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { @@ -243,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config { // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } @@ -255,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := os.ReadFile(certFile) if err != nil { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 405a2ffe..7a5ac2e7 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -213,6 +213,7 @@ func WithReadBufferSize(s int) DialOption { func WithInitialWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -222,6 +223,26 @@ func WithInitialWindowSize(s int32) DialOption { func WithInitialConnWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticStreamWindowSize returns a DialOption which sets the initial +// stream window size to the value provided and disables dynamic flow control. +func WithStaticStreamWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticConnWindowSize returns a DialOption which sets the initial +// connection window size to the value provided and disables dynamic flow +// control. 
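// Illustrative sketch, not part of the vendored diff: using the static
// window-size options added in this hunk. WithInitialWindowSize and
// WithInitialConnWindowSize now also disable BDP-based dynamic flow control,
// while WithStaticStreamWindowSize (above) and WithStaticConnWindowSize (just
// below) make that explicit. Target and sizes are placeholders.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialWithStaticWindows(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// 1 MiB per-stream window; disables dynamic flow control.
		grpc.WithStaticStreamWindowSize(1<<20),
		// 4 MiB connection-level window; also disables dynamic flow control.
		grpc.WithStaticConnWindowSize(4<<20),
	)
}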
+func WithStaticConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -360,7 +381,7 @@ func WithReturnConnectionError() DialOption { // // Note that using this DialOption with per-RPC credentials (through // WithCredentialsBundle or WithPerRPCCredentials) which require transport -// security is incompatible and will cause grpc.Dial() to fail. +// security is incompatible and will cause RPCs to fail. // // Deprecated: use WithTransportCredentials and insecure.NewCredentials() // instead. Will be supported throughout 1.x. @@ -587,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header and as the server name in authentication handshake. +// This overrides all other ways of setting authority on the channel, but can be +// overridden per-call by using grpc.CallAuthority. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 11d0ae14..dadd21e4 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -27,8 +27,10 @@ package encoding import ( "io" + "slices" "strings" + "google.golang.org/grpc/encoding/internal" "google.golang.org/grpc/internal/grpcutil" ) @@ -36,6 +38,24 @@ import ( // It is intended for grpc internal use only. const Identity = "identity" +func init() { + internal.RegisterCompressorForTesting = func(c Compressor) func() { + name := c.Name() + curCompressor, found := registeredCompressor[name] + RegisterCompressor(c) + return func() { + if found { + registeredCompressor[name] = curCompressor + return + } + delete(registeredCompressor, name) + grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool { + return s == name + }) + } + } +} + // Compressor is used for compressing and decompressing when sending or // receiving messages. // diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go new file mode 100644 index 00000000..ee9acb43 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the encoding package. +package internal + +// RegisterCompressorForTesting registers a compressor in the global compressor +// registry. It returns a cleanup function that should be called at the end +// of the test to unregister the compressor. +// +// This prevents compressors registered in one test from appearing in the +// encoding headers of subsequent tests. 
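// Illustrative sketch, not part of the vendored diff: the public counterpart
// of the registry manipulated above is encoding.RegisterCompressor. A
// gzip-backed compressor registered under a hypothetical name might look like
// this (gRPC already ships a real one in google.golang.org/grpc/encoding/gzip).

package example

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

type gzipCompressor struct{}

func (gzipCompressor) Name() string { return "gzip-example" } // must not be "identity"

func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

func init() {
	// Registration is not thread-safe and must happen before any RPCs,
	// typically from an init function.
	encoding.RegisterCompressor(gzipCompressor{})
}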
+var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func() diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index ceec319d..1ab874c7 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } + // Important: if we remove this Size call then we cannot use + // UseCachedSize in MarshalOptions below. size := proto.Size(vv) + + // MarshalOptions with UseCachedSize allows reusing the result from the + // previous Size call. This is safe here because: + // + // 1. We just computed the size. + // 2. We assume the message is not being mutated concurrently. + // + // Important: If the proto.Size call above is removed, using UseCachedSize + // becomes unsafe and may lead to incorrect marshaling. + // + // For more details, see the doc of UseCachedSize: + // https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions + marshalOptions := proto.MarshalOptions{UseCachedSize: true} + if mem.IsBelowBufferPoolingThreshold(size) { - buf, err := proto.Marshal(vv) + buf, err := marshalOptions.Marshal(vv) if err != nil { return nil, err } @@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { } else { pool := mem.DefaultBufferPool() buf := pool.Get(size) - if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil { pool.Put(buf) return nil, err } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index ad75313a..472813f5 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -75,6 +75,8 @@ const ( MetricTypeIntHisto MetricTypeFloatHisto MetricTypeIntGauge + MetricTypeIntUpDownCount + MetricTypeIntAsyncGauge ) // Int64CountHandle is a typed handle for a int count metric. This handle @@ -93,6 +95,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels . recorder.RecordInt64Count(h, incr, labels...) } +// Int64UpDownCountHandle is a typed handle for an int up-down counter metric. +// This handle is passed at the recording point in order to know which metric +// to record on. +type Int64UpDownCountHandle MetricDescriptor + +// Descriptor returns the int64 up-down counter handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 up-down counter value on the metrics recorder provided. +// The value 'v' can be positive to increment or negative to decrement. +func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) { + recorder.RecordInt64UpDownCount(h, v, labels...) +} + // Float64CountHandle is a typed handle for a float count metric. This handle is // passed at the recording point in order to know which metric to record on. type Float64CountHandle MetricDescriptor @@ -154,6 +173,30 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . recorder.RecordInt64Gauge(h, incr, labels...) 
} +// AsyncMetric is a marker interface for asynchronous metric types. +type AsyncMetric interface { + isAsync() + Descriptor() *MetricDescriptor +} + +// Int64AsyncGaugeHandle is a typed handle for an int gauge metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Int64AsyncGaugeHandle MetricDescriptor + +// isAsync implements the AsyncMetric interface. +func (h *Int64AsyncGaugeHandle) isAsync() {} + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64AsyncGaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 gauge value on the metrics recorder provided. +func (h *Int64AsyncGaugeHandle) Record(recorder AsyncMetricsRecorder, value int64, labels ...string) { + recorder.RecordInt64AsyncGauge(h, value, labels...) +} + // registeredMetrics are the registered metric descriptor names. var registeredMetrics = make(map[string]bool) @@ -249,6 +292,35 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { return (*Int64GaugeHandle)(descPtr) } +// RegisterInt64UpDownCount registers the metric description onto the global registry. +// It returns a typed handle to use for recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle { + registerMetric(descriptor.Name, descriptor.Default) + // Set the specific metric type for the up-down counter + descriptor.Type = MetricTypeIntUpDownCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64UpDownCountHandle)(descPtr) +} + +// RegisterInt64AsyncGauge registers the metric description onto the global registry. +// It returns a typed handle to use for recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64AsyncGauge(descriptor MetricDescriptor) *Int64AsyncGaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntAsyncGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64AsyncGaugeHandle)(descPtr) +} + // snapshotMetricsRegistryForTesting snapshots the global data of the metrics // registry. Returns a cleanup function that sets the metrics registry to its // original state. diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index ee142360..d7d404cb 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -38,6 +38,16 @@ type MetricsRecorder interface { // RecordInt64Gauge records the measurement alongside labels on the int // gauge associated with the provided handle. RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) + // RecordInt64UpDownCounter records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string) +} + +// AsyncMetricsRecorder records on asynchronous metrics derived from metric registry. 
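// Illustrative sketch, not part of the vendored diff: registering and
// recording an up-down counter through the RegisterInt64UpDownCount API added
// above. The metric name, unit and labels are hypothetical; inside gRPC the
// recorder passed to Record is the channel's MetricsRecorderList.

package example

import estats "google.golang.org/grpc/experimental/stats"

var openConnsMetric *estats.Int64UpDownCountHandle

func init() {
	// Metric registration must happen at init time; the registry is not
	// thread-safe and panics on duplicate names.
	openConnsMetric = estats.RegisterInt64UpDownCount(estats.MetricDescriptor{
		Name:        "example.client.connections.open",
		Description: "Number of connections currently open.",
		Unit:        "{connection}",
		Labels:      []string{"grpc.target"},
		Default:     false,
	})
}

// recordConnOpen and recordConnClosed would be called with whatever
// MetricsRecorder the component has access to.
func recordConnOpen(rec estats.MetricsRecorder, target string)   { openConnsMetric.Record(rec, 1, target) }
func recordConnClosed(rec estats.MetricsRecorder, target string) { openConnsMetric.Record(rec, -1, target) }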
+type AsyncMetricsRecorder interface { + // RecordInt64AsyncGauge records the measurement alongside labels on the int + // count associated with the provided handle asynchronously + RecordInt64AsyncGauge(handle *Int64AsyncGaugeHandle, incr int64, labels ...string) } // Metrics is an experimental legacy alias of the now-stable stats.MetricSet. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 94177b05..8f7d9f6b 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -178,46 +178,112 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { return HealthCheckResponse_UNKNOWN } +type HealthListRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListRequest) Reset() { + *x = HealthListRequest{} + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListRequest) ProtoMessage() {} + +func (x *HealthListRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead. +func (*HealthListRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2} +} + +type HealthListResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // statuses contains all the services and their respective status. + Statuses map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListResponse) Reset() { + *x = HealthListResponse{} + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListResponse) ProtoMessage() {} + +func (x *HealthListResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead. 
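// Illustrative sketch, not part of the vendored diff: querying the new List
// RPC that these messages support (the client stub is added further down in
// this patch). Connection setup is omitted; conn is assumed to be an
// established *grpc.ClientConn.

package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
)

func printServiceHealth(ctx context.Context, conn *grpc.ClientConn) error {
	client := healthgrpc.NewHealthClient(conn)
	resp, err := client.List(ctx, &healthgrpc.HealthListRequest{})
	if err != nil {
		return err // e.g. RESOURCE_EXHAUSTED when too many services exist
	}
	for service, st := range resp.GetStatuses() {
		fmt.Printf("%q: %v\n", service, st.GetStatus())
	}
	return nil
}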
+func (*HealthListResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3} +} + +func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse { + if x != nil { + return x.Statuses + } + return nil +} + var File_grpc_health_v1_health_proto protoreflect.FileDescriptor -var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ - 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, - 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, - 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, - 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, - 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, - 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, - 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x42, 0x70, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, - 0x65, 0x61, 0x6c, 
0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x56, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +const file_grpc_health_v1_health_proto_rawDesc = "" + + "\n" + + "\x1bgrpc/health/v1/health.proto\x12\x0egrpc.health.v1\".\n" + + "\x12HealthCheckRequest\x12\x18\n" + + "\aservice\x18\x01 \x01(\tR\aservice\"\xb1\x01\n" + + "\x13HealthCheckResponse\x12I\n" + + "\x06status\x18\x01 \x01(\x0e21.grpc.health.v1.HealthCheckResponse.ServingStatusR\x06status\"O\n" + + "\rServingStatus\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\v\n" + + "\aSERVING\x10\x01\x12\x0f\n" + + "\vNOT_SERVING\x10\x02\x12\x13\n" + + "\x0fSERVICE_UNKNOWN\x10\x03\"\x13\n" + + "\x11HealthListRequest\"\xc4\x01\n" + + "\x12HealthListResponse\x12L\n" + + "\bstatuses\x18\x01 \x03(\v20.grpc.health.v1.HealthListResponse.StatusesEntryR\bstatuses\x1a`\n" + + "\rStatusesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x129\n" + + "\x05value\x18\x02 \x01(\v2#.grpc.health.v1.HealthCheckResponseR\x05value:\x028\x012\xfd\x01\n" + + "\x06Health\x12P\n" + + "\x05Check\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse\x12M\n" + + "\x04List\x12!.grpc.health.v1.HealthListRequest\x1a\".grpc.health.v1.HealthListResponse\x12R\n" + + "\x05Watch\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse0\x01Bp\n" + + "\x11io.grpc.health.v1B\vHealthProtoP\x01Z,google.golang.org/grpc/health/grpc_health_v1\xa2\x02\fGrpcHealthV1\xaa\x02\x0eGrpc.Health.V1b\x06proto3" var ( file_grpc_health_v1_health_proto_rawDescOnce sync.Once @@ -232,23 +298,30 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { } var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse + (*HealthListRequest)(nil), // 3: grpc.health.v1.HealthListRequest + (*HealthListResponse)(nil), // 4: grpc.health.v1.HealthListResponse + nil, // 5: grpc.health.v1.HealthListResponse.StatusesEntry } var file_grpc_health_v1_health_proto_depIdxs = []int32{ 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus - 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest - 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest - 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse - 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry + 2, // 2: 
grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse + 1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest + 1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse + 2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_grpc_health_v1_health_proto_init() } @@ -262,7 +335,7 @@ func file_grpc_health_v1_health_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)), NumEnums: 1, - NumMessages: 2, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index f96b8ab4..e99cd5c8 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.5.1 +// - protoc-gen-go-grpc v1.6.0 // - protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -37,6 +37,7 @@ const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_List_FullMethodName = "/grpc.health.v1.Health/List" Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" ) @@ -55,9 +56,19 @@ type HealthClient interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever @@ -94,6 +105,16 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . 
return out, nil } +func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthListResponse) + err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) @@ -128,9 +149,19 @@ type HealthServer interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(context.Context, *HealthListRequest) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever @@ -157,10 +188,13 @@ type HealthServer interface { type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") + return nil, status.Error(codes.Unimplemented, "method Check not implemented") +} +func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) { + return nil, status.Error(codes.Unimplemented, "method List not implemented") } func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") + return status.Error(codes.Unimplemented, "method Watch not implemented") } func (UnimplementedHealthServer) testEmbeddedByValue() {} @@ -200,6 +234,24 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Health_List_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).List(ctx, req.(*HealthListRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { m := 
new(HealthCheckRequest) if err := stream.RecvMsg(m); err != nil { @@ -222,6 +274,10 @@ var Health_ServiceDesc = grpc.ServiceDesc{ MethodName: "Check", Handler: _Health_Check_Handler, }, + { + MethodName: "List", + Handler: _Health_List_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index fbc1ca35..f38de74a 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -67,6 +67,10 @@ type Balancer struct { // balancerCurrent before the UpdateSubConnState is called on the // balancerCurrent. currentMu sync.Mutex + + // activeGoroutines tracks all the goroutines that this balancer has started + // and that should be waited on when the balancer closes. + activeGoroutines sync.WaitGroup } // swap swaps out the current lb with the pending lb and updates the ClientConn. @@ -76,7 +80,9 @@ func (gsb *Balancer) swap() { cur := gsb.balancerCurrent gsb.balancerCurrent = gsb.balancerPending gsb.balancerPending = nil + gsb.activeGoroutines.Add(1) go func() { + defer gsb.activeGoroutines.Done() gsb.currentMu.Lock() defer gsb.currentMu.Unlock() cur.Close() @@ -223,15 +229,7 @@ func (gsb *Balancer) ExitIdle() { // There is no need to protect this read with a mutex, as the write to the // Balancer field happens in SwitchTo, which completes before this can be // called. - if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - return - } - gsb.mu.Lock() - defer gsb.mu.Unlock() - for sc := range balToUpdate.subconns { - sc.Connect() - } + balToUpdate.ExitIdle() } // updateSubConnState forwards the update to the appropriate child. @@ -282,6 +280,7 @@ func (gsb *Balancer) Close() { currentBalancerToClose.Close() pendingBalancerToClose.Close() + gsb.activeGoroutines.Wait() } // balancerWrapper wraps a balancer.Balancer, and overrides some Balancer @@ -332,7 +331,12 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) { defer bw.gsb.mu.Unlock() bw.lastState = state + // If Close() acquires the mutex before UpdateState(), the balancer + // will already have been removed from the current or pending state when + // reaching this point. if !bw.gsb.balancerCurrentOrPending(bw) { + // Returning here ensures that (*Balancer).swap() is not invoked after + // (*Balancer).Close() and therefore prevents "use after close". return } diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 11f91668..467392b8 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -83,6 +83,7 @@ func (b *Unbounded) Load() { default: } } else if b.closing && !b.closed { + b.closed = true close(b.c) } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 2bffe477..3b7ba596 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -194,7 +194,7 @@ func (r RefChannelType) String() string { // If channelz is not turned ON, this will simply log the event descriptions. 
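// Illustrative sketch, not part of the vendored diff: the gracefulswitch
// change in this hunk tracks the goroutine spawned in swap() with a WaitGroup
// so Close() can wait for it. The same shutdown pattern in isolation; closer
// and its fields are hypothetical names.

package example

import "sync"

type closer struct {
	mu     sync.Mutex
	closed bool
	active sync.WaitGroup
}

// doAsync runs f in the background, but only while the closer is still open,
// and registers the goroutine so Close can wait for it.
func (c *closer) doAsync(f func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return
	}
	c.active.Add(1)
	go func() {
		defer c.active.Done()
		f()
	}()
}

// Close marks the closer as closed and blocks until every goroutine started
// via doAsync has finished, so no work runs "after close".
func (c *closer) Close() {
	c.mu.Lock()
	c.closed = true
	c.mu.Unlock()
	c.active.Wait()
}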
func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { // Log only the trace description associated with the bottom most entity. - d := fmt.Sprintf("[%s]%s", e, desc.Desc) + d := fmt.Sprintf("[%s] %s", e, desc.Desc) switch desc.Severity { case CtUnknown, CtInfo: l.InfoDepth(depth+1, d) diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 9deee7f6..48b22d9c 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -20,20 +20,6 @@ import ( "context" ) -// requestInfoKey is a struct to be used as the key to store RequestInfo in a -// context. -type requestInfoKey struct{} - -// NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri any) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) -} - -// RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) any { - return ctx.Value(requestInfoKey{}) -} - // clientHandshakeInfoKey is a struct used as the key to store // ClientHandshakeInfo in a context. type clientHandshakeInfoKey struct{} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 1e42b6fd..6414ee4b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,35 +26,62 @@ import ( ) var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + // EnableTXTServiceConfig is set if the DNS resolver should perform TXT + // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not + // "false"). + EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true) + + // TXTErrIgnore is set if TXT errors should be ignored + // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // LeastRequestLB is set if we should support the least_request_experimental - // LB policy, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". - LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // XDSFallbackSupport is the env variable that controls whether support for - // xDS fallback is turned on. 
If this is unset or is false, only the first - // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) - // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be enabled by - // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "true". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) + + // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash + // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by + // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the + // implementation of A76 is stable, we will flip the default value to false + // in a subsequent release. A final release will remove this environment + // variable, enabling the new behavior unconditionally. + XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true) + + // RingHashSetRequestHashKey is set if the ring hash balancer can get the + // request hash header by setting the "requestHashHeader" field, according + // to gRFC A76. It can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". + RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) + + // ALTSHandshakerKeepaliveParams is set if we should add the + // KeepaliveParams when dial the ALTS handshaker service. + ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false) + + // EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443 + // to a target address that lacks one. This flag only has an effect when all of + // the following conditions are met: + // - A connect proxy is being used. + // - Target resolution is disabled. + // - The DNS resolver is being used. + EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true) + + // XDSAuthorityRewrite indicates whether xDS authority rewriting is enabled. + // This feature is defined in gRFC A81 and is enabled by setting the + // environment variable GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE to "true". + XDSAuthorityRewrite = boolFromEnv("GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 2eb97f83..7685d08b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -63,4 +63,20 @@ var ( // For more details, see: // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md. XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false) + + // XDSSPIFFEEnabled controls if SPIFFE Bundle Maps can be used as roots of + // trust. For more details, see: + // https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md + XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false) + + // XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata + // configuring use of an HTTP CONNECT proxy via xDS from cluster resources. 
+ // For more details, see: + // https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md + XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false) + + // XDSBootstrapCallCredsEnabled controls if call credentials can be used in + // xDS bootstrap configuration via the `call_creds` field. For more details, + // see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md + XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7617be21..c90cc51b 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -25,4 +25,8 @@ var ( // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + + // AcceptCompressors is implemented by the grpc package and returns + // a call option that restricts the grpc-accept-encoding header for a call. + AcceptCompressors any // func(...string) grpc.CallOption ) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 8e8e8612..9b6d8a1f 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func (cs *CallbackSerializer) run(ctx context.Context) { defer close(cs.done) - // TODO: when Go 1.21 is the oldest supported version, this loop and Close - // can be replaced with: - // - // context.AfterFunc(ctx, cs.callbacks.Close) - for ctx.Err() == nil { - select { - case <-ctx.Done(): - // Do nothing here. Next iteration of the for loop will not happen, - // since ctx.Err() would be non-nil. - case cb := <-cs.callbacks.Get(): - cs.callbacks.Load() - cb.(func(context.Context))(ctx) - } - } - - // Close the buffer to prevent new callbacks from being added. - cs.callbacks.Close() + // Close the buffer when the context is canceled + // to prevent new callbacks from being added. + context.AfterFunc(ctx, cs.callbacks.Close) - // Run all pending callbacks. + // Run all callbacks. for cb := range cs.callbacks.Get() { cs.callbacks.Load() cb.(func(context.Context))(ctx) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go index fbe697c3..d788c249 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -21,28 +21,25 @@ package grpcsync import ( - "sync" "sync/atomic" ) // Event represents a one-time event that may occur in the future. type Event struct { - fired int32 + fired atomic.Bool c chan struct{} - o sync.Once } // Fire causes e to complete. It is safe to call multiple times, and // concurrently. It returns true iff this call to Fire caused the signaling -// channel returned by Done to close. +// channel returned by Done to close. If Fire returns false, it is possible +// the Done channel has not been closed yet. 
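// Illustrative sketch, not part of the vendored diff: the CallbackSerializer
// above now relies on context.AfterFunc to close its queue when the context
// is canceled, then drains whatever is still pending. The same idea against a
// plain buffered channel; drain and the job values are hypothetical.

package main

import (
	"context"
	"fmt"
)

func drain(ctx context.Context, jobs chan func()) {
	// When ctx is canceled, close the channel so the range below terminates
	// once the already-queued jobs have been consumed.
	stop := context.AfterFunc(ctx, func() { close(jobs) })
	defer stop()

	for job := range jobs {
		job()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	jobs := make(chan func(), 8)
	for i := 1; i <= 3; i++ {
		i := i
		jobs <- func() { fmt.Println("job", i) }
	}
	// Cancel before draining: the queued jobs still run, then drain returns.
	cancel()
	drain(ctx, jobs)
}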
func (e *Event) Fire() bool { - ret := false - e.o.Do(func() { - atomic.StoreInt32(&e.fired, 1) + if e.fired.CompareAndSwap(false, true) { close(e.c) - ret = true - }) - return ret + return true + } + return false } // Done returns a channel that will be closed when Fire is called. @@ -52,7 +49,7 @@ func (e *Event) Done() <-chan struct{} { // HasFired returns true if Fire has been called. func (e *Event) HasFired() bool { - return atomic.LoadInt32(&e.fired) == 1 + return e.fired.Load() } // NewEvent returns a new, ready-to-use Event. diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index 2c13ee9d..d3cd24f8 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -21,7 +21,6 @@ package idle import ( - "fmt" "math" "sync" "sync/atomic" @@ -33,15 +32,15 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { return time.AfterFunc(d, f) } -// Enforcer is the functionality provided by grpc.ClientConn to enter -// and exit from idle mode. -type Enforcer interface { - ExitIdleMode() error +// ClientConn is the functionality provided by grpc.ClientConn to enter and exit +// from idle mode. +type ClientConn interface { + ExitIdleMode() EnterIdleMode() } -// Manager implements idleness detection and calls the configured Enforcer to -// enter/exit idle mode when appropriate. Must be created by NewManager. +// Manager implements idleness detection and calls the ClientConn to enter/exit +// idle mode when appropriate. Must be created by NewManager. type Manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. @@ -51,8 +50,8 @@ type Manager struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. - enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout time.Duration + cc ClientConn // Functionality provided by grpc.ClientConn. + timeout time.Duration // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: @@ -72,9 +71,9 @@ type Manager struct { // NewManager creates a new idleness manager implementation for the // given idle timeout. It begins in idle mode. -func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { +func NewManager(cc ClientConn, timeout time.Duration) *Manager { return &Manager{ - enforcer: enforcer, + cc: cc, timeout: timeout, actuallyIdle: true, activeCallsCount: -math.MaxInt32, @@ -127,7 +126,7 @@ func (m *Manager) handleIdleTimeout() { // Now that we've checked that there has been no activity, attempt to enter // idle mode, which is very likely to succeed. - if m.tryEnterIdleMode() { + if m.tryEnterIdleMode(true) { // Successfully entered idle mode. No timer needed until we exit idle. return } @@ -142,10 +141,13 @@ func (m *Manager) handleIdleTimeout() { // that, it performs a last minute check to ensure that no new RPC has come in, // making the channel active. // +// checkActivity controls if a check for RPC activity, since the last time the +// idle_timeout fired, is made. + // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. 
-func (m *Manager) tryEnterIdleMode() bool { +func (m *Manager) tryEnterIdleMode(checkActivity bool) bool { // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() // that the channel is either in idle mode or is trying to get there. if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { @@ -166,7 +168,7 @@ func (m *Manager) tryEnterIdleMode() bool { atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } - if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + if checkActivity && atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // A very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. @@ -177,44 +179,37 @@ func (m *Manager) tryEnterIdleMode() bool { // No new RPCs have come in since we set the active calls count value to // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode // unconditionally now. - m.enforcer.EnterIdleMode() + m.cc.EnterIdleMode() m.actuallyIdle = true return true } // EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { - m.tryEnterIdleMode() + m.tryEnterIdleMode(false) } // OnCallBegin is invoked at the start of every RPC. -func (m *Manager) OnCallBegin() error { +func (m *Manager) OnCallBegin() { if m.isClosed() { - return nil + return } if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { // Channel is not idle now. Set the activity bit and allow the call. atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) - return nil + return } // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := m.ExitIdleMode(); err != nil { - // Undo the increment to calls count, and return an error causing the - // RPC to fail. - atomic.AddInt32(&m.activeCallsCount, -1) - return err - } - + m.ExitIdleMode() atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) - return nil } -// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// ExitIdleMode instructs m to call the ClientConn's ExitIdleMode and update its // internal state. -func (m *Manager) ExitIdleMode() error { +func (m *Manager) ExitIdleMode() { // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. m.idleMu.Lock() defer m.idleMu.Unlock() @@ -231,12 +226,10 @@ func (m *Manager) ExitIdleMode() error { // m.ExitIdleMode. // // In any case, there is nothing to do here. - return nil + return } - if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("failed to exit idle mode: %w", err) - } + m.cc.ExitIdleMode() // Undo the idle entry process. This also respects any new RPC attempts. atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) @@ -244,7 +237,23 @@ func (m *Manager) ExitIdleMode() error { // Start a new timer to fire after the configured idle timeout. m.resetIdleTimerLocked(m.timeout) - return nil +} + +// UnsafeSetNotIdle instructs the Manager to update its internal state to +// reflect the reality that the channel is no longer in IDLE mode. +// +// N.B. This method is intended only for internal use by the gRPC client +// when it exits IDLE mode **manually** from `Dial`. The callsite must ensure: +// - The channel was **actually in IDLE mode** immediately prior to the call. +// - There is **no concurrent activity** that could cause the channel to exit +// IDLE mode *naturally* at the same time. 
+func (m *Manager) UnsafeSetNotIdle() { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + m.resetIdleTimerLocked(m.timeout) } // OnCallEnd is invoked at the end of every RPC. diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 13e1f386..27bef83d 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -182,35 +182,6 @@ var ( // other features, including the CSDS service. NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster - // Specifier Plugin for testing purposes, regardless of the XDSRLS environment - // variable. - // - // TODO: Remove this function once the RLS env var is removed. - RegisterRLSClusterSpecifierPluginForTesting func() - - // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster - // Specifier Plugin for testing purposes. This is needed because there is no way - // to unregister the RLS Cluster Specifier Plugin after registering it solely - // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). - // - // TODO: Remove this function once the RLS env var is removed. - UnregisterRLSClusterSpecifierPluginForTesting func() - - // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing - // purposes, regardless of the RBAC environment variable. - // - // TODO: Remove this function once the RBAC env var is removed. - RegisterRBACHTTPFilterForTesting func() - - // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for - // testing purposes. This is needed because there is no way to unregister the - // HTTP Filter after registering it solely for testing purposes using - // RegisterRBACHTTPFilterForTesting(). - // - // TODO: Remove this function once the RBAC env var is removed. - UnregisterRBACHTTPFilterForTesting func() - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) @@ -259,6 +230,24 @@ var ( // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for // testing purposes. SetBufferPoolingThresholdForTesting any // func(int) + + // TimeAfterFunc is used to create timers. During tests the function is + // replaced to track allocated timers and fail the test if a timer isn't + // cancelled. + TimeAfterFunc = func(d time.Duration, f func()) Timer { + return time.AfterFunc(d, f) + } + + // NewStreamWaitingForResolver is a test hook that is triggered when a + // new stream blocks while waiting for name resolution. This can be + // used in tests to synchronize resolver updates and avoid race conditions. + // When set, the function will be called before the stream enters + // the blocking state. + NewStreamWaitingForResolver = func() {} + + // AddressToTelemetryLabels is an xDS-provided function to extract telemetry + // labels from a resolver.Address. Callers must assert its type before calling. 
+ AddressToTelemetryLabels any // func(addr resolver.Address) map[string]string ) // HealthChecker defines the signature of the client-side LB channel health @@ -300,3 +289,9 @@ type EnforceSubConnEmbedding interface { type EnforceClientConnEmbedding interface { enforceClientConnEmbedding() } + +// Timer is an interface to allow injecting different time.Timer implementations +// during tests. +type Timer interface { + Stop() bool +} diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 900bfb71..c4055bc0 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -97,13 +97,11 @@ func hasNotPrintable(msg string) bool { return false } -// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : -// -// - key must contain one or more characters. -// - the characters in the key must be contained in [0-9 a-z _ - .]. -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. -// - the characters in the every value must be printable (in [%x20-%x7E]). -func ValidatePair(key string, vals ...string) error { +// ValidateKey validates a key with the following rules (pseudo-headers are +// skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +func ValidateKey(key string) error { // key should not be empty if key == "" { return fmt.Errorf("there is an empty key in the header") @@ -119,6 +117,20 @@ func ValidatePair(key string, vals ...string) error { return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) } } + return nil +} + +// ValidatePair validates a key-value pair with the following rules +// (pseudo-header are skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding +// value is performed. +// - the characters in every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + if err := ValidateKey(key); err != nil { + return err + } if strings.HasSuffix(key, "-bin") { return nil } diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index 7b93f692..5bfa67b7 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -22,12 +22,16 @@ package delegatingresolver import ( "fmt" + "net" "net/http" "net/url" "sync" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/proxyattributes" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -38,9 +42,11 @@ var ( HTTPSProxyFromEnvironment = http.ProxyFromEnvironment ) +const defaultPort = "443" + // delegatingResolver manages both target URI and proxy address resolution by // delegating these tasks to separate child resolvers. Essentially, it acts as -// a intermediary between the gRPC ClientConn and the child resolvers. 
+// an intermediary between the gRPC ClientConn and the child resolvers. // // It implements the [resolver.Resolver] interface. type delegatingResolver struct { @@ -48,6 +54,9 @@ type delegatingResolver struct { cc resolver.ClientConn // gRPC ClientConn proxyURL *url.URL // proxy URL, derived from proxy environment and target + // We do not hold both mu and childMu in the same goroutine. Avoid holding + // both locks when calling into the child, as the child resolver may + // synchronously callback into the channel. mu sync.Mutex // protects all the fields below targetResolverState *resolver.State // state of the target resolver proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured @@ -66,8 +75,8 @@ func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {} func (nopResolver) Close() {} -// proxyURLForTarget determines the proxy URL for the given address based on -// the environment. It can return the following: +// proxyURLForTarget determines the proxy URL for the given address based on the +// environment. It can return the following: // - nil URL, nil error: No proxy is configured or the address is excluded // using the `NO_PROXY` environment variable or if req.URL.Host is // "localhost" (with or without // a port number) @@ -86,7 +95,8 @@ func proxyURLForTarget(address string) (*url.URL, error) { // resolvers: // - one to resolve the proxy address specified using the supported // environment variables. This uses the registered resolver for the "dns" -// scheme. +// scheme. It is lazily built when a target resolver update contains at least +// one TCP address. // - one to resolve the target URI using the resolver specified by the scheme // in the target URI or specified by the user using the WithResolvers dial // option. As a special case, if the target URI's scheme is "dns" and a @@ -95,14 +105,24 @@ func proxyURLForTarget(address string) (*url.URL, error) { // resolution is enabled using the dial option. func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) { r := &delegatingResolver{ - target: target, - cc: cc, + target: target, + cc: cc, + proxyResolver: nopResolver{}, + targetResolver: nopResolver{}, } + addr := target.Endpoint() var err error - r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget { + addr, err = parseTarget(addr) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err) + } + } + + r.proxyURL, err = proxyURLForTarget(addr) if err != nil { - return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err) } // proxy is not configured or proxy address excluded using `NO_PROXY` env @@ -123,37 +143,26 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti // resolution should be handled by the proxy, not the client. Therefore, we // bypass the target resolver and store the unresolved target address. 
if target.URL.Scheme == "dns" && !targetResolutionEnabled { - state := resolver.State{ - Addresses: []resolver.Address{{Addr: target.Endpoint()}}, - Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, - } - r.targetResolverState = &state - } else { - wcc := &wrappingClientConn{ - stateListener: r.updateTargetResolverState, - parent: r, - } - if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil { - return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err) + r.targetResolverState = &resolver.State{ + Addresses: []resolver.Address{{Addr: addr}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}}, } + r.updateTargetResolverState(*r.targetResolverState) + return r, nil } - - if r.proxyResolver, err = r.proxyURIResolver(opts); err != nil { - return nil, fmt.Errorf("delegating_resolver: failed to build resolver for proxy URL %q: %v", r.proxyURL, err) - } - - if r.targetResolver == nil { - r.targetResolver = nopResolver{} + wcc := &wrappingClientConn{ + stateListener: r.updateTargetResolverState, + parent: r, } - if r.proxyResolver == nil { - r.proxyResolver = nopResolver{} + if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil { + return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err) } return r, nil } -// proxyURIResolver creates a resolver for resolving proxy URIs using the -// "dns" scheme. It adjusts the proxyURL to conform to the "dns:///" format and -// builds a resolver with a wrappingClientConn to capture resolved addresses. +// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns" +// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds +// a resolver with a wrappingClientConn to capture resolved addresses. func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) { proxyBuilder := resolver.Get("dns") if proxyBuilder == nil { @@ -189,18 +198,96 @@ func (r *delegatingResolver) Close() { r.proxyResolver = nil } -// updateClientConnStateLocked creates a list of combined addresses by -// pairing each proxy address with every target address. For each pair, it -// generates a new [resolver.Address] using the proxy address, and adding the -// target address as the attribute along with user info. It returns nil if -// either resolver has not sent update even once and returns the error from -// ClientConn update once both resolvers have sent update atleast once. +func needsProxyResolver(state *resolver.State) bool { + for _, addr := range state.Addresses { + if !skipProxy(addr) { + return true + } + } + for _, endpoint := range state.Endpoints { + for _, addr := range endpoint.Addresses { + if !skipProxy(addr) { + return true + } + } + } + return false +} + +// parseTarget takes a target string and ensures it is a valid "host:port" target. +// +// It does the following: +// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"), +// it is returned as is. +// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost", +// returning "localhost:80". +// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort +// is added. +// +// An error is returned for empty targets or targets with a trailing colon +// but no port (e.g., "host:"). 
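A quick illustration of the defaulting rules described above, as a standalone sketch using only the standard library (illustrative only, not part of the patch; withDefaultPort is a made-up name, not the function added below):

package main

import (
	"fmt"
	"net"
)

const defaultPort = "443"

// withDefaultPort mirrors the host:port handling described above.
func withDefaultPort(target string) (string, error) {
	if target == "" {
		return "", fmt.Errorf("missing address")
	}
	host, port, err := net.SplitHostPort(target)
	if err != nil {
		// The port is most likely missing; append the default one.
		return net.JoinHostPort(target, defaultPort), nil
	}
	if port == "" {
		return "", fmt.Errorf("missing port after port-separator colon")
	}
	if host == "" {
		host = "localhost"
	}
	return net.JoinHostPort(host, port), nil
}

func main() {
	fmt.Println(withDefaultPort("example.com"))      // example.com:443 <nil>
	fmt.Println(withDefaultPort("example.com:8080")) // example.com:8080 <nil>
	fmt.Println(withDefaultPort(":80"))              // localhost:80 <nil>
	fmt.Println(withDefaultPort("2001:db8::1"))      // [2001:db8::1]:443 <nil>
	fmt.Println(withDefaultPort("example.com:"))     // "" missing port after port-separator colon
}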
+func parseTarget(target string) (string, error) { + if target == "" { + return "", fmt.Errorf("missing address") + } + + host, port, err := net.SplitHostPort(target) + if err != nil { + // If SplitHostPort fails, it's likely because the port is missing. + // We append the default port and return the result. + return net.JoinHostPort(target, defaultPort), nil + } + + // If SplitHostPort succeeds, we check for edge cases. + if port == "" { + // A success with an empty port means the target had a trailing colon, + // e.g., "host:", which is an error. + return "", fmt.Errorf("missing port after port-separator colon") + } + if host == "" { + // A success with an empty host means the target was like ":80". + // We default the host to "localhost". + host = "localhost" + } + return net.JoinHostPort(host, port), nil +} + +func skipProxy(address resolver.Address) bool { + // Avoid proxy when network is not tcp. + networkType, ok := networktype.Get(address) + if !ok { + networkType, _ = transport.ParseDialTarget(address.Addr) + } + if networkType != "tcp" { + return true + } + + req := &http.Request{URL: &url.URL{ + Scheme: "https", + Host: address.Addr, + }} + // Avoid proxy when address included in `NO_PROXY` environment variable or + // fails to get the proxy address. + url, err := HTTPSProxyFromEnvironment(req) + if err != nil || url == nil { + return true + } + return false +} + +// updateClientConnStateLocked constructs a combined list of addresses by +// pairing each proxy address with every target address of type TCP. For each +// pair, it creates a new [resolver.Address] using the proxy address and +// attaches the corresponding target address and user info as attributes. Target +// addresses that are not of type TCP are appended to the list as-is. The +// function returns nil if either resolver has not yet provided an update, and +// returns the result of ClientConn.UpdateState once both resolvers have +// provided at least one update. func (r *delegatingResolver) updateClientConnStateLocked() error { if r.targetResolverState == nil || r.proxyAddrs == nil { return nil } - curState := *r.targetResolverState // If multiple resolved proxy addresses are present, we send only the // unresolved proxy host and let net.Dial handle the proxy host name // resolution when creating the transport. Sending all resolved addresses @@ -218,24 +305,29 @@ func (r *delegatingResolver) updateClientConnStateLocked() error { } var addresses []resolver.Address for _, targetAddr := range (*r.targetResolverState).Addresses { + if skipProxy(targetAddr) { + addresses = append(addresses, targetAddr) + continue + } addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{ User: r.proxyURL.User, ConnectAddr: targetAddr.Addr, })) } - // Create a list of combined endpoints by pairing all proxy endpoints - // with every target endpoint. Each time, it constructs a new - // [resolver.Endpoint] using the all addresses from all the proxy endpoint - // and the target addresses from one endpoint. The target address and user - // information from the proxy URL are added as attributes to the proxy - // address.The resulting list of addresses is then grouped into endpoints, - // covering all combinations of proxy and target endpoints. + // For each target endpoint, construct a new [resolver.Endpoint] that + // includes all addresses from all proxy endpoints and the addresses from + // that target endpoint, preserving the number of target endpoints. 
var endpoints []resolver.Endpoint for _, endpt := range (*r.targetResolverState).Endpoints { var addrs []resolver.Address - for _, proxyAddr := range r.proxyAddrs { - for _, targetAddr := range endpt.Addresses { + for _, targetAddr := range endpt.Addresses { + // Avoid proxy when network is not tcp. + if skipProxy(targetAddr) { + addrs = append(addrs, targetAddr) + continue + } + for _, proxyAddr := range r.proxyAddrs { addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{ User: r.proxyURL.User, ConnectAddr: targetAddr.Addr, @@ -246,8 +338,9 @@ func (r *delegatingResolver) updateClientConnStateLocked() error { } // Use the targetResolverState for its service config and attributes // contents. The state update is only sent after both the target and proxy - // resolvers have sent their updates, and curState has been updated with - // the combined addresses. + // resolvers have sent their updates, and curState has been updated with the + // combined addresses. + curState := *r.targetResolverState curState.Addresses = addresses curState.Endpoints = endpoints return r.cc.UpdateState(curState) @@ -257,7 +350,8 @@ func (r *delegatingResolver) updateClientConnStateLocked() error { // addresses and endpoints, marking the resolver as ready, and triggering a // state update if both proxy and target resolvers are ready. If the ClientConn // returns a non-nil error, it calls `ResolveNow()` on the target resolver. It -// is a StateListener function of wrappingClientConn passed to the proxy resolver. +// is a StateListener function of wrappingClientConn passed to the proxy +// resolver. func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error { r.mu.Lock() defer r.mu.Unlock() @@ -265,8 +359,8 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro logger.Infof("Addresses received from proxy resolver: %s", state.Addresses) } if len(state.Endpoints) > 0 { - // We expect exactly one address per endpoint because the proxy - // resolver uses "dns" resolution. + // We expect exactly one address per endpoint because the proxy resolver + // uses "dns" resolution. r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints)) for _, endpoint := range state.Endpoints { r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...) @@ -294,11 +388,14 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro return err } -// updateTargetResolverState updates the target resolver state by storing target -// addresses, endpoints, and service config, marking the resolver as ready, and -// triggering a state update if both resolvers are ready. If the ClientConn -// returns a non-nil error, it calls `ResolveNow()` on the proxy resolver. It -// is a StateListener function of wrappingClientConn passed to the target resolver. +// updateTargetResolverState is the StateListener function provided to the +// target resolver via wrappingClientConn. It updates the resolver state and +// marks the target resolver as ready. If the update includes at least one TCP +// address and the proxy resolver has not yet been constructed, it initializes +// the proxy resolver. A combined state update is triggered once both resolvers +// are ready. If all addresses are non-TCP, it proceeds without waiting for the +// proxy resolver. If ClientConn.UpdateState returns a non-nil error, +// ResolveNow() is called on the proxy resolver. 
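For context on the proxy decision used by skipProxy above: the call into HTTPSProxyFromEnvironment defers to the standard library, which honors HTTPS_PROXY and NO_PROXY and never proxies localhost or loopback addresses. A small illustrative sketch (not part of the patch; the hosts and proxy address are made up):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// The environment must be set before the first proxy lookup, because
	// the standard library caches it on first use.
	os.Setenv("HTTPS_PROXY", "http://proxy.internal:3128")
	os.Setenv("NO_PROXY", "localhost,.corp.example.com")

	for _, host := range []string{
		"api.example.com:443",     // proxied
		"db.corp.example.com:443", // excluded via NO_PROXY
		"localhost:50051",         // never proxied
	} {
		req := &http.Request{URL: &url.URL{Scheme: "https", Host: host}}
		proxyURL, err := http.ProxyFromEnvironment(req)
		fmt.Println(host, proxyURL, err)
	}
}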
func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error { r.mu.Lock() defer r.mu.Unlock() @@ -307,6 +404,32 @@ func (r *delegatingResolver) updateTargetResolverState(state resolver.State) err logger.Infof("Addresses received from target resolver: %v", state.Addresses) } r.targetResolverState = &state + // If all addresses returned by the target resolver have a non-TCP network + // type, or are listed in the `NO_PROXY` environment variable, do not wait + // for proxy update. + if !needsProxyResolver(r.targetResolverState) { + return r.cc.UpdateState(*r.targetResolverState) + } + + // The proxy resolver may be rebuilt multiple times, specifically each time + // the target resolver sends an update, even if the target resolver is built + // successfully but building the proxy resolver fails. + if len(r.proxyAddrs) == 0 { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if _, ok := r.proxyResolver.(nopResolver); !ok { + return + } + proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{}) + if err != nil { + r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err)) + return + } + r.proxyResolver = proxyResolver + }() + } + err := r.updateClientConnStateLocked() if err != nil { go func() { @@ -335,7 +458,8 @@ func (wcc *wrappingClientConn) UpdateState(state resolver.State) error { return wcc.stateListener(state) } -// ReportError intercepts errors from the child resolvers and passes them to ClientConn. +// ReportError intercepts errors from the child resolvers and passes them to +// ClientConn. func (wcc *wrappingClientConn) ReportError(err error) { wcc.parent.cc.ReportError(err) } @@ -346,8 +470,8 @@ func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) { wcc.UpdateState(resolver.State{Addresses: addrs}) } -// ParseServiceConfig parses the provided service config and returns an -// object that provides the parsed config. +// ParseServiceConfig parses the provided service config and returns an object +// that provides the parsed config. func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult { return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON) } diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index ba5c5a95..ada5251c 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig, } d.resolver, err = internal.NewNetResolver(target.URL.Host) @@ -181,8 +181,8 @@ type dnsResolver struct { // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). 
- wg sync.WaitGroup - disableServiceConfig bool + wg sync.WaitGroup + enableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this @@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if len(srv) > 0 { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } - if !d.disableServiceConfig { + if d.enableServiceConfig { state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index 79044657..d5f7e4d6 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordInt64UpDownCount records the measurement alongside labels on the int +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64UpDownCount(handle, incr, labels...) + } +} + // RecordFloat64Count records the measurement alongside labels on the float // count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go new file mode 100644 index 00000000..49019b80 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/stats.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + + "google.golang.org/grpc/stats" +) + +type combinedHandler struct { + handlers []stats.Handler +} + +// NewCombinedHandler combines multiple stats.Handlers into a single handler. +// +// It returns nil if no handlers are provided. If only one handler is +// provided, it is returned directly without wrapping. 
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler { + switch len(handlers) { + case 0: + return nil + case 1: + return handlers[0] + default: + return &combinedHandler{handlers: handlers} + } +} + +func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagRPC(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) { + for _, h := range ch.handlers { + h.HandleRPC(ctx, stats) + } +} + +func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagConn(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) { + for _, h := range ch.handlers { + h.HandleConn(ctx, stats) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 1186f1e9..aad171cd 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -236,3 +236,11 @@ func IsRestrictedControlPlaneCode(s *Status) bool { } return false } + +// RawStatusProto returns the internal protobuf message for use by gRPC itself. +func RawStatusProto(s *Status) *spb.Status { + if s == nil { + return nil + } + return s.s +} diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index 8ed347c5..98045251 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -29,25 +29,27 @@ import ( // ClientStream implements streaming functionality for a gRPC client. type ClientStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. ct *http2Client done chan struct{} // closed at the end of stream to unblock writers. doneFunc func() // invoked at the end of stream. - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + headerChan chan struct{} // closed to indicate the end of header metadata. + header metadata.MD // the received header metadata + + status *status.Status // the status error received from the server + + // Non-pointer fields are at the end to optimize GC allocations. + // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). - headerValid bool - header metadata.MD // the received header metadata - noHeaders bool // set if the client never received headers (set only after the stream is done). - - bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream - unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream - - status *status.Status // the status error received from the server + headerValid bool + noHeaders bool // set if the client never received headers (set only after the stream is done). + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. 
+ bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream } // Read reads an n byte message from the input stream. @@ -59,7 +61,7 @@ func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { return b, err } -// Close closes the stream and popagates err to any readers. +// Close closes the stream and propagates err to any readers. func (s *ClientStream) Close(err error) { var ( rst bool @@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool { func (s *ClientStream) Status() *status.Status { return s.status } + +func (s *ClientStream) requestRead(n int) { + s.ct.adjustWindow(s, uint32(n)) +} + +func (s *ClientStream) updateWindow(n int) { + s.ct.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ef72fbb3..2dcd1e63 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,6 +40,13 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { e.SetMaxDynamicTableSizeLimit(v) } +// itemNodePool is used to reduce heap allocations. +var itemNodePool = sync.Pool{ + New: func() any { + return &itemNode{} + }, +} + type itemNode struct { it any next *itemNode @@ -51,7 +58,9 @@ type itemList struct { } func (il *itemList) enqueue(i any) { - n := &itemNode{it: i} + n := itemNodePool.Get().(*itemNode) + n.next = nil + n.it = i if il.tail == nil { il.head, il.tail = n, n return @@ -71,7 +80,9 @@ func (il *itemList) dequeue() any { return nil } i := il.head.it + temp := il.head il.head = il.head.next + itemNodePool.Put(temp) if il.head == nil { il.tail = nil } @@ -146,10 +157,11 @@ type earlyAbortStream struct { func (*earlyAbortStream) isTransportResponseFrame() bool { return false } type dataFrame struct { - streamID uint32 - endStream bool - h []byte - reader mem.Reader + streamID uint32 + endStream bool + h []byte + data mem.BufferSlice + processing bool // onEachWrite is called every time // a part of data is written out. onEachWrite func() @@ -234,6 +246,7 @@ type outStream struct { itl *itemList bytesOutStanding int wq *writeQuota + reader mem.Reader next *outStream prev *outStream @@ -461,7 +474,9 @@ func (c *controlBuffer) finish() { v.onOrphaned(ErrConnClosing) } case *dataFrame: - _ = v.reader.Close() + if !v.processing { + v.data.Free() + } } } @@ -481,6 +496,16 @@ const ( serverSide ) +// maxWriteBufSize is the maximum length (number of elements) the cached +// writeBuf can grow to. The length depends on the number of buffers +// contained within the BufferSlice produced by the codec, which is +// generally small. +// +// If a writeBuf larger than this limit is required, it will be allocated +// and freed after use, rather than being cached. This avoids holding +// on to large amounts of memory. +const maxWriteBufSize = 64 + // Loopy receives frames from the control buffer. // Each frame is handled individually; most of the work done by loopy goes // into handling data frames. Loopy maintains a queue of active streams, and each @@ -515,6 +540,8 @@ type loopyWriter struct { // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) + + writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek. 
} func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { @@ -790,10 +817,13 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // a RST_STREAM before stream initialization thus the stream might // not be established yet. delete(l.estdStreams, c.streamID) + str.reader.Close() str.deleteSelf() for head := str.itl.dequeueAll(); head != nil; head = head.next { if df, ok := head.it.(*dataFrame); ok { - _ = df.reader.Close() + if !df.processing { + df.data.Free() + } } } } @@ -928,7 +958,13 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } + reader := &str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + if !dataItem.processing { + dataItem.processing = true + reader.Reset(dataItem.data) + dataItem.data.Free() + } // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. // Every dataFrame has two buffers; h that keeps grpc-message header and data @@ -936,13 +972,13 @@ func (l *loopyWriter) processData() (bool, error) { // from data is copied to h to make as big as the maximum possible HTTP2 frame // size. - if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame + if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true - if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = dataItem.reader.Close() + reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -971,29 +1007,24 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, dataItem.reader.Remaining()) - remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + dSize := min(maxSize-hSize, reader.Remaining()) + remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize - var buf *[]byte - - if hSize != 0 && dSize == 0 { - buf = &dataItem.h - } else { - // Note: this is only necessary because the http2.Framer does not support - // partially writing a frame, so the sequence must be materialized into a buffer. - // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. - pool := l.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() + l.writeBuf = l.writeBuf[:0] + if hSize > 0 { + l.writeBuf = append(l.writeBuf, dataItem.h[:hSize]) + } + if dSize > 0 { + var err error + l.writeBuf, err = reader.Peek(dSize, l.writeBuf) + if err != nil { + // This must never happen since the reader must have at least dSize + // bytes. + // Log an error to fail tests. 
+ l.logger.Errorf("unexpected error while reading Data frame payload: %v", err) + return false, err } - buf = pool.Get(size) - defer pool.Put(buf) - - copy((*buf)[:hSize], dataItem.h) - _, _ = dataItem.reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1006,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) { if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf) + reader.Discard(dSize) + if cap(l.writeBuf) > maxWriteBufSize { + l.writeBuf = nil + } else { + clear(l.writeBuf) + } + if err != nil { return false, err } str.bytesOutStanding += size @@ -1014,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = dataItem.reader.Close() + reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index dfc0f224..7cfbc963 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -28,7 +28,7 @@ import ( // writeQuota is a soft limit on the amount of data a stream can // schedule before some of it is written out. type writeQuota struct { - quota int32 + _ noCopy // get waits on read from when quota goes less than or equal to zero. // replenish writes on it when quota goes positive again. ch chan struct{} @@ -38,16 +38,17 @@ type writeQuota struct { // It is implemented as a field so that it can be updated // by tests. replenish func(n int) + quota int32 } -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - w := &writeQuota{ - quota: sz, - ch: make(chan struct{}, 1), - done: done, - } +// init allows a writeQuota to be initialized in-place, which is useful for +// resetting a buffer or for avoiding a heap allocation when the buffer is +// embedded in another struct. +func (w *writeQuota) init(sz int32, done <-chan struct{}) { + w.quota = sz + w.ch = make(chan struct{}, 1) + w.done = done w.replenish = w.realReplenish - return w } func (w *writeQuota) get(sz int32) error { @@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error { func (w *writeQuota) realReplenish(n int) { sz := int32(n) - a := atomic.AddInt32(&w.quota, sz) - b := a - sz - if b <= 0 && a > 0 { + newQuota := atomic.AddInt32(&w.quota, sz) + previousQuota := newQuota - sz + if previousQuota <= 0 && newQuota > 0 { select { case w.ch <- struct{}{}: default: diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 3dea2357..7ab3422b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -170,7 +170,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats stats.Handler logger *grpclog.PrefixLogger bufferPool mem.BufferPool @@ -274,14 +274,14 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status } }) - if err == nil { // transport has not been closed + if err == nil && ht.stats != nil { // transport has not been closed // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - for _, sh := range ht.stats { - sh.HandleRPC(s.Context(), &stats.OutTrailer{ - Trailer: s.trailer.Copy(), - }) - } + s.hdrMu.Lock() + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + s.hdrMu.Unlock() } ht.Close(errors.New("finished writing status")) return err @@ -372,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e ht.rw.(http.Flusher).Flush() }) - if err == nil { - for _, sh := range ht.stats { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), - Compression: s.sendCompress, - }) - } + if err == nil && ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) } return err } +func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) { +} + +func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) { +} + func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc @@ -409,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req s := &ServerStream{ - Stream: &Stream{ + Stream: Stream{ id: 0, // irrelevant ctx: ctx, - requestRead: func(int) {}, - buf: newRecvBuffer(), method: req.URL.Path, recvCompress: req.Header.Get("grpc-encoding"), contentSubtype: ht.contentSubtype, @@ -422,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream st: ht, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } - s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, - windowHandler: func(int) {}, + s.Stream.buf.init() + s.readRequester = s + s.trReader = transportReader{ + reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf}, + windowHandler: s, } // readerDone is closed when the Body.Read-ing goroutine exits. 
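Both this handler-based server transport and the HTTP/2 client transport in the next file now carry a single stats.Handler, built by istats.NewCombinedHandler from whatever the user registered. A hedged usage sketch of registering several handlers from application code (illustrative only; loggingHandler and the target address are made up):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// loggingHandler is a trivial stats.Handler used only for illustration.
type loggingHandler struct{ name string }

func (h *loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}
func (h *loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("%s: RPC event %T", h.name, s)
}
func (h *loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}
func (h *loggingHandler) HandleConn(_ context.Context, s stats.ConnStats) {
	log.Printf("%s: conn event %T", h.name, s)
}

func main() {
	// Both handlers still fire for every event; internally the transport
	// now sees them merged into one handler.
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(&loggingHandler{name: "metrics"}),
		grpc.WithStatsHandler(&loggingHandler{name: "tracing"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}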
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 513dbb93..38ca031a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -44,6 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/proxyattributes" + istats "google.golang.org/grpc/internal/stats" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -105,7 +106,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandlers []stats.Handler + statsHandler stats.Handler initialWindowSize int32 @@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error return fn(ctx, address) } if !ok { - networkType, address = parseDialTarget(address) + networkType, address = ParseDialTarget(address) } if opts, present := proxyattributes.Get(addr); present { return proxyDial(ctx, addr, grpcUA, opts) @@ -309,11 +310,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts scheme = "https" } } - dynamicWindow := true icwz := int32(initialWindowSize) if opts.InitialConnWindowSize >= defaultWindowSize { icwz = opts.InitialConnWindowSize - dynamicWindow = false } writeBufSize := opts.WriteBufferSize readBufSize := opts.ReadBufferSize @@ -337,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts writerDone: make(chan struct{}), goAway: make(chan struct{}), keepaliveDone: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandlers: opts.StatsHandlers, + statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...), initialWindowSize: initialWindowSize, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, @@ -371,7 +370,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts }) t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.ctx = peer.NewContext(t.ctx, t.Peer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -381,23 +380,21 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize - dynamicWindow = false } - if dynamicWindow { + if !opts.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.statsHandlers { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) - connBegin := &stats.ConnBegin{ + t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{ Client: true, - } - sh.HandleConn(t.ctx, connBegin) + }) } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -484,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &ClientStream{ - Stream: &Stream{ + Stream: Stream{ method: callHdr.Method, sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), contentSubtype: callHdr.ContentSubtype, }, ct: t, @@ -495,31 +491,26 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt headerChan: make(chan struct{}), doneFunc: callHdr.DoneFunc, } - s.wq = newWriteQuota(defaultWriteQuota, s.done) - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.Stream.buf.init() + s.Stream.wq.init(defaultWriteQuota, s.done) + s.readRequester = s // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. s.ctx = ctx - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctx.Done(), - recv: s.buf, - closeStream: func(err error) { - s.Close(err) - }, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + s.trReader = transportReader{ + reader: recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: &s.buf, + clientStream: s, }, + windowHandler: s, } return s } -func (t *http2Client) getPeer() *peer.Peer { +func (t *http2Client) Peer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, AuthInfo: t.authInfo, // Can be nil @@ -545,7 +536,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + ctxWithRequestInfo := credentials.NewContextWithRequestInfo(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -559,6 +550,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Make the slice of certain predictable size to reduce allocations made by append. 
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te hfLen += len(authData) + len(callAuthData) + registeredCompressors := t.registeredCompressors + if callHdr.AcceptedCompressors != nil { + registeredCompressors = *callHdr.AcceptedCompressors + } + if callHdr.PreviousAttempts > 0 { + hfLen++ + } + if callHdr.SendCompress != "" { + hfLen++ + } + if registeredCompressors != "" { + hfLen++ + } + if _, ok := ctx.Deadline(); ok { + hfLen++ + } headerFields := make([]hpack.HeaderField, 0, hfLen) headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) @@ -571,7 +578,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } - registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) // Include the outgoing compressor name when compressor is not registered @@ -592,6 +598,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := time.Until(dl) + if timeout <= 0 { + return nil, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) } for k, v := range authData { @@ -736,7 +745,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { - ctx = peer.NewContext(ctx, t.getPeer()) + ctx = peer.NewContext(ctx, t.Peer()) // ServerName field of the resolver returned address takes precedence over // Host field of CallHdr to determine the :authority header. This is because, @@ -749,6 +758,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS callHdr = &newCallHdr } + // The authority specified via the `CallAuthority` CallOption takes the + // highest precedence when determining the `:authority` header. It overrides + // any value present in the Host field of CallHdr. Before applying this + // override, the authority string is validated. If the credentials do not + // implement the AuthorityValidator interface, or if validation fails, the + // RPC is failed with a status code of `UNAVAILABLE`. 
+ if callHdr.Authority != "" { + auth, ok := t.authInfo.(credentials.AuthorityValidator) + if !ok { + return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "credentials type %q does not implement the AuthorityValidator interface, but authority override specified with CallAuthority call option", t.authInfo.AuthType())} + } + if err := auth.ValidateAuthority(callHdr.Authority); err != nil { + return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "failed to validate authority %q : %v", callHdr.Authority, err)} + } + newCallHdr := *callHdr + newCallHdr.Host = callHdr.Authority + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -792,7 +820,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil }, onOrphaned: cleanup, - wq: s.wq, + wq: &s.wq, } firstTry := true var ch chan struct{} @@ -823,7 +851,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS transportDrainRequired = t.nextID > MaxStreamID s.id = hdr.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + s.fc = inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() @@ -874,27 +902,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if len(t.statsHandlers) != 0 { + if t.statsHandler != nil { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - for _, sh := range t.statsHandlers { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - // Note: Creating a new stats object to prevent pollution. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, - } - sh.HandleRPC(s.ctx, outHeader) - } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + }) } if transportDrainRequired { if t.logger.V(logLevel) { @@ -971,6 +995,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode // accessed anymore. func (t *http2Client) Close(err error) { t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) + // For background on the deadline value chosen here, see + // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 . + t.conn.SetReadDeadline(time.Now().Add(time.Second)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1032,11 +1059,10 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - for _, sh := range t.statsHandlers { - connEnd := &stats.ConnEnd{ + if t.statsHandler != nil { + t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{ Client: true, - } - sh.HandleConn(t.ctx, connEnd) + }) } } @@ -1069,32 +1095,29 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { - reader := data.Reader() - if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { - _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { - _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - reader: reader, + data: data, } - if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return err } } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } t.incrMsgSent() @@ -1150,7 +1173,7 @@ func (t *http2Client) updateFlowControl(n uint32) { }) } -func (t *http2Client) handleData(f *http2.DataFrame) { +func (t *http2Client) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -1194,22 +1217,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } // The server has closed the stream without sending trailers. Record that @@ -1242,7 +1258,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode = codes.DeadlineExceeded } } - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) + st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode) + t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false) } func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { @@ -1390,8 +1407,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { // the caller. 
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason - switch f.ErrCode { - case http2.ErrCodeEnhanceYourCalm: + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { if string(f.DebugData()) == "too_many_pings" { t.goAwayReason = GoAwayTooManyPings } @@ -1449,17 +1465,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string recvCompress string - httpStatusCode *int httpStatusErr string - rawStatusCode = codes.Unknown + // the code from the grpc-status header, if present + grpcStatusCode = codes.Unknown // headerError is set if an error is encountered while parsing the headers headerError string + httpStatus string ) - if initialHeader { - httpStatusErr = "malformed header: missing HTTP status" - } - for _, hf := range frame.Fields { switch hf.Name { case "content-type": @@ -1475,35 +1488,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { case "grpc-status": code, err := strconv.ParseInt(hf.Value, 10, 32) if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + se := status.New(codes.Unknown, fmt.Sprintf("transport: malformed grpc-status: %v", err)) t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } - rawStatusCode = codes.Code(uint32(code)) + grpcStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) case ":status": - if hf.Value == "200" { - httpStatusErr = "" - statusCode := 200 - httpStatusCode = &statusCode - break - } - - c, err := strconv.ParseInt(hf.Value, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - statusCode := int(c) - httpStatusCode = &statusCode - - httpStatusErr = fmt.Sprintf( - "unexpected HTTP status code received from server: %d (%s)", - statusCode, - http.StatusText(statusCode), - ) + httpStatus = hf.Value default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -1518,25 +1511,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - if !isGRPC || httpStatusErr != "" { - var code = codes.Internal // when header does not include HTTP status, return INTERNAL - - if httpStatusCode != nil { + // If a non-gRPC response is received, then evaluate the HTTP status to + // process the response and close the stream. + // In case http status doesn't provide any error information (status : 200), + // then evalute response code to be Unknown. + if !isGRPC { + var grpcErrorCode = codes.Internal + if httpStatus == "" { + httpStatusErr = "malformed header: missing HTTP status" + } else { + // Parse the status codes (e.g. "200", 404"). + statusCode, err := strconv.Atoi(httpStatus) + if err != nil { + se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + if statusCode >= 100 && statusCode < 200 { + if endStream { + se := status.New(codes.Internal, fmt.Sprintf( + "protocol error: informational header with status code %d must not have END_STREAM set", statusCode)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + } + // In case of informational headers, return. 
+ return + } + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) var ok bool - code, ok = HTTPStatusConvTab[*httpStatusCode] + grpcErrorCode, ok = HTTPStatusConvTab[statusCode] if !ok { - code = codes.Unknown + grpcErrorCode = codes.Unknown } } var errs []string if httpStatusErr != "" { errs = append(errs, httpStatusErr) } + if contentTypeErr != "" { errs = append(errs, contentTypeErr) } - // Verify the HTTP response is a 200. - se := status.New(code, strings.Join(errs, "; ")) + + se := status.New(grpcErrorCode, strings.Join(errs, "; ")) t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1567,22 +1587,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - for _, sh := range t.statsHandlers { + if t.statsHandler != nil { if !endStream { - inHeader := &stats.InHeader{ + t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + }) } else { - inTrailer := &stats.InTrailer{ + t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), - } - sh.HandleRPC(s.ctx, inTrailer) + }) } } @@ -1590,7 +1608,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream was still active, // send RST_STREAM. @@ -1637,7 +1655,7 @@ func (t *http2Client) reader(errCh chan<- error) { // loop to keep reading incoming messages on this transport. 
for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() if t.keepaliveEnabled { atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } @@ -1652,7 +1670,7 @@ func (t *http2Client) reader(errCh chan<- error) { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - errorDetail := t.framer.fr.ErrorDetail() + errorDetail := t.framer.errorDetail() var msg string if errorDetail != nil { msg = errorDetail.Error() @@ -1670,8 +1688,9 @@ func (t *http2Client) reader(errCh chan<- error) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: t.operateHeaders(frame) - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: @@ -1791,8 +1810,6 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } - func (t *http2Client) incrMsgSent() { if channelz.IsOn() { t.channelz.SocketMetrics.MessagesSent.Add(1) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 997b0a59..6f78a6b0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,12 +35,15 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/protobuf/proto" + + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/mem" - "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -84,7 +87,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats []stats.Handler + stats stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -130,6 +133,10 @@ type http2Server struct { maxStreamID uint32 // max stream ID ever seen logger *grpclog.PrefixLogger + // setResetPingStrikes is stored as a closure instead of making this a + // method on http2Server to avoid a heap allocation when converting a method + // to a closure for passing to frames objects. + setResetPingStrikes func() } // NewServerTransport creates a http2 transport with conn and configuration @@ -162,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool) // Send initial settings as connection preface to client. 
isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -174,16 +181,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, Val: config.MaxStreams, }) } - dynamicWindow := true iwz := int32(initialWindowSize) if config.InitialWindowSize >= defaultWindowSize { iwz = config.InitialWindowSize - dynamicWindow = false } icwz := int32(initialWindowSize) if config.InitialConnWindowSize >= defaultWindowSize { icwz = config.InitialConnWindowSize - dynamicWindow = false } if iwz != defaultWindowSize { isettings = append(isettings, http2.Setting{ @@ -257,13 +261,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*ServerStream), - stats: config.StatsHandlers, + stats: config.StatsHandler, kp: kp, idle: time.Now(), kep: kep, initialWindowSize: iwz, bufferPool: config.BufferPool, } + t.setResetPingStrikes = func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { czSecurity = au.GetSecurityValue() @@ -283,7 +290,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.logger = prefixLoggerForServerTransport(t) t.controlBuf = newControlBuffer(t.done) - if dynamicWindow { + if !config.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, @@ -384,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.maxStreamID = streamID - buf := newRecvBuffer() s := &ServerStream{ - Stream: &Stream{ - id: streamID, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + Stream: Stream{ + id: streamID, + fc: inFlow{limit: uint32(t.initialWindowSize)}, }, st: t, headerWireLength: int(frame.Header().Length), } + s.Stream.buf.init() var ( // if false, content-type was missing or invalid isGRPC = false @@ -594,34 +600,61 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade return nil } } + + if s.ctx.Err() != nil { + t.mu.Unlock() + // Early abort in case the timeout was zero or so low it already fired. + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusOK, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()), + rst: !frame.StreamEnded(), + }) + return nil + } + t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } + + // Start a timer to close the stream on reaching the deadline. + if timeoutSet { + // We need to wait for s.cancel to be updated before calling + // t.closeStream to avoid data races. 
+ cancelUpdated := make(chan struct{}) + timer := internal.TimeAfterFunc(timeout, func() { + <-cancelUpdated + t.closeStream(s, true, http2.ErrCodeCancel, false) + }) + oldCancel := s.cancel + s.cancel = func() { + oldCancel() + timer.Stop() + } + close(cancelUpdated) + } t.mu.Unlock() if channelz.IsOn() { t.channelz.SocketMetrics.StreamsStarted.Add(1) t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.readRequester = s s.ctxDone = s.ctx.Done() - s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) - s.trReader = &transportReader{ - reader: &recvBufferReader{ + s.Stream.wq.init(defaultWriteQuota, s.ctxDone) + s.trReader = transportReader{ + reader: recvBufferReader{ ctx: s.ctx, ctxDone: s.ctxDone, - recv: s.buf, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + recv: &s.buf, }, + windowHandler: s, } // Register the stream with loopy. t.controlBuf.put(®isterStream{ streamID: s.id, - wq: s.wq, + wq: &s.wq, }) handle(s) return nil @@ -637,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }() for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -674,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }) continue } - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: @@ -755,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) { } -func (t *http2Server) handleData(f *http2.DataFrame) { +func (t *http2Server) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -800,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) { t.closeStream(s, true, http2.ErrCodeFlowControl, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } if f.StreamEnded() { @@ -998,10 +1025,6 @@ func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { return nil } -func (t *http2Server) setResetPingStrikes() { - atomic.StoreUint32(&t.resetPingStrikes, 1) -} - func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. 
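The per-stream deadline handling added to operateHeaders above hinges on a small synchronization detail: the timer callback first blocks on a channel that is closed only after s.cancel has been wrapped to also stop the timer, so a timeout that fires immediately cannot race with the code that is still installing the wrapped cancel. The standalone sketch below illustrates only that pattern; it uses the standard time.AfterFunc in place of grpc's internal.TimeAfterFunc hook, and every name in it is invented for illustration.

package main

import (
	"fmt"
	"time"
)

func main() {
	cancel := func() { fmt.Println("original cancel ran") }

	// cancelUpdated is closed only after the wrapped cancel below is installed,
	// so a timer that fires immediately still waits for the final cancel func.
	cancelUpdated := make(chan struct{})
	timer := time.AfterFunc(0, func() { // zero timeout: fires right away
		<-cancelUpdated
		fmt.Println("deadline hit: would close the stream here")
	})
	oldCancel := cancel
	cancel = func() {
		oldCancel()
		timer.Stop()
	}
	close(cancelUpdated)

	time.Sleep(10 * time.Millisecond) // let the timer callback run
	cancel()
}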
@@ -1026,19 +1049,18 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - for _, sh := range t.stats { + if t.stats != nil { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. - outHeader := &stats.OutHeader{ + t.stats.HandleRPC(s.Context(), &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, - } - sh.HandleRPC(s.Context(), outHeader) + }) } return nil } -// WriteStatus sends stream status to the client and terminates the stream. +// writeStatus sends stream status to the client and terminates the stream. // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. @@ -1066,7 +1088,7 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := st.Proto(); p != nil && len(p.Details) > 0 { + if p := istatus.RawStatusProto(st); len(p.GetDetails()) > 0 { // Do not use the user's grpc-status-details-bin (if present) if we are // even attempting to set our own. delete(s.trailer, grpcStatusDetailsBinHeader) @@ -1101,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - for _, sh := range t.stats { + if t.stats != nil { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutTrailer{ + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1114,17 +1136,13 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { - reader := data.Reader() - if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.writeHeader(s, nil); err != nil { - _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - _ = reader.Close() return t.streamContextErr(s) } } @@ -1132,15 +1150,16 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ df := &dataFrame{ streamID: s.id, h: hdr, - reader: reader, + data: data, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return t.streamContextErr(s) } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } t.incrMsgSent() @@ -1274,9 +1293,9 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. 
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { - t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { + _, isActive := t.activeStreams[s.id] + if isActive { delete(t.activeStreams, s.id) if len(t.activeStreams) == 0 { t.idle = time.Now() @@ -1284,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { } t.mu.Unlock() - if channelz.IsOn() { + if isActive && channelz.IsOn() { if eosReceived { t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { @@ -1324,6 +1343,9 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo // called to interrupt the potential blocking on other goroutines. s.cancel() + // We can't return early even if the stream's state is "done" as the state + // might have been set by the `finishStream` method. Deleting the stream via + // `finishStream` can get blocked on flow control. s.swapState(streamDone) t.deleteStream(s, eosReceived) diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 3613d7b6..5bbb641a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -25,7 +25,6 @@ import ( "fmt" "io" "math" - "net" "net/http" "net/url" "strconv" @@ -37,6 +36,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" ) const ( @@ -196,11 +196,11 @@ func decodeTimeout(s string) (time.Duration, error) { if !ok { return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) } - t, err := strconv.ParseInt(s[:size-1], 10, 64) + t, err := strconv.ParseUint(s[:size-1], 10, 64) if err != nil { return 0, err } - const maxHours = math.MaxInt64 / int64(time.Hour) + const maxHours = math.MaxInt64 / uint64(time.Hour) if d == time.Hour && t > maxHours { // This timeout would overflow math.MaxInt64; clamp it. return time.Duration(math.MaxInt64), nil @@ -300,11 +300,11 @@ type bufWriter struct { buf []byte offset int batchSize int - conn net.Conn + conn io.Writer err error } -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { +func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter { w := &bufWriter{ batchSize: batchSize, conn: conn, @@ -388,15 +388,29 @@ func toIOError(err error) error { return ioError{error: err} } +type parsedDataFrame struct { + http2.FrameHeader + data mem.Buffer +} + +func (df *parsedDataFrame) StreamEnded() bool { + return df.FrameHeader.Flags.Has(http2.FlagDataEndStream) +} + type framer struct { - writer *bufWriter - fr *http2.Framer + writer *bufWriter + fr *http2.Framer + headerBuf []byte // cached slice for framer headers to reduce heap allocs. + reader io.Reader + dataFrame parsedDataFrame // Cached data frame to avoid heap allocations. 
+ pool mem.BufferPool + errDetail error } var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { +func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -412,6 +426,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu f := &framer{ writer: w, fr: http2.NewFramer(w, r), + reader: r, + pool: memPool, } f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -422,6 +438,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } +// writeData writes a DATA frame. +// +// It is the caller's responsibility not to violate the maximum frame size. +func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error { + var flags http2.Flags + if endStream { + flags = http2.FlagDataEndStream + } + length := uint32(0) + for _, d := range data { + length += uint32(len(d)) + } + // TODO: Replace the header write with the framer API being added in + // https://github.com/golang/go/issues/66655. + f.headerBuf = append(f.headerBuf[:0], + byte(length>>16), + byte(length>>8), + byte(length), + byte(http2.FrameData), + byte(flags), + byte(streamID>>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) + if _, err := f.writer.Write(f.headerBuf); err != nil { + return err + } + for _, d := range data { + if _, err := f.writer.Write(d); err != nil { + return err + } + } + return nil +} + +// readFrame reads a single frame. The returned Frame is only valid +// until the next call to readFrame. +func (f *framer) readFrame() (any, error) { + f.errDetail = nil + fh, err := f.fr.ReadFrameHeader() + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + // Read the data frame directly from the underlying io.Reader to avoid + // copies. + if fh.Type == http2.FrameData { + err = f.readDataFrame(fh) + return &f.dataFrame, err + } + fr, err := f.fr.ReadFrameForHeader(fh) + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + return fr, err +} + +// errorDetail returns a more detailed error of the last error +// returned by framer.readFrame. For instance, if readFrame +// returns a StreamError with code PROTOCOL_ERROR, errorDetail +// will say exactly what was invalid. errorDetail is not guaranteed +// to return a non-nil value. +// errorDetail is reset after the next call to readFrame. +func (f *framer) errorDetail() error { + return f.errDetail +} + +func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + f.errDetail = errors.New("DATA frame with stream ID 0") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This + // conversion is performed by mem.NewBuffer. To avoid the extra allocation + // a []byte is allocated directly if required and cast to a mem.SliceBuffer. + var buf []byte + // poolHandle is the pointer returned by the buffer pool (if it's used.). 
+ var poolHandle *[]byte + useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length)) + if useBufferPool { + poolHandle = f.pool.Get(int(fh.Length)) + buf = *poolHandle + defer func() { + if err != nil { + f.pool.Put(poolHandle) + } + }() + } else { + buf = make([]byte, int(fh.Length)) + } + if fh.Flags.Has(http2.FlagDataPadded) { + if fh.Length == 0 { + return io.ErrUnexpectedEOF + } + // This initial 1-byte read can be inefficient for unbuffered readers, + // but it allows the rest of the payload to be read directly to the + // start of the destination slice. This makes it easy to return the + // original slice back to the buffer pool. + if _, err := io.ReadFull(f.reader, buf[:1]); err != nil { + return err + } + padSize := buf[0] + buf = buf[:len(buf)-1] + if int(padSize) > len(buf) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + f.errDetail = errors.New("pad size larger than data payload") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + buf = buf[:len(buf)-int(padSize)] + } else if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + + f.dataFrame.FrameHeader = fh + if useBufferPool { + // Update the handle to point to the (potentially re-sliced) buf. + *poolHandle = buf + f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool) + } else { + f.dataFrame.data = mem.SliceBuffer(buf) + } + return nil +} + +func (df *parsedDataFrame) Header() http2.FrameHeader { + return df.FrameHeader +} + func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() @@ -439,8 +595,8 @@ func getWriteBufferPool(size int) *sync.Pool { return pool } -// parseDialTarget returns the network and address to pass to dialer. -func parseDialTarget(target string) (string, string) { +// ParseDialTarget returns the network and address to pass to dialer. +func ParseDialTarget(target string) (string, string) { net := "tcp" m1 := strings.Index(target, ":") m2 := strings.Index(target, ":/") diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go index a22a9015..ed6a13b7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -32,21 +32,24 @@ import ( // ServerStream implements streaming functionality for a gRPC server. type ServerStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. st internalServerTransport - ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) - cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + // cancel is invoked at the end of stream to cancel ctx. It also stops the + // timer for monitoring the rpc deadline if configured. + cancel func() // Holds compressor names passed in grpc-accept-encoding metadata from the // client. clientAdvertisedCompressors string - headerWireLength int // hdrMu protects outgoing header and trailer metadata. hdrMu sync.Mutex header metadata.MD // the outgoing header metadata. Updated by WriteHeader. 
headerSent atomic.Bool // atomically set when the headers are sent out. + + headerWireLength int } // Read reads an n byte message from the input stream. @@ -176,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error { s.hdrMu.Unlock() return nil } + +func (s *ServerStream) requestRead(n int) { + s.st.adjustWindow(s, uint32(n)) +} + +func (s *ServerStream) updateWindow(n int) { + s.st.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index af4a4aea..6daf1e00 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -68,11 +68,11 @@ type recvBuffer struct { err error } -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan recvMsg, 1), - } - return b +// init allows a recvBuffer to be initialized in-place, which is useful +// for resetting a buffer or for avoiding a heap allocation when the buffer +// is embedded in another struct. +func (b *recvBuffer) init() { + b.c = make(chan recvMsg, 1) } func (b *recvBuffer) put(r recvMsg) { @@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg { // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last mem.Buffer // Stores the remaining data in the previous calls. - err error + _ noCopy + clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last mem.Buffer // Stores the remaining data in the previous calls. + err error } func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { @@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { n, r.last = mem.ReadUnsafe(header, r.last) return n, nil } - if r.closeStream != nil { + if r.clientStream != nil { n, r.err = r.readMessageHeaderClient(header) } else { n, r.err = r.readMessageHeader(header) @@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { } return buf, nil } - if r.closeStream != nil { + if r.clientStream != nil { buf, r.err = r.readClient(n) } else { buf, r.err = r.read(n) @@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): @@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readAdditional(m, n) case m := <-r.recv.get(): @@ -285,27 +286,32 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { - id uint32 ctx context.Context // the associated context of the stream method string // the associated RPC method of the stream recvCompress string sendCompress string - buf *recvBuffer - trReader *transportReader - fc *inFlow - wq *writeQuota - - // Callback to state application's intentions to read data. This - // is used to adjust flow control, if needed. - requestRead func(int) - state streamState + readRequester readRequester // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string trailer metadata.MD // the key-value map of trailer metadata. + + // Non-pointer fields are at the end to optimize GC performance. + state streamState + id uint32 + buf recvBuffer + trReader transportReader + fc inFlow + wq writeQuota +} + +// readRequester is used to state application's intentions to read data. This +// is used to adjust flow control, if needed. +type readRequester interface { + requestRead(int) } func (s *Stream) swapState(st streamState) streamState { @@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) { if er := s.trReader.er; er != nil { return er } - s.requestRead(len(header)) + s.readRequester.requestRead(len(header)) for len(header) != 0 { n, err := s.trReader.ReadMessageHeader(header) header = header[n:] @@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { if er := s.trReader.er; er != nil { return nil, er } - s.requestRead(n) + s.readRequester.requestRead(n) for n != 0 { buf, err := s.trReader.Read(n) var bufLen int @@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { return data, nil } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader *recvBufferReader + _ noCopy // The handler to control the window update procedure for both this // particular stream and the associated transport. - windowHandler func(int) + windowHandler windowHandler er error + reader recvBufferReader +} + +// The handler to control the window update procedure for both this +// particular stream and the associated transport. 
+type windowHandler interface { + updateWindow(int) } func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { @@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(n) + t.windowHandler.updateWindow(n) return n, nil } @@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { t.er = err return buf, err } - t.windowHandler(buf.Len()) + t.windowHandler.updateWindow(buf.Len()) return buf, nil } @@ -454,7 +478,7 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandlers []stats.Handler + StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 @@ -466,6 +490,7 @@ type ServerConfig struct { MaxHeaderListSize *uint32 HeaderTableSize *uint32 BufferPool mem.BufferPool + StaticWindowSize bool } // ConnectOptions covers all relevant options for communicating with the server. @@ -504,6 +529,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // The mem.BufferPool to use when reading/writing to the wire. BufferPool mem.BufferPool + // StaticWindowSize controls whether dynamic window sizing is enabled. + StaticWindowSize bool } // WriteOptions provides additional hints and information for message @@ -526,6 +553,12 @@ type CallHdr struct { // outbound message. SendCompress string + // AcceptedCompressors overrides the grpc-accept-encoding header for this + // call. When nil, the transport advertises the default set of registered + // compressors. A non-nil pointer overrides that value (including the empty + // string to advertise none). + AcceptedCompressors *string + // Creds specifies credentials.PerRPCCredentials for a call. Creds credentials.PerRPCCredentials @@ -540,6 +573,11 @@ type CallHdr struct { PreviousAttempts int // value of grpc-previous-rpc-attempts header to set DoneFunc func() // called when the stream is finished + + // Authority is used to explicitly override the `:authority` header. If set, + // this value takes precedence over the Host field and will be used as the + // value for the `:authority` header. + Authority string } // ClientTransport is the common interface for all gRPC client-side transport @@ -576,8 +614,9 @@ type ClientTransport interface { // with a human readable string with debug info. GetGoAwayReason() (GoAwayReason, string) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns information about the peer associated with the Transport. + // The returned information includes authentication and network address details. + Peer() *peer.Peer } // ServerTransport is the common interface for all gRPC server-side transport @@ -607,6 +646,8 @@ type internalServerTransport interface { write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error writeStatus(s *ServerStream, st *status.Status) error incrMsgRecv() + adjustWindow(s *ServerStream, n uint32) + updateWindow(s *ServerStream, n uint32) } // connectionErrorf creates an ConnectionError with the specified error description. 
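A recurring theme in the transport.go changes above is replacing per-stream callback fields (requestRead func(int), windowHandler func(int)) with small interfaces (readRequester, windowHandler) implemented by the stream itself: a closure that captures the stream and transport allocates on the heap for every stream, while an interface value backed by the already-allocated stream does not. The sketch below contrasts the two shapes in isolation; the types are invented for illustration and are not grpc-go's.

package main

import "fmt"

// Closure shape: each stream stores a func value that captures the stream,
// which costs an extra heap allocation per stream.
type streamWithCallback struct {
	requestRead func(n int)
}

// Interface shape: the stream type itself implements the method, so the wiring
// stores only an interface value pointing at the existing stream.
type readRequester interface {
	requestRead(n int)
}

type stream struct{ window int }

func (s *stream) requestRead(n int) { s.window += n }

func main() {
	s := &stream{}

	old := &streamWithCallback{requestRead: func(n int) { s.window += n }}
	old.requestRead(1) // via the captured closure

	var rr readRequester = s
	rr.requestRead(1) // via the interface method, no extra closure

	fmt.Println(s.window) // prints 2
}

The noCopy embeds added alongside these structs serve a related purpose: once recvBuffer, transportReader, and friends are stored inline rather than behind pointers, go vet's copylocks check can flag accidental copies of them.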
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go index c37c58c0..e37afdd1 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -32,12 +32,17 @@ type BufferPool interface { Get(length int) *[]byte // Put returns a buffer to the pool. + // + // The provided pointer must hold a prefix of the buffer obtained via + // BufferPool.Get to ensure the buffer's entire capacity can be re-used. Put(*[]byte) } +const goPageSize = 4 << 10 // 4KiB. N.B. this must be a power of 2. + var defaultBufferPoolSizes = []int{ 256, - 4 << 10, // 4KB (go page size) + goPageSize, 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) 32 << 10, // 32KB (default buffer size for io.Copy) 1 << 20, // 1MB @@ -118,7 +123,11 @@ type sizedBufferPool struct { } func (p *sizedBufferPool) Get(size int) *[]byte { - buf := p.pool.Get().(*[]byte) + buf, ok := p.pool.Get().(*[]byte) + if !ok { + buf := make([]byte, size, p.defaultSize) + return &buf + } b := *buf clear(b[:cap(b)]) *buf = b[:size] @@ -137,12 +146,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) { func newSizedBufferPool(size int) *sizedBufferPool { return &sizedBufferPool{ - pool: sync.Pool{ - New: func() any { - buf := make([]byte, size) - return &buf - }, - }, defaultSize: size, } } @@ -160,6 +163,7 @@ type simpleBufferPool struct { func (p *simpleBufferPool) Get(size int) *[]byte { bs, ok := p.pool.Get().(*[]byte) if ok && cap(*bs) >= size { + clear((*bs)[:cap(*bs)]) *bs = (*bs)[:size] return bs } @@ -170,7 +174,14 @@ func (p *simpleBufferPool) Get(size int) *[]byte { p.pool.Put(bs) } - b := make([]byte, size) + // If we're going to allocate, round up to the nearest page. This way if + // requests frequently arrive with small variation we don't allocate + // repeatedly if we get unlucky and they increase over time. By default we + // only allocate here if size > 1MiB. Because goPageSize is a power of 2, we + // can round up efficiently. + allocSize := (size + goPageSize - 1) & ^(goPageSize - 1) + + b := make([]byte, size, allocSize) return &b } diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index 65002e2c..084fb19c 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -19,6 +19,7 @@ package mem import ( + "fmt" "io" ) @@ -117,47 +118,53 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { // Reader returns a new Reader for the input slice after taking references to // each underlying buffer. -func (s BufferSlice) Reader() Reader { +func (s BufferSlice) Reader() *Reader { s.Ref() - return &sliceReader{ + return &Reader{ data: s, len: s.Len(), } } // Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface -// with other parts systems. It also provides an additional convenience method -// Remaining(), which returns the number of unread bytes remaining in the slice. +// with other systems. +// // Buffers will be freed as they are read. -type Reader interface { - io.Reader - io.ByteReader - // Close frees the underlying BufferSlice and never returns an error. Subsequent - // calls to Read will return (0, io.EOF). - Close() error - // Remaining returns the number of unread bytes remaining in the slice. 
- Remaining() int -} - -type sliceReader struct { +// +// A Reader can be constructed from a BufferSlice; alternatively the zero value +// of a Reader may be used after calling Reset on it. +type Reader struct { data BufferSlice len int // The index into data[0].ReadOnlyData(). bufferIdx int } -func (r *sliceReader) Remaining() int { +// Remaining returns the number of unread bytes remaining in the slice. +func (r *Reader) Remaining() int { return r.len } -func (r *sliceReader) Close() error { +// Reset frees the currently held buffer slice and starts reading from the +// provided slice. This allows reusing the reader object. +func (r *Reader) Reset(s BufferSlice) { + r.data.Free() + s.Ref() + r.data = s + r.len = s.Len() + r.bufferIdx = 0 +} + +// Close frees the underlying BufferSlice and never returns an error. Subsequent +// calls to Read will return (0, io.EOF). +func (r *Reader) Close() error { r.data.Free() r.data = nil r.len = 0 return nil } -func (r *sliceReader) freeFirstBufferIfEmpty() bool { +func (r *Reader) freeFirstBufferIfEmpty() bool { if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { return false } @@ -168,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool { return true } -func (r *sliceReader) Read(buf []byte) (n int, _ error) { +func (r *Reader) Read(buf []byte) (n int, _ error) { if r.len == 0 { return 0, io.EOF } @@ -191,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) { return n, nil } -func (r *sliceReader) ReadByte() (byte, error) { +// ReadByte reads a single byte. +func (r *Reader) ReadByte() (byte, error) { if r.len == 0 { return 0, io.EOF } @@ -279,3 +287,59 @@ nextBuffer: } } } + +// Discard skips the next n bytes, returning the number of bytes discarded. +// +// It frees buffers as they are fully consumed. +// +// If Discard skips fewer than n bytes, it also returns an error. +func (r *Reader) Discard(n int) (discarded int, err error) { + total := n + for n > 0 && r.len > 0 { + curData := r.data[0].ReadOnlyData() + curSize := min(n, len(curData)-r.bufferIdx) + n -= curSize + r.len -= curSize + r.bufferIdx += curSize + if r.bufferIdx >= len(curData) { + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + } + } + discarded = total - n + if n > 0 { + return discarded, fmt.Errorf("insufficient bytes in reader") + } + return discarded, nil +} + +// Peek returns the next n bytes without advancing the reader. +// +// Peek appends results to the provided res slice and returns the updated slice. +// This pattern allows re-using the storage of res if it has sufficient +// capacity. +// +// The returned subslices are views into the underlying buffers and are only +// valid until the reader is advanced past the corresponding buffer. +// +// If Peek returns fewer than n bytes, it also returns an error. 
+func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) { + for i := 0; n > 0 && i < len(r.data); i++ { + curData := r.data[i].ReadOnlyData() + start := 0 + if i == 0 { + start = r.bufferIdx + } + curSize := min(n, len(curData)-start) + if curSize == 0 { + continue + } + res = append(res, curData[start:start+curSize]) + n -= curSize + } + if n > 0 { + return nil, fmt.Errorf("insufficient bytes in reader") + } + return res, nil +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a2d2a798..aa52bfe9 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -48,14 +47,11 @@ type pickerGeneration struct { // actions and unblock when there's a picker update. type pickerWrapper struct { // If pickerGen holds a nil pointer, the pickerWrapper is closed. - pickerGen atomic.Pointer[pickerGeneration] - statsHandlers []stats.Handler // to record blocking picker calls + pickerGen atomic.Pointer[pickerGeneration] } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - pw := &pickerWrapper{ - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + pw := &pickerWrapper{} pw.pickerGen.Store(&pickerGeneration{ blockingCh: make(chan struct{}), }) @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { } } +type pick struct { + transport transport.ClientTransport // the selected transport + result balancer.PickResult // the contents of the pick from the LB policy + blocked bool // set if a picker call queued for a new picker +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) { var ch chan struct{} var lastPickErr error + pickBlocked := false for { pg := pw.pickerGen.Load() if pg == nil { - return nil, balancer.PickResult{}, ErrClientConnClosing + return pick{}, ErrClientConnClosing } if pg.picker == nil { ch = pg.blockingCh @@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return pick{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return pick{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // In the second case, the only way it will get to this conditional is // if there is a new picker. 
if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } + pickBlocked = true } ch = pg.blockingCh @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return pick{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return pick{}, status.Error(codes.Unavailable, err.Error()) } acbw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil } - return t, pickResult, nil + return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index ee0ff969..1e783feb 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { } // check if the context has the relevant information to prepareMsg - if rpcInfo.preloaderInfo == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") - } if rpcInfo.preloaderInfo.codec == nil { return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") } diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index 975b4997..c3c15ac9 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -24,16 +24,22 @@ import ( "strings" ) -type addressMapEntry struct { +type addressMapEntry[T any] struct { addr Address - value any + value T } -// AddressMap is a map of addresses to arbitrary values taking into account +// AddressMap is an AddressMapV2[any]. It will be deleted in an upcoming +// release of grpc-go. +// +// Deprecated: use the generic AddressMapV2 type instead. +type AddressMap = AddressMapV2[any] + +// AddressMapV2 is a map of addresses to arbitrary values taking into account // Attributes. BalancerAttributes are ignored, as are Metadata and Type. // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. -type AddressMap struct { +type AddressMapV2[T any] struct { // The underlying map is keyed by an Address with fields that we don't care // about being set to their zero values. The only fields that we care about // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to @@ -47,23 +53,30 @@ type AddressMap struct { // The value type of the map contains a slice of addresses which match the key // in their `Addr` and `ServerName` fields and contain the corresponding value // associated with them. 
- m map[Address]addressMapEntryList + m map[Address]addressMapEntryList[T] } func toMapKey(addr *Address) Address { return Address{Addr: addr.Addr, ServerName: addr.ServerName} } -type addressMapEntryList []*addressMapEntry +type addressMapEntryList[T any] []*addressMapEntry[T] -// NewAddressMap creates a new AddressMap. +// NewAddressMap creates a new AddressMapV2[any]. +// +// Deprecated: use the generic NewAddressMapV2 constructor instead. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[Address]addressMapEntryList)} + return NewAddressMapV2[any]() +} + +// NewAddressMapV2 creates a new AddressMapV2. +func NewAddressMapV2[T any]() *AddressMapV2[T] { + return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. -func (l addressMapEntryList) find(addr Address) int { +func (l addressMapEntryList[T]) find(addr Address) int { for i, entry := range l { // Attributes are the only thing to match on here, since `Addr` and // `ServerName` are already equal. @@ -75,28 +88,28 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value any, ok bool) { +func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value any) { +func (a *AddressMapV2[T]) Set(addr Address, value T) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { entryList[entry].value = value return } - a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value}) } // Delete removes addr from the map. -func (a *AddressMap) Delete(addr Address) { +func (a *AddressMapV2[T]) Delete(addr Address) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] entry := entryList.find(addr) @@ -113,7 +126,7 @@ func (a *AddressMap) Delete(addr Address) { } // Len returns the number of entries in the map. -func (a *AddressMap) Len() int { +func (a *AddressMapV2[T]) Len() int { ret := 0 for _, entryList := range a.m { ret += len(entryList) @@ -122,7 +135,7 @@ func (a *AddressMap) Len() int { } // Keys returns a slice of all current map keys. -func (a *AddressMap) Keys() []Address { +func (a *AddressMapV2[T]) Keys() []Address { ret := make([]Address, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { @@ -133,8 +146,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []any { - ret := make([]any, 0, a.Len()) +func (a *AddressMapV2[T]) Values() []T { + ret := make([]T, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -149,21 +162,21 @@ type endpointMapKey string // unordered set of address strings within an endpoint. This map is not thread // safe, thus it is unsafe to access concurrently. Must be created via // NewEndpointMap; do not construct directly. 
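// Illustrative sketch (not part of the vendored patch): the generic
// AddressMapV2 keeps the attribute-aware keying described above while letting
// callers fix the value type at compile time. The function name below is an
// assumption for illustration only.
package example

import "google.golang.org/grpc/resolver"

// countOccurrences tallies how often each address (keyed by Addr, ServerName
// and Attributes) appears in a resolver update.
func countOccurrences(addrs []resolver.Address) *resolver.AddressMapV2[int] {
	m := resolver.NewAddressMapV2[int]()
	for _, a := range addrs {
		n, _ := m.Get(a) // zero value when absent; no type assertion needed
		m.Set(a, n+1)
	}
	return m
}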
-type EndpointMap struct { - endpoints map[endpointMapKey]endpointData +type EndpointMap[T any] struct { + endpoints map[endpointMapKey]endpointData[T] } -type endpointData struct { +type endpointData[T any] struct { // decodedKey stores the original key to avoid decoding when iterating on // EndpointMap keys. decodedKey Endpoint - value any + value T } // NewEndpointMap creates a new EndpointMap. -func NewEndpointMap() *EndpointMap { - return &EndpointMap{ - endpoints: make(map[endpointMapKey]endpointData), +func NewEndpointMap[T any]() *EndpointMap[T] { + return &EndpointMap[T]{ + endpoints: make(map[endpointMapKey]endpointData[T]), } } @@ -183,25 +196,25 @@ func encodeEndpoint(e Endpoint) endpointMapKey { } // Get returns the value for the address in the map, if present. -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { +func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) { val, found := em.endpoints[encodeEndpoint(e)] if found { return val.value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (em *EndpointMap) Set(e Endpoint, value any) { +func (em *EndpointMap[T]) Set(e Endpoint, value T) { en := encodeEndpoint(e) - em.endpoints[en] = endpointData{ + em.endpoints[en] = endpointData[T]{ decodedKey: Endpoint{Addresses: e.Addresses}, value: value, } } // Len returns the number of entries in the map. -func (em *EndpointMap) Len() int { +func (em *EndpointMap[T]) Len() int { return len(em.endpoints) } @@ -210,7 +223,7 @@ func (em *EndpointMap) Len() int { // the unordered set of addresses. Thus, endpoint information returned is not // the full endpoint data (drops duplicated addresses and attributes) but can be // used for EndpointMap accesses. -func (em *EndpointMap) Keys() []Endpoint { +func (em *EndpointMap[T]) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) for _, en := range em.endpoints { ret = append(ret, en.decodedKey) @@ -219,8 +232,8 @@ func (em *EndpointMap) Keys() []Endpoint { } // Values returns a slice of all current map values. -func (em *EndpointMap) Values() []any { - ret := make([]any, 0, len(em.endpoints)) +func (em *EndpointMap[T]) Values() []T { + ret := make([]T, 0, len(em.endpoints)) for _, val := range em.endpoints { ret = append(ret, val.value) } @@ -228,7 +241,7 @@ func (em *EndpointMap) Values() []any { } // Delete removes the specified endpoint from the map. -func (em *EndpointMap) Delete(e Endpoint) { +func (em *EndpointMap[T]) Delete(e Endpoint) { en := encodeEndpoint(e) delete(em.endpoints, en) } diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index b84ef26d..8e6af951 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -332,6 +332,11 @@ type AuthorityOverrider interface { // OverrideAuthority returns the authority to use for a ClientConn with the // given target. The implementation must generate it without blocking, // typically in line, and must keep it unchanged. + // + // The returned string must be a valid ":authority" header value, i.e. be + // encoded according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as + // necessary. 
OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 80e16a32..6e613764 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -69,6 +69,7 @@ func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { + errCh <- ctx.Err() return } opts := resolver.BuildOptions{ diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index ad20e9df..8160f943 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -33,6 +33,8 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" @@ -41,6 +43,10 @@ import ( "google.golang.org/grpc/status" ) +func init() { + internal.AcceptCompressors = acceptCompressors +} + // Compressor defines the interface gRPC uses to compress a message. // // Deprecated: use package encoding. @@ -151,15 +157,32 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - compressorName string - failFast bool - maxReceiveMessageSize *int - maxSendMessageSize *int - creds credentials.PerRPCCredentials - contentSubtype string - codec baseCodec - maxRetryRPCBufferSize int - onFinish []func(err error) + compressorName string + failFast bool + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int + onFinish []func(err error) + authority string + acceptedResponseCompressors []string +} + +func acceptedCompressorAllows(allowed []string, name string) bool { + if allowed == nil { + return true + } + if name == "" || name == encoding.Identity { + return true + } + for _, a := range allowed { + if a == name { + return true + } + } + return false } func defaultCallInfo() *callInfo { @@ -169,6 +192,29 @@ func defaultCallInfo() *callInfo { } } +func newAcceptedCompressionConfig(names []string) ([]string, error) { + if len(names) == 0 { + return nil, nil + } + var allowed []string + seen := make(map[string]struct{}, len(names)) + for _, name := range names { + name = strings.TrimSpace(name) + if name == "" || name == encoding.Identity { + continue + } + if !grpcutil.IsCompressorNameRegistered(name) { + return nil, status.Errorf(codes.InvalidArgument, "grpc: compressor %q is not registered", name) + } + if _, dup := seen[name]; dup { + continue + } + seen[name] = struct{}{} + allowed = append(allowed, name) + } + return allowed, nil +} + // CallOption configures a Call before it starts or extracts information from // a Call after it completes. type CallOption interface { @@ -365,6 +411,36 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { } func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} +// CallAuthority returns a CallOption that sets the HTTP/2 :authority header of +// an RPC to the specified value. When using CallAuthority, the credentials in +// use must implement the AuthorityValidator interface. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func CallAuthority(authority string) CallOption { + return AuthorityOverrideCallOption{Authority: authority} +} + +// AuthorityOverrideCallOption is a CallOption that indicates the HTTP/2 +// :authority header value to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a later +// release. +type AuthorityOverrideCallOption struct { + Authority string +} + +func (o AuthorityOverrideCallOption) before(c *callInfo) error { + c.authority = o.Authority + return nil +} + +func (o AuthorityOverrideCallOption) after(*callInfo, *csAttempt) {} + // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default // `math.MaxInt32`. @@ -440,6 +516,31 @@ func (o CompressorCallOption) before(c *callInfo) error { } func (o CompressorCallOption) after(*callInfo, *csAttempt) {} +// acceptCompressors returns a CallOption that limits the compression algorithms +// advertised in the grpc-accept-encoding header for response messages. +// Compression algorithms not in the provided list will not be advertised, and +// responses compressed with non-listed algorithms will be rejected. +func acceptCompressors(names ...string) CallOption { + cp := append([]string(nil), names...) + return acceptCompressorsCallOption{names: cp} +} + +// acceptCompressorsCallOption is a CallOption that limits response compression. +type acceptCompressorsCallOption struct { + names []string +} + +func (o acceptCompressorsCallOption) before(c *callInfo) error { + allowed, err := newAcceptedCompressionConfig(o.names) + if err != nil { + return err + } + c.acceptedResponseCompressors = allowed + return nil +} + +func (acceptCompressorsCallOption) after(*callInfo, *csAttempt) {} + // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over // the wire will be "application/grpc+json". The content-subtype is converted @@ -626,8 +727,20 @@ type streamReader interface { Read(n int) (mem.BufferSlice, error) } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // parser reads complete gRPC messages from the underlying reader. type parser struct { + _ noCopy // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. @@ -814,8 +927,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. 
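// Illustrative sketch (not part of the vendored patch): using the
// CallAuthority option introduced above to override the :authority header for
// a single RPC, here against the standard health service. The helper name and
// authority value are assumptions; as documented above, the channel's
// credentials must implement an authority validator for the override to be
// accepted.
package example

import (
	"context"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func checkWithAuthority(ctx context.Context, cc *grpc.ClientConn, authority string) error {
	_, err := healthpb.NewHealthClient(cc).Check(
		ctx,
		&healthpb.HealthCheckRequest{},
		grpc.CallAuthority(authority),
	)
	return err
}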
// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, -) (out mem.BufferSlice, err error) { +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err @@ -918,7 +1030,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR // Information about RPC type rpcInfo struct { failfast bool - preloaderInfo *compressorInfo + preloaderInfo compressorInfo } // Information about Preloader @@ -937,7 +1049,7 @@ type rpcInfoContextKey struct{} func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ failfast: failfast, - preloaderInfo: &compressorInfo{ + preloaderInfo: compressorInfo{ codec: codec, cp: cp, comp: comp, diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 976e70ae..ddd37734 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -124,7 +124,8 @@ type serviceInfo struct { // Server is a gRPC server to serve RPC requests. type Server struct { - opts serverOptions + opts serverOptions + statsHandler stats.Handler mu sync.Mutex // guards following lis map[net.Listener]bool @@ -179,6 +180,7 @@ type serverOptions struct { numServerWorkers uint32 bufferPool mem.BufferPool waitForHandlers bool + staticWindowSize bool } var defaultServerOptions = serverOptions{ @@ -279,6 +281,7 @@ func ReadBufferSize(s int) ServerOption { func InitialWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialWindowSize = s + o.staticWindowSize = true }) } @@ -287,6 +290,29 @@ func InitialWindowSize(s int32) ServerOption { func InitialConnWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialConnWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticStreamWindowSize returns a ServerOption to set the initial stream +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. +func StaticStreamWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticConnWindowSize returns a ServerOption to set the initial connection +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. 
+func StaticConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + o.staticWindowSize = true }) } @@ -667,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[string]map[transport.ServerTransport]bool), - services: make(map[string]*serviceInfo), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - channelz: channelz.RegisterServer(""), + lis: make(map[net.Listener]bool), + opts: opts, + statsHandler: istats.NewCombinedHandler(opts.statsHandlers...), + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -974,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandlers: s.opts.statsHandlers, + StatsHandler: s.statsHandler, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -986,6 +1013,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, BufferPool: s.opts.bufferPool, + StaticWindowSize: s.opts.staticWindowSize, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1010,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { ctx = transport.SetConnection(ctx, rawConn) ctx = peer.NewContext(ctx, st.Peer()) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + if s.statsHandler != nil { + ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{ RemoteAddr: st.Peer().Addr, LocalAddr: st.Peer().LocalAddr, }) - sh.HandleConn(ctx, &stats.ConnBegin{}) + s.statsHandler.HandleConn(ctx, &stats.ConnBegin{}) } defer func() { st.Close(errors.New("finished serving streams for the server transport")) - for _, sh := range s.opts.statsHandlers { - sh.HandleConn(ctx, &stats.ConnEnd{}) + if s.statsHandler != nil { + s.statsHandler.HandleConn(ctx, &stats.ConnEnd{}) } }() @@ -1078,7 +1106,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) + st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1172,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = stream.Write(hdr, payload, opts) - if err == nil { - if len(s.opts.statsHandlers) != 0 { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) - } - } + if err == nil && s.statsHandler != nil { + s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) } return err } @@ -1219,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - shs := s.opts.statsHandlers - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + sh := s.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - for _, sh := range shs { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: false, IsServerStream: false, } @@ -1256,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt trInfo.tr.Finish() } - for _, sh := range shs { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1353,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt } var payInfo *payloadInfo - if len(shs) != 0 || len(binlogs) != 0 { + if sh != nil || len(binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1379,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - for _, sh := range shs { + if sh != nil { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1553,32 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if channelz.IsOn() { s.incrCallsStarted() } - shs := s.opts.statsHandlers + sh := s.statsHandler var statsBegin *stats.Begin - if len(shs) != 0 { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - for _, sh := range shs { - sh.HandleRPC(ctx, statsBegin) - } + sh.HandleRPC(ctx, statsBegin) } ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, s: stream, - p: &parser{r: stream, bufferPool: s.opts.bufferPool}, + p: parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), + desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: shs, + statsHandler: sh, } - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if sh != nil || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
defer func() { if trInfo != nil { @@ -1592,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ss.mu.Unlock() } - if len(shs) != 0 { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1600,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - for _, sh := range shs { - sh.HandleRPC(ctx, end) - } + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1791,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser method := sm[pos+1:] // FromIncomingContext is expensive: skip if there are no statsHandlers - if len(s.opts.statsHandlers) > 0 { + if s.statsHandler != nil { md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) - } + ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + s.statsHandler.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index dc03731e..67194a59 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -38,6 +38,15 @@ type RPCTagInfo struct { // FailFast indicates if this RPC is failfast. // This field is only valid on client side, it's always false on server side. FailFast bool + // NameResolutionDelay indicates if the RPC needed to wait for the + // initial name resolver update before it could begin. This should only + // happen if the channel is IDLE when the RPC is started. Note that + // all retry or hedging attempts for an RPC that experienced a delay + // will have it set. + // + // This field is only valid on the client side; it is always false on + // the server side. + NameResolutionDelay bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 6f20d2d5..10bf998a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -36,7 +36,12 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC attempt begins. +// Begin contains stats for the start of an RPC attempt. +// +// - Server-side: Triggered after `InHeader`, as headers are processed +// before the RPC lifecycle begins. +// - Client-side: The first stats event recorded. +// // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. @@ -59,17 +64,23 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. 
-type PickerUpdated struct{} +// DelayedPickComplete indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +type DelayedPickComplete struct{} + +// IsClient indicates DelayedPickComplete is available on the client. +func (*DelayedPickComplete) IsClient() bool { return true } -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } +func (*DelayedPickComplete) isRPCStats() {} -func (*PickerUpdated) isRPCStats() {} +// PickerUpdated indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +// +// Deprecated: will be removed in a future release; use DelayedPickComplete +// instead. +type PickerUpdated = DelayedPickComplete -// InPayload contains the information for an incoming payload. +// InPayload contains stats about an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool @@ -98,7 +109,9 @@ func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} -// InHeader contains stats when a header is received. +// InHeader contains stats about header reception. +// +// - Server-side: The first stats event after the RPC request is received. type InHeader struct { // Client is true if this InHeader is from client side. Client bool @@ -123,7 +136,7 @@ func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} -// InTrailer contains stats when a trailer is received. +// InTrailer contains stats about trailer reception. type InTrailer struct { // Client is true if this InTrailer is from client side. Client bool @@ -139,7 +152,7 @@ func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} -// OutPayload contains the information for an outgoing payload. +// OutPayload contains stats about an outgoing payload. type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool @@ -166,7 +179,10 @@ func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} -// OutHeader contains stats when a header is sent. +// OutHeader contains stats about header transmission. +// +// - Client-side: Only occurs after 'Begin', as headers are always the first +// thing sent on a stream. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool @@ -189,14 +205,15 @@ func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} -// OutTrailer contains stats when a trailer is sent. +// OutTrailer contains stats about trailer transmission. type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. // - // Deprecated: This field is never set. The length is not known when this message is - // emitted because the trailer fields are compressed with hpack after that. + // Deprecated: This field is never set. The length is not known when this + // message is emitted because the trailer fields are compressed with hpack + // after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. @@ -208,7 +225,7 @@ func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} -// End contains stats when an RPC ends. 
+// End contains stats about RPC completion. type End struct { // Client is true if this End is from client side. Client bool @@ -238,7 +255,7 @@ type ConnStats interface { IsClient() bool } -// ConnBegin contains the stats of a connection when it is established. +// ConnBegin contains stats about connection establishment. type ConnBegin struct { // Client is true if this ConnBegin is from client side. Client bool @@ -249,7 +266,7 @@ func (s *ConnBegin) IsClient() bool { return s.Client } func (s *ConnBegin) isConnStats() {} -// ConnEnd contains the stats of a connection when it ends. +// ConnEnd contains stats about connection termination. type ConnEnd struct { // Client is true if this ConnEnd is from client side. Client bool diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 12163150..ec9577b2 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -25,6 +25,7 @@ import ( "math" rand "math/rand/v2" "strconv" + "strings" "sync" "time" @@ -101,9 +102,9 @@ type ClientStream interface { // It must only be called after stream.CloseAndRecv has returned, or // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. It is also not safe to call CloseSend - // concurrently with SendMsg. + // CloseSend closes the send direction of the stream. This method always + // returns a nil error. The status of the stream may be discovered using + // RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg. CloseSend() error // Context returns the context for this stream. // @@ -177,13 +178,43 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cc.NewStream(ctx, desc, method, opts...) } +var emptyMethodConfig = serviceconfig.MethodConfig{} + +// endOfClientStream performs cleanup actions required for both successful and +// failed streams. This includes incrementing channelz stats and invoking all +// registered OnFinish call options. +func endOfClientStream(cc *ClientConn, err error, opts ...CallOption) { + if channelz.IsOn() { + if err != nil { + cc.incrCallsFailed() + } else { + cc.incrCallsSucceeded() + } + } + + for _, o := range opts { + if o, ok := o.(OnFinishCallOption); ok { + o.OnFinish(err) + } + } +} + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + } + defer func() { + if err != nil { + // Ensure cleanup when stream creation fails. + endOfClientStream(cc, err, opts...) + } + }() + // Start tracking the RPC for idleness purposes. This is where a stream is // created for both streaming and unary RPCs, and hence is a good place to // track active RPC count. - if err := cc.idlenessMgr.OnCallBegin(); err != nil { - return nil, err - } + cc.idlenessMgr.OnCallBegin() + // Add a calloption, to decrement the active call count, that gets executed // when the RPC completes. opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) 
@@ -202,24 +233,17 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } } } - if channelz.IsOn() { - cc.incrCallsStarted() - defer func() { - if err != nil { - cc.incrCallsFailed() - } - }() - } // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. - if err := cc.waitForResolvedAddrs(ctx); err != nil { + nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx) + if err != nil { return nil, err } - var mc serviceconfig.MethodConfig + mc := &emptyMethodConfig var onCommit func() newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { - return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...) } rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} @@ -239,7 +263,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if rpcConfig.Context != nil { ctx = rpcConfig.Context } - mc = rpcConfig.MethodConfig + mc = &rpcConfig.MethodConfig onCommit = rpcConfig.OnCommitted if rpcConfig.Interceptor != nil { rpcInfo.Context = nil @@ -257,7 +281,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return newStream(ctx, func() {}) } -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { callInfo := defaultCallInfo() if mc.WaitForReady != nil { callInfo.failFast = !*mc.WaitForReady @@ -296,6 +320,11 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client Method: method, ContentSubtype: callInfo.contentSubtype, DoneFunc: doneFunc, + Authority: callInfo.authority, + } + if allowed := callInfo.acceptedResponseCompressors; len(allowed) > 0 { + headerValue := strings.Join(allowed, ",") + callHdr.AcceptedCompressors = &headerValue } // Set our outgoing compression according to the UseCompressor CallOption, if @@ -321,19 +350,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs := &clientStream{ - callHdr: callHdr, - ctx: ctx, - methodConfig: &mc, - opts: opts, - callInfo: callInfo, - cc: cc, - desc: desc, - codec: callInfo.codec, - compressorV0: compressorV0, - compressorV1: compressorV1, - cancel: cancel, - firstAttempt: true, - onCommit: onCommit, + callHdr: callHdr, + ctx: ctx, + methodConfig: mc, + opts: opts, + callInfo: callInfo, + cc: cc, + desc: desc, + codec: callInfo.codec, + compressorV0: compressorV0, + compressorV1: compressorV1, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + nameResolutionDelay: nameResolutionDelayed, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) @@ -415,19 +445,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time - shs := cs.cc.dopts.copts.StatsHandlers - for _, sh := range shs { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: 
method, FailFast: cs.callInfo.failFast}) + sh := cs.cc.statsHandler + if sh != nil { beginTime = time.Now() - begin := &stats.Begin{ + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{ + FullMethodName: method, FailFast: cs.callInfo.failFast, + NameResolutionDelay: cs.nameResolutionDelay, + }) + sh.HandleRPC(ctx, &stats.Begin{ Client: true, BeginTime: beginTime, FailFast: cs.callInfo.failFast, IsClientStream: cs.desc.ClientStreams, IsServerStream: cs.desc.ServerStreams, IsTransparentRetryAttempt: isTransparent, - } - sh.HandleRPC(ctx, begin) + }) } var trInfo *traceInfo @@ -458,7 +490,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) beginTime: beginTime, cs: cs, decompressorV0: cs.cc.dopts.dc, - statsHandlers: shs, + statsHandler: sh, trInfo: trInfo, }, nil } @@ -466,8 +498,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) func (a *csAttempt) getTransport() error { cs := a.cs - var err error - a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method} + pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo) + a.transport, a.pickResult = pick.transport, pick.result if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -476,7 +509,10 @@ func (a *csAttempt) getTransport() error { return err } if a.trInfo != nil { - a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) + a.trInfo.firstLine.SetRemoteAddr(a.transport.Peer().Addr) + } + if pick.blocked && a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) } return nil } @@ -520,7 +556,7 @@ func (a *csAttempt) newStream() error { } a.transportStream = s a.ctx = s.Context() - a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -540,6 +576,8 @@ type clientStream struct { sentLast bool // sent an end stream + receivedFirstMsg bool // set after the first message is received + methodConfig *MethodConfig ctx context.Context // the application's context, wrapped by stats/tracing @@ -573,6 +611,9 @@ type clientStream struct { onCommit func() replayBuffer []replayOp // operations to replay on retry replayBufferSize int // current size of replayBuffer + // nameResolutionDelay indicates if there was a delay in the name resolution. + // This field is only valid on client side, it's always false on server side. + nameResolutionDelay bool } type replayOp struct { @@ -587,7 +628,7 @@ type csAttempt struct { cs *clientStream transport transport.ClientTransport transportStream *transport.ClientStream - parser *parser + parser parser pickResult balancer.PickResult finished bool @@ -601,8 +642,8 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandlers []stats.Handler - beginTime time.Time + statsHandler stats.Handler + beginTime time.Time // set for newStream errors that may be transparently retried allowTransparentRetry bool @@ -987,7 +1028,7 @@ func (cs *clientStream) RecvMsg(m any) error { func (cs *clientStream) CloseSend() error { if cs.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? + // Return a nil error on repeated calls to this method. 
return nil } cs.sentLast = true @@ -1008,7 +1049,10 @@ func (cs *clientStream) CloseSend() error { binlog.Log(cs.ctx, chc) } } - // We never returned an error here for reasons. + // We don't return an error here as we expect users to read all messages + // from the stream and get the RPC status from RecvMsg(). Note that + // SendMsg() must return an error when one occurs so the application + // knows to stop sending messages, but that does not apply here. return nil } @@ -1023,9 +1067,6 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true - for _, onFinish := range cs.callInfo.onFinish { - onFinish(err) - } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -1065,13 +1106,7 @@ func (cs *clientStream) finish(err error) { if err == nil { cs.retryThrottler.successfulRPC() } - if channelz.IsOn() { - if err != nil { - cs.cc.incrCallsFailed() - } else { - cs.cc.incrCallsSucceeded() - } - } + endOfClientStream(cs.cc, err, cs.opts...) cs.cancel() } @@ -1093,17 +1128,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } return io.EOF } - if len(a.statsHandlers) != 0 { - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) - } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } return nil } func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs - if len(a.statsHandlers) != 0 && payInfo == nil { + if a.statsHandler != nil && payInfo == nil { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1117,6 +1150,10 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { a.decompressorV0 = nil a.decompressorV1 = encoding.GetCompressor(ct) } + // Validate that the compression method is acceptable for this call. + if !acceptedCompressorAllows(cs.callInfo.acceptedResponseCompressors, ct) { + return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct) + } } else { // No compression is used; disable our decompressor. a.decompressorV0 = nil @@ -1124,16 +1161,21 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompressorSet = true } - if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { + if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { if err == io.EOF { if statusErr := a.transportStream.Status().Err(); statusErr != nil { return statusErr } + // Received no msg and status OK for non-server streaming rpcs. + if !cs.desc.ServerStreams && !cs.receivedFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC") + } return io.EOF // indicates successful end of stream. 
}
 		return toRPCErr(err)
 	}
+	cs.receivedFirstMsg = true
 	if a.trInfo != nil {
 		a.mu.Lock()
@@ -1141,8 +1183,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 		}
 		a.mu.Unlock()
 	}
-	for _, sh := range a.statsHandlers {
-		sh.HandleRPC(a.ctx, &stats.InPayload{
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
 			Client:   true,
 			RecvTime: time.Now(),
 			Payload:  m,
@@ -1157,12 +1199,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	}
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+	if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
 		return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
 	} else if err != nil {
 		return toRPCErr(err)
 	}
-	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }

 func (a *csAttempt) finish(err error) {
@@ -1195,15 +1237,14 @@ func (a *csAttempt) finish(err error) {
 			ServerLoad: balancerload.Parse(tr),
 		})
 	}
-	for _, sh := range a.statsHandlers {
-		end := &stats.End{
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(a.ctx, &stats.End{
 			Client:    true,
 			BeginTime: a.beginTime,
 			EndTime:   time.Now(),
 			Trailer:   tr,
 			Error:     err,
-		}
-		sh.HandleRPC(a.ctx, end)
+		})
 	}
 	if a.trInfo != nil && a.trInfo.tr != nil {
 		if err == nil {
@@ -1309,7 +1350,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 		return nil, err
 	}
 	as.transportStream = s
-	as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+	as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
 		// Listen on stream context to cleanup when the stream context is
@@ -1344,6 +1385,7 @@ type addrConnStream struct {
 	transport transport.ClientTransport
 	ctx       context.Context
 	sentLast  bool
+	receivedFirstMsg bool
 	desc      *StreamDesc
 	codec     baseCodec
 	sendCompressorV0 Compressor
@@ -1351,7 +1393,7 @@ type addrConnStream struct {
 	decompressorSet bool
 	decompressorV0  Decompressor
 	decompressorV1  encoding.Compressor
-	parser *parser
+	parser parser
 	// mu guards finished and is held for the entire finish method.
 	mu sync.Mutex
@@ -1372,7 +1414,7 @@ func (as *addrConnStream) Trailer() metadata.MD {
 func (as *addrConnStream) CloseSend() error {
 	if as.sentLast {
-		// TODO: return an error and finish the stream instead, due to API misuse?
+		// Return a nil error on repeated calls to this method.
 		return nil
 	}
 	as.sentLast = true
@@ -1457,6 +1499,10 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 			as.decompressorV0 = nil
 			as.decompressorV1 = encoding.GetCompressor(ct)
 		}
+		// Validate that the compression method is acceptable for this call.
+		if !acceptedCompressorAllows(as.callInfo.acceptedResponseCompressors, ct) {
+			return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
+		}
 	} else {
 		// No compression is used; disable our decompressor.
as.decompressorV0 = nil
@@ -1464,15 +1510,20 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 		// Only initialize this state once per stream.
 		as.decompressorSet = true
 	}
-	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
+	if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
 		if err == io.EOF {
 			if statusErr := as.transportStream.Status().Err(); statusErr != nil {
 				return statusErr
 			}
+			// Received no msg and status OK for non-server streaming rpcs.
+			if !as.desc.ServerStreams && !as.receivedFirstMsg {
+				return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+			}
 			return io.EOF // indicates successful end of stream.
 		}
 		return toRPCErr(err)
 	}
+	as.receivedFirstMsg = true

 	if as.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
@@ -1481,12 +1532,12 @@

 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+	if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
 		return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
 	} else if err != nil {
 		return toRPCErr(err)
 	}
-	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }

 func (as *addrConnStream) finish(err error) {
@@ -1569,8 +1620,9 @@ type ServerStream interface {
 type serverStream struct {
 	ctx   context.Context
 	s     *transport.ServerStream
-	p     *parser
+	p     parser
 	codec baseCodec
+	desc  *StreamDesc

 	compressorV0 Compressor
 	compressorV1 encoding.Compressor
@@ -1579,11 +1631,13 @@

 	sendCompressorName string

+	recvFirstMsg bool // set after the first message is received
+
 	maxReceiveMessageSize int
 	maxSendMessageSize    int
 	trInfo                *traceInfo

-	statsHandler []stats.Handler
+	statsHandler stats.Handler

 	binlogs []binarylog.MethodLogger
 	// serverHeaderBinlogged indicates whether server header has been logged.
It @@ -1719,10 +1773,8 @@ func (ss *serverStream) SendMsg(m any) (err error) { binlog.Log(ss.ctx, sm) } } - if len(ss.statsHandler) != 0 { - for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) - } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } return nil } @@ -1753,11 +1805,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } }() var payInfo *payloadInfo - if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + if ss.statsHandler != nil || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { + if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1765,6 +1817,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, chc) } } + // Received no request msg for non-client streaming rpcs. + if !ss.desc.ClientStreams && !ss.recvFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC") + } return err } if err == io.ErrUnexpectedEOF { @@ -1772,16 +1828,15 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } return toRPCErr(err) } - if len(ss.statsHandler) != 0 { - for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - Length: payInfo.uncompressedBytes.Len(), - WireLength: payInfo.compressedLength + headerLen, - CompressedLength: payInfo.compressedLength, - }) - } + ss.recvFirstMsg = true + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ @@ -1791,7 +1846,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, cm) } } - return nil + + if ss.desc.ClientStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-client-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + return nil + } else if err != nil { + return err + } + return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC") } // MethodFromServerStream returns the method string for the input stream. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 3c148a81..ff7840fd 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.71.1" +const Version = "1.78.0" diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index e942bc98..743bfb81 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) { func SizeVarint(v uint64) int { // This computes 1 + (bits.Len64(v)-1)/7. // 9/64 is a good enough approximation of 1/7 - return int(9*uint32(bits.Len64(v))+64) / 64 + // + // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT + // instruction, which is very fast on CPUs from the last few years. The + // specific way of expressing the calculation matches C++ Protobuf, see + // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang + // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell). + + // By OR'ing v with 1, we guarantee that v is never 0, without changing the + // result of SizeVarint. LZCNT is not defined for 0, meaning the compiler + // needs to add extra instructions to handle that case. + // + // The Go compiler currently (go1.24.4) does not make use of this knowledge. + // This opportunity (removing the XOR instruction, which handles the 0 case) + // results in a small (1%) performance win across CPU architectures. + // + // Independently of avoiding the 0 case, we need the v |= 1 line because + // it allows the Go compiler to eliminate an extra XCHGL barrier. + v |= 1 + + // It would be clearer to write log2value := 63 - uint32(...), but + // writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel). + // Proof of identity for our value range [0..63]: + // https://go.dev/play/p/Pdn9hEWYakX + log2value := uint32(bits.LeadingZeros64(v)) ^ 63 + return int((log2value*9 + (64 + 9)) / 64) } // AppendFixed32 appends v to b as a little-endian uint32. diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 323829da1477e4496d664b2a1092a9f9cec275d4..04696351eeeef14cbbd69fd1f4250530b1fbfd56 100644 GIT binary patch literal 154 zcmX}mI}(5(3Eat$;}$;v literal 146 zcmX}mF%Ezr3X5(&e%rBRTLK{CjOa+)E@2mYkk=mEF7 B6)FG# diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index bf1aba0e..7b9f01af 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -9,7 +9,7 @@ import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 - Maximum = descriptorpb.Edition_EDITION_2023 + Maximum = descriptorpb.Edition_EDITION_2024 // MaximumKnown is the maximum edition that is known to Go Protobuf, but not // declared as supported. 
In other words: end users cannot use it, but diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 688aabe4..dbcf90b8 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -72,9 +72,10 @@ type ( EditionFeatures EditionFeatures } FileL2 struct { - Options func() protoreflect.ProtoMessage - Imports FileImports - Locations SourceLocations + Options func() protoreflect.ProtoMessage + Imports FileImports + OptionImports func() protoreflect.FileImports + Locations SourceLocations } // EditionFeatures is a frequently-instantiated struct, so please take care @@ -126,12 +127,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } - -// Not exported and just used to reconstruct the original FileDescriptor proto -func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -150,6 +148,16 @@ func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatD func (fd *File) ProtoType(protoreflect.FileDescriptor) {} func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +// The next two are not part of the FileDescriptor interface. They are just used to reconstruct +// the original FileDescriptor proto. +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) OptionImports() protoreflect.FileImports { + if f := fd.lazyInit().OptionImports; f != nil { + return f() + } + return emptyFiles +} + func (fd *File) lazyInit() *FileL2 { if atomic.LoadUint32(&fd.once) == 0 { fd.lazyInitOnce() @@ -182,9 +190,9 @@ type ( L2 *EnumL2 // protected by fileDesc.once } EnumL1 struct { - eagerValues bool // controls whether EnumL2.Values is already populated - EditionFeatures EditionFeatures + Visibility int32 + eagerValues bool // controls whether EnumL2.Values is already populated } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -219,6 +227,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit() func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} + +// This is not part of the EnumDescriptor interface. It is just used to reconstruct +// the original FileDescriptor proto. 
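// Illustrative sketch (not part of the vendored patch): spot-checking the two
// identities behind the SizeVarint rewrite in protowire earlier in this patch,
// namely that x^63 equals 63-x for x in [0,63] and that (9*log2+73)/64 agrees
// with the old 9/64 approximation of a division by 7. Function names are
// assumptions for illustration only.
package example

import "math/bits"

// sizeVarintNew mirrors the rewritten formula from the patch.
func sizeVarintNew(v uint64) int {
	v |= 1 // LZCNT is undefined for 0; OR'ing with 1 does not change the result
	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
	return int((log2value*9 + (64 + 9)) / 64)
}

// sizeVarintOld is the closed form being replaced.
func sizeVarintOld(v uint64) int {
	return int(9*uint32(bits.Len64(v))+64) / 64
}

// agree spot-checks that both formulas match on boundary values.
func agree() bool {
	for _, v := range []uint64{0, 1, 127, 128, 1 << 14, 1<<21 - 1, 1<<63 - 1, 1 << 63} {
		if sizeVarintNew(v) != sizeVarintOld(v) {
			return false
		}
	}
	return true
}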
+func (ed *Enum) Visibility() int32 { return ed.L1.Visibility } + func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 @@ -244,13 +257,13 @@ type ( L2 *MessageL2 // protected by fileDesc.once } MessageL1 struct { - Enums Enums - Messages Messages - Extensions Extensions - IsMapEntry bool // promoted from google.protobuf.MessageOptions - IsMessageSet bool // promoted from google.protobuf.MessageOptions - + Enums Enums + Messages Messages + Extensions Extensions EditionFeatures EditionFeatures + Visibility int32 + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -319,6 +332,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } + +// This is not part of the MessageDescriptor interface. It is just used to reconstruct +// the original FileDescriptor proto. +func (md *Message) Visibility() int32 { return md.L1.Visibility } + func (md *Message) lazyInit() *MessageL2 { md.L0.ParentFile.lazyInit() // implicitly initializes L2 return md.L2 diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index d2f54949..e91860f5 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl case genid.EnumDescriptorProto_Value_field_number: numValues++ } + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Visibility_field_number: + ed.L1.Visibility = int32(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.unmarshalSeedOptions(v) } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Visibility_field_number: + md.L1.Visibility = int32(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index d4c94458..dd31faae 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) { var enumIdx, messageIdx, extensionIdx, serviceIdx int var rawOptions []byte + var optionImports []string fd.L2 = new(FileL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) { imp = PlaceholderFile(path) } fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_OptionDependency_field_number: + optionImports = append(optionImports, sb.MakeString(v)) case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ @@ -178,6 +181,23 @@ func (fd *File) 
unmarshalFull(b []byte) { } } fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) + if len(optionImports) > 0 { + var imps FileImports + var once sync.Once + fd.L2.OptionImports = func() protoreflect.FileImports { + once.Do(func() { + imps = make(FileImports, len(optionImports)) + for i, path := range optionImports { + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + imps[i] = protoreflect.FileImport{FileDescriptor: imp} + } + }) + return &imps + } + } } func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index b08b7183..66ba9068 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -13,8 +13,10 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" ) -var defaultsCache = make(map[Edition]EditionFeatures) -var defaultsKeys = []Edition{} +var ( + defaultsCache = make(map[Edition]EditionFeatures) + defaultsKeys = []Edition{} +) func init() { unmarshalEditionDefaults(editiondefaults.Defaults) @@ -41,7 +43,7 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { b = b[m:] parent.StripEnumPrefix = int(v) default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) } } return parent @@ -72,8 +74,11 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { case genid.FeatureSet_EnforceNamingStyle_field_number: // EnforceNamingStyle is enforced in protoc, languages other than C++ // are not supposed to do anything with this feature. + case genid.FeatureSet_DefaultSymbolVisibility_field_number: + // DefaultSymbolVisibility is enforced in protoc, runtimes should not + // inspect this value. default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num)) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -147,7 +152,7 @@ func unmarshalEditionDefaults(b []byte) { _, m := protowire.ConsumeVarint(b) b = b[m:] default: - panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go new file mode 100644 index 00000000..a12ec979 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import "google.golang.org/protobuf/reflect/protoreflect" + +// UsePresenceForField reports whether the presence bitmap should be used for +// the specified field. +func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneof fields never use the presence bitmap. + // + // Synthetic oneofs are an exception: Those are used to implement proto3 + // optional fields and hence should follow non-oneof field semantics. 
+ return false, false + + case fd.IsMap(): + // Map-typed fields never use the presence bitmap. + return false, false + + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + // Lazy fields always use the presence bitmap (only messages can be lazy). + isLazy := fd.(interface{ IsLazy() bool }).IsLazy() + return isLazy, isLazy + + default: + // If the field has presence, use the presence bitmap. + return fd.HasPresence(), false + } +} diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go index df8f9185..3ceb6fa7 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -27,6 +27,7 @@ const ( Api_SourceContext_field_name protoreflect.Name = "source_context" Api_Mixins_field_name protoreflect.Name = "mixins" Api_Syntax_field_name protoreflect.Name = "syntax" + Api_Edition_field_name protoreflect.Name = "edition" Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" @@ -35,6 +36,7 @@ const ( Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" + Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition" ) // Field numbers for google.protobuf.Api. @@ -46,6 +48,7 @@ const ( Api_SourceContext_field_number protoreflect.FieldNumber = 5 Api_Mixins_field_number protoreflect.FieldNumber = 6 Api_Syntax_field_number protoreflect.FieldNumber = 7 + Api_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Method. @@ -63,6 +66,7 @@ const ( Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" Method_Options_field_name protoreflect.Name = "options" Method_Syntax_field_name protoreflect.Name = "syntax" + Method_Edition_field_name protoreflect.Name = "edition" Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" @@ -71,6 +75,7 @@ const ( Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" + Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition" ) // Field numbers for google.protobuf.Method. @@ -82,6 +87,7 @@ const ( Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 Method_Options_field_number protoreflect.FieldNumber = 6 Method_Syntax_field_number protoreflect.FieldNumber = 7 + Method_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Mixin. diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 39524782..950a6a32 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -34,6 +34,19 @@ const ( Edition_EDITION_MAX_enum_value = 2147483647 ) +// Full and short names for google.protobuf.SymbolVisibility. 
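// The UsePresenceForField helper added in presence.go above encodes when the
// opaque-API presence bitmap applies. Below is a standalone sketch of the same
// decision table, usable outside the internal filedesc package; it deliberately
// skips the laziness check (which needs the concrete internal descriptor type),
// so message-typed fields are treated as non-lazy here.
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// usesPresenceBitmap mirrors filedesc.UsePresenceForField, minus laziness.
func usesPresenceBitmap(fd protoreflect.FieldDescriptor) bool {
	switch {
	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
		return false // members of a real oneof never use the bitmap
	case fd.IsMap():
		return false // map fields never use the bitmap
	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
		return false // only lazy message fields would; assumed non-lazy here
	default:
		return fd.HasPresence() // scalar, enum, and string fields follow field presence
	}
}

func main() {
	fields := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor().Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		fmt.Printf("%-20s presence bitmap: %v\n", fd.Name(), usesPresenceBitmap(fd))
	}
}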
+const ( + SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility" + SymbolVisibility_enum_name = "SymbolVisibility" +) + +// Enum values for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_VISIBILITY_UNSET_enum_value = 0 + SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1 + SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -65,6 +78,7 @@ const ( FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency" FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" FileDescriptorProto_Service_field_name protoreflect.Name = "service" @@ -79,6 +93,7 @@ const ( FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency" FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" @@ -96,6 +111,7 @@ const ( FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15 FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 @@ -124,6 +140,7 @@ const ( DescriptorProto_Options_field_name protoreflect.Name = "options" DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + DescriptorProto_Visibility_field_name protoreflect.Name = "visibility" DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" @@ -135,6 +152,7 @@ const ( DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" + DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility" ) // Field numbers for 
google.protobuf.DescriptorProto. @@ -149,6 +167,7 @@ const ( DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 + DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11 ) // Names for google.protobuf.DescriptorProto.ExtensionRange. @@ -388,12 +407,14 @@ const ( EnumDescriptorProto_Options_field_name protoreflect.Name = "options" EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility" EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" + EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility" ) // Field numbers for google.protobuf.EnumDescriptorProto. @@ -403,6 +424,7 @@ const ( EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 + EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. @@ -1008,32 +1030,35 @@ const ( // Field names for google.protobuf.FeatureSet. 
const ( - FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" - FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" - FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" - FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" - FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" - FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" - FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" - - FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" - FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" - FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" - FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" - FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" - FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" - FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility" ) // Field numbers for google.protobuf.FeatureSet. 
const ( - FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 - FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 - FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 - FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 - FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 - FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 - FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1128,6 +1153,27 @@ const ( FeatureSet_STYLE_LEGACY_enum_value = 2 ) +// Names for google.protobuf.FeatureSet.VisibilityFeature. +const ( + FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature" + FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature" +) + +// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility" + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility" +) + +// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0 + FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1 + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2 + FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3 + FeatureSet_VisibilityFeature_STRICT_enum_value = 4 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 41c1f74e..bdad12a9 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf // permit us to skip over definitely-unset fields at marshal time. 
var hasPresence bool - hasPresence, cf.isLazy = usePresenceForField(si, fd) + hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd) if hasPresence { cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index dd55e8e0..5a439daa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -11,6 +11,7 @@ import ( "strings" "sync/atomic" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] var fi fieldInfo - usePresence, _ := usePresenceForField(si, fd) + usePresence, _ := filedesc.UsePresenceForField(fd) switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): @@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return false } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return false } - rv := sp.AsValueOf(fs.Type.Elem()) return rv.Elem().Len() > 0 }, clear: func(p pointer) { - sp := p.Apply(fieldOffset).AtomicGetPointer() - if !sp.IsNil() { - rv := sp.AsValueOf(fs.Type.Elem()) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if !rv.IsNil() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } }, @@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return conv.Zero() } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return conv.Zero() } - rv := sp.AsValueOf(fs.Type.Elem()) if rv.Elem().Len() == 0 { return conv.Zero() } @@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) { func (mi *MessageInfo) present(p pointer, index uint32) bool { return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) } - -// usePresenceForField implements the somewhat intricate logic of when -// the presence bitmap is used for a field. The main logic is that a -// field that is optional or that can be lazy will use the presence -// bit, but for proto2, also maps have a presence bit. It also records -// if the field can ever be lazy, which is true if we have a -// lazyOffset and the field is a message or a slice of messages. A -// field that is lazy will always need a presence bit. Oneofs are not -// lazy and do not use presence, unless they are a synthetic oneof, -// which is a proto3 optional field. For proto3 optionals, we use the -// presence and they can also be lazy when applicable (a message). -func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { - hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() - - // Non-oneof scalar fields with explicit field presence use the presence array. 
- usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) - switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - return false, false - case fd.IsMap(): - return false, false - case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: - return hasLazyField, hasLazyField - default: - return usesPresenceArray || (hasLazyField && fd.HasPresence()), false - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go index 914cb1de..443afe81 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/presence.go +++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) { // Present checks for the presence of a specific field number in a presence set. func (p presence) Present(num uint32) bool { - if p.P == nil { - return false - } return Export{}.Present(p.toElem(num), num) } diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index aac1cb18..77de0f23 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 6 + Patch = 10 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 823dbf3b..9196288e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -152,6 +152,28 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot imp := &f.L2.Imports[i] imps.importPublic(imp.Imports()) } + if len(fd.GetOptionDependency()) > 0 { + optionImports := make(filedesc.FileImports, len(fd.GetOptionDependency())) + for i, path := range fd.GetOptionDependency() { + imp := &optionImports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound { + // We always allow option imports to be unresolvable. + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + f.L2.OptionImports = func() protoreflect.FileImports { + return &optionImports + } + } // Handle source locations. 
f.L2.Locations.File = f diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 9da34998..c826ad04 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -29,6 +29,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt e.L2.Options = func() protoreflect.ProtoMessage { return opts } } e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) + e.L1.Visibility = int32(ed.GetVisibility()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -70,6 +71,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return nil, err } m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + m.L1.Visibility = int32(md.GetVisibility()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index 9b880aa8..6f91074e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -70,16 +70,27 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } if file.Syntax() == protoreflect.Editions { - desc := file - if fileImportDesc, ok := file.(protoreflect.FileImport); ok { - desc = fileImportDesc.FileDescriptor - } - if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() } } + type hasOptionImports interface { + OptionImports() protoreflect.FileImports + } + if opts, ok := desc.(hasOptionImports); ok { + if optionImports := opts.OptionImports(); optionImports.Len() > 0 { + optionDeps := make([]string, optionImports.Len()) + for i := range optionImports.Len() { + optionDeps[i] = optionImports.Get(i).Path() + } + p.OptionDependency = optionDeps + } + } return p } @@ -123,6 +134,14 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { p.ReservedName = append(p.ReservedName, string(names.Get(i))) } + type hasVisibility interface { + Visibility() int32 + } + if vis, ok := message.(hasVisibility); ok { + if visibility := vis.Visibility(); visibility > 0 { + p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum() + } + } return p } @@ -216,6 +235,14 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { p.ReservedName = append(p.ReservedName, string(names.Get(i))) } + type hasVisibility interface { + Visibility() int32 + } + if vis, ok := enum.(hasVisibility); ok { + if visibility := vis.Visibility(); visibility > 0 { + p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum() + } + } return p } diff --git 
a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index a4a0a297..730331e6 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "public_dependency", nil) case 11: b = p.appendRepeatedField(b, "weak_dependency", nil) + case 15: + b = p.appendRepeatedField(b, "option_dependency", nil) case 4: b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) case 5: @@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) case 10: b = p.appendRepeatedField(b, "reserved_name", nil) + case 11: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) case 5: b = p.appendRepeatedField(b, "reserved_name", nil) + case 6: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -400,6 +406,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "json_format", nil) case 7: b = p.appendSingularField(b, "enforce_naming_style", nil) + case 8: + b = p.appendSingularField(b, "default_symbol_visibility", nil) } return b } diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 7fe280f1..4eacb523 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} } +// Describes the 'visibility' of a symbol with respect to the proto import +// system. Symbols can only be imported when the visibility rules do not prevent +// it (ex: local symbols cannot be imported). Visibility modifiers can only set +// on `message` and `enum` as they are the only types available to be referenced +// from other files. +type SymbolVisibility int32 + +const ( + SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0 + SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1 + SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2 +) + +// Enum value maps for SymbolVisibility. 
+var ( + SymbolVisibility_name = map[int32]string{ + 0: "VISIBILITY_UNSET", + 1: "VISIBILITY_LOCAL", + 2: "VISIBILITY_EXPORT", + } + SymbolVisibility_value = map[string]int32{ + "VISIBILITY_UNSET": 0, + "VISIBILITY_LOCAL": 1, + "VISIBILITY_EXPORT": 2, + } +) + +func (x SymbolVisibility) Enum() *SymbolVisibility { + p := new(SymbolVisibility) + *p = x + return p +} + +func (x SymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (SymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x SymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *SymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = SymbolVisibility(num) + return nil +} + +// Deprecated: Use SymbolVisibility.Descriptor instead. +func (SymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] 
+ return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[10] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string { } func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() } func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[10] + return &file_google_protobuf_descriptor_proto_enumTypes[11] } func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { @@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string { } 
func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() } func (FeatureSet_EnumType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[11] + return &file_google_protobuf_descriptor_proto_enumTypes[12] } func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { @@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string { } func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() } func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[12] + return &file_google_protobuf_descriptor_proto_enumTypes[13] } func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { @@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string { } func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() } func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[13] + return &file_google_protobuf_descriptor_proto_enumTypes[14] } func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { @@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string { } func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() } func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[14] + return &file_google_protobuf_descriptor_proto_enumTypes[15] } func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { @@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string { } func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[15] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { @@ -1172,11 +1236,11 @@ func (x FeatureSet_EnforceNamingStyle) String() string { } func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() } func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[17] } func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { @@ -1198,6 +1262,77 @@ func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} } +type FeatureSet_VisibilityFeature_DefaultSymbolVisibility 
int32 + +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0 + // Default pre-EDITION_2024, all UNSET visibility are export. + FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1 + // All top-level symbols default to export, nested default to local. + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2 + // All symbols default to local. + FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3 + // All symbols local by default. Nested types cannot be exported. + // With special case caveat for message { enum {} reserved 1 to max; } + // This is the recommended setting for new protos. + FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4 +) + +// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility. +var ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{ + 0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN", + 1: "EXPORT_ALL", + 2: "EXPORT_TOP_LEVEL", + 3: "LOCAL_ALL", + 4: "STRICT", + } + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{ + "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0, + "EXPORT_ALL": 1, + "EXPORT_TOP_LEVEL": 2, + "LOCAL_ALL": 3, + "STRICT": 4, + } +) + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility) + *p = x + return p +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor() +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[18] +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num) + return nil +} + +// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead. +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0} +} + // Represents the identified object's effect on the element in the original // .proto file. 
type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1236,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[17] + return &file_google_protobuf_descriptor_proto_enumTypes[19] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1321,6 +1456,9 @@ type FileDescriptorProto struct { // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // Names of files imported by this file purely for the purpose of providing + // option extensions. These are excluded from the dependency list above. + OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"` // All top-level definitions in this file. MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` @@ -1414,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 { return nil } +func (x *FileDescriptorProto) GetOptionDependency() []string { + if x != nil { + return x.OptionDependency + } + return nil +} + func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { if x != nil { return x.MessageType @@ -1484,7 +1629,9 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1589,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string { return nil } +func (x *DescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + type ExtensionRangeOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. @@ -1901,7 +2055,9 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1971,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string { return nil } +func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + // Describes a value within an enum. type EnumValueDescriptorProto struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2710,7 +2873,10 @@ type FieldOptions struct { // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // DEPRECATED. DO NOT USE! // For Google-internal migration only. Do not use. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. @@ -2814,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool { return Default_FieldOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FieldOptions) GetWeak() bool { if x != nil && x.Weak != nil { return *x.Weak @@ -3392,17 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. 
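// The hunks above add a `visibility` field and a GetVisibility accessor to
// DescriptorProto and EnumDescriptorProto for the new `export`/`local`
// visibility keywords. A minimal standalone sketch of writing and reading that
// field through the generated API; the enum name "Color" is purely illustrative.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	ed := &descriptorpb.EnumDescriptorProto{
		Name:       proto.String("Color"),
		Visibility: descriptorpb.SymbolVisibility_VISIBILITY_LOCAL.Enum(),
	}
	fmt.Println(ed.GetVisibility()) // VISIBILITY_LOCAL
	// GetVisibility falls back to VISIBILITY_UNSET when the field is unset.
	fmt.Println((&descriptorpb.DescriptorProto{}).GetVisibility()) // VISIBILITY_UNSET
}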
type FeatureSet struct { - state protoimpl.MessageState `protogen:"open.v1"` - FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` - EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` - RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` - Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` - MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` - JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` - EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` + DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { @@ -3484,6 +3652,13 @@ func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle { return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN } +func (x *FeatureSet) 
GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + if x != nil && x.DefaultSymbolVisibility != nil { + return *x.DefaultSymbolVisibility + } + return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN +} + // A compiled specification for the defaults of a set of features. These // messages are generated from FeatureSet extensions and can be used to seed // feature resolution. The resolution with this object becomes a simple search @@ -4144,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +type FeatureSet_VisibilityFeature struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FeatureSet_VisibilityFeature) Reset() { + *x = FeatureSet_VisibilityFeature{} + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FeatureSet_VisibilityFeature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet_VisibilityFeature) ProtoMessage() {} + +func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead. +func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + // A map from every known edition with a unique set of defaults to its // defaults. Not all editions may be contained here. 
For a given edition, // the defaults at the closest matching edition ordered at or before it should @@ -4161,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4173,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4309,7 +4520,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4321,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4393,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4405,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4462,7 +4673,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\n" + " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + "\x11FileDescriptorSet\x128\n" + - "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" + "\x13FileDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + @@ -4471,7 +4682,8 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "dependency\x12+\n" + "\x11public_dependency\x18\n" + " \x03(\x05R\x10publicDependency\x12'\n" + - "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" + + "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" + "\fmessage_type\x18\x04 \x03(\v2 
.google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + @@ -4479,7 +4691,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + - "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" + "\x0fDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + @@ -4493,7 +4705,10 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + "\rreserved_name\x18\n" + - " \x03(\tR\freservedName\x1az\n" + + " \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1az\n" + "\x0eExtensionRange\x12\x14\n" + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + @@ -4562,13 +4777,16 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + "\x14OneofDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + - "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" + "\x13EnumDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + - "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1a;\n" + "\x11EnumReservedRange\x12\x14\n" + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + @@ -4629,7 +4847,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + - "\"\x9d\r\n" + + "\"\xa1\r\n" + "\fFieldOptions\x12A\n" + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + @@ -4638,9 +4856,9 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + "\n" + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + - "deprecated\x12\x19\n" + + 
"deprecated\x12\x1d\n" + "\x04weak\x18\n" + - " \x01(\b:\x05falseR\x04weak\x12(\n" + + " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + @@ -4728,7 +4946,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + "\bNamePart\x12\x1b\n" + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + - "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" + "\n" + "FeatureSet\x12\x91\x01\n" + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + @@ -4739,7 +4957,18 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + "jsonFormat\x12\xab\x01\n" + - "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" + + "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" + + "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" + + "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" + + "\x11VisibilityFeature\"\x81\x01\n" + + "\x17DefaultSymbolVisibility\x12%\n" + + "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" + + "\n" + + "EXPORT_ALL\x10\x01\x12\x14\n" + + "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" + + "\tLOCAL_ALL\x10\x03\x12\n" + + "\n" + + "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" + "\rFieldPresence\x12\x1a\n" + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + "\bEXPLICIT\x10\x01\x12\f\n" + @@ -4817,7 +5046,11 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + - "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" + + "\x10SymbolVisibility\x12\x14\n" + + "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" + + "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" + + "\x11VISIBILITY_EXPORT\x10\x02B~\n" + "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( @@ -4832,145 +5065,151 @@ func 
file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34) var file_google_protobuf_descriptor_proto_goTypes = []any{ - (Edition)(0), // 0: google.protobuf.Edition - (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel - (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence - (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType - (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding - (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation - (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding - (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle - (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 28: google.protobuf.FileOptions - (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo - 
(*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility + (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 3: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 11: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat + (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle + (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 30: google.protobuf.FileOptions 
+ (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart + (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 22, // 8: google.protobuf.DescriptorProto.field:type_name -> 
google.protobuf.FieldDescriptorProto - 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet - 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 37, // 41: google.protobuf.FieldOptions.features:type_name -> 
google.protobuf.FeatureSet - 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle - 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 74: 
google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 78, // [78:78] is the sub-list for method output_type - 78, // [78:78] is the sub-list for method input_type - 78, // [78:78] is the sub-list for extension type_name - 78, // [78:78] is the sub-list for extension extendee - 0, // [0:78] is the sub-list for field type_name + 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 5, // 33: 
google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 18, // 66: 
google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4983,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 18, - NumMessages: 33, + NumEnums: 20, + NumMessages: 34, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/modules.txt b/vendor/modules.txt index ba42c1d4..b3aeee38 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -18,8 +18,8 @@ github.com/aws/aws-lambda-go/lambda github.com/aws/aws-lambda-go/lambda/handlertrace github.com/aws/aws-lambda-go/lambda/messages github.com/aws/aws-lambda-go/lambdacontext -# github.com/aws/aws-sdk-go-v2 v1.36.3 -## explicit; go 1.22 +# github.com/aws/aws-sdk-go-v2 v1.41.0 +## explicit; go 1.23 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/arn github.com/aws/aws-sdk-go-v2/aws/defaults @@ -124,8 +124,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc/types github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.22.2 -## explicit; go 1.21 +# github.com/aws/smithy-go v1.24.0 +## explicit; go 1.23 github.com/aws/smithy-go github.com/aws/smithy-go/auth github.com/aws/smithy-go/auth/bearer @@ -156,10 +156,13 @@ github.com/aws/smithy-go/waiter # 
github.com/benbjohnson/clock v1.3.5 ## explicit; go 1.15 github.com/benbjohnson/clock -# github.com/cenkalti/backoff/v4 v4.3.0 -## explicit; go 1.18 -github.com/cenkalti/backoff/v4 -# github.com/conductorone/baton-sdk v0.7.10 +# github.com/cenkalti/backoff/v5 v5.0.3 +## explicit; go 1.23 +github.com/cenkalti/backoff/v5 +# github.com/cespare/xxhash/v2 v2.3.0 +## explicit; go 1.11 +github.com/cespare/xxhash/v2 +# github.com/conductorone/baton-sdk v0.7.12 ## explicit; go 1.25.2 github.com/conductorone/baton-sdk/internal/connector github.com/conductorone/baton-sdk/pb/c1/c1z/v1 @@ -269,13 +272,13 @@ github.com/fsnotify/fsnotify/internal # github.com/glebarez/go-sqlite v1.22.0 ## explicit; go 1.17 github.com/glebarez/go-sqlite -# github.com/go-jose/go-jose/v4 v4.0.5 -## explicit; go 1.21 +# github.com/go-jose/go-jose/v4 v4.1.3 +## explicit; go 1.24.0 github.com/go-jose/go-jose/v4 github.com/go-jose/go-jose/v4/cipher github.com/go-jose/go-jose/v4/json github.com/go-jose/go-jose/v4/jwt -# github.com/go-logr/logr v1.4.2 +# github.com/go-logr/logr v1.4.3 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -318,8 +321,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap github.com/grpc-ecosystem/go-grpc-middleware/recovery github.com/grpc-ecosystem/go-grpc-middleware/tags github.com/grpc-ecosystem/go-grpc-middleware/validator -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 -## explicit; go 1.22 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 +## explicit; go 1.24.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -488,78 +491,88 @@ github.com/tklauser/numcpus # github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi -# go.opentelemetry.io/auto/sdk v1.1.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/auto/sdk v1.2.1 +## explicit; go 1.24.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/contrib/bridges/otelzap v0.14.0 +## explicit; go 1.24.0 go.opentelemetry.io/contrib/bridges/otelzap -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 +## explicit; go 1.23.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/otel v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/attribute/internal +go.opentelemetry.io/otel/attribute/internal/xxhash go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes -go.opentelemetry.io/otel/internal -go.opentelemetry.io/otel/internal/attribute go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation -go.opentelemetry.io/otel/semconv/v1.17.0 -go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.27.0 -# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 -## explicit; go 1.22.0 +go.opentelemetry.io/otel/semconv/v1.37.0 +go.opentelemetry.io/otel/semconv/v1.37.0/otelconv +go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv +# 
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 -## explicit; go 1.22.0 +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/x +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/log v0.11.0 -## explicit; go 1.22.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x +# go.opentelemetry.io/otel/log v0.15.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/log go.opentelemetry.io/otel/log/embedded go.opentelemetry.io/otel/log/global go.opentelemetry.io/otel/log/internal/global go.opentelemetry.io/otel/log/noop -# go.opentelemetry.io/otel/metric v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/metric v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/sdk v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/sdk/log v0.11.0 -## explicit; go 1.22.0 +go.opentelemetry.io/otel/sdk/trace/internal/env +go.opentelemetry.io/otel/sdk/trace/internal/observ +# go.opentelemetry.io/otel/sdk/log v0.15.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/sdk/log -# go.opentelemetry.io/otel/trace v1.35.0 -## explicit; go 1.22.0 +go.opentelemetry.io/otel/sdk/log/internal/observ +go.opentelemetry.io/otel/sdk/log/internal/x +# go.opentelemetry.io/otel/trace v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.5.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/proto/otlp v1.9.0 +## explicit; go 1.23.0 go.opentelemetry.io/proto/otlp/collector/logs/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 @@ -572,7 +585,7 @@ go.uber.org/multierr # go.uber.org/ratelimit v0.3.1 ## 
explicit; go 1.20 go.uber.org/ratelimit -# go.uber.org/zap v1.27.0 +# go.uber.org/zap v1.27.1 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer @@ -583,8 +596,8 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.34.0 -## explicit; go 1.23.0 +# golang.org/x/crypto v0.44.0 +## explicit; go 1.24.0 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 @@ -603,8 +616,8 @@ golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/net v0.35.0 -## explicit; go 1.18 +# golang.org/x/net v0.47.0 +## explicit; go 1.24.0 golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -613,18 +626,18 @@ golang.org/x/net/idna golang.org/x/net/internal/httpcommon golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.29.0 -## explicit; go 1.23.0 +# golang.org/x/oauth2 v0.32.0 +## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.13.0 -## explicit; go 1.23.0 +# golang.org/x/sync v0.18.0 +## explicit; go 1.24.0 golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.38.0 +# golang.org/x/sys v0.39.0 ## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 @@ -635,11 +648,11 @@ golang.org/x/sys/windows/svc golang.org/x/sys/windows/svc/debug golang.org/x/sys/windows/svc/eventlog golang.org/x/sys/windows/svc/mgr -# golang.org/x/term v0.33.0 -## explicit; go 1.23.0 +# golang.org/x/term v0.37.0 +## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.24.0 -## explicit; go 1.23.0 +# golang.org/x/text v0.31.0 +## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/encoding golang.org/x/text/encoding/internal @@ -659,15 +672,15 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.8.0 ## explicit; go 1.18 golang.org/x/time/rate -# google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a -## explicit; go 1.22 +# google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 -## explicit; go 1.23.0 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.71.1 -## explicit; go 1.22.0 +# google.golang.org/grpc v1.78.0 +## explicit; go 1.24.0 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -677,7 +690,6 @@ google.golang.org/grpc/balancer/endpointsharding google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/pickfirst/internal -google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -687,6 +699,7 @@ google.golang.org/grpc/credentials google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip +google.golang.org/grpc/encoding/internal google.golang.org/grpc/encoding/proto google.golang.org/grpc/experimental/stats 
google.golang.org/grpc/grpclog @@ -730,8 +743,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.6 -## explicit; go 1.22 +# google.golang.org/protobuf v1.36.10 +## explicit; go 1.23 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire