path: root/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
author    vitalyisaev <vitalyisaev@ydb.tech>  2023-12-12 21:55:07 +0300
committer vitalyisaev <vitalyisaev@ydb.tech>  2023-12-12 22:25:10 +0300
commit    4967f99474a4040ba150eb04995de06342252718 (patch)
tree      c9c118836513a8fab6e9fcfb25be5d404338bca7 /vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
parent    2ce9cccb9b0bdd4cd7a3491dc5cbf8687cda51de (diff)
download  ydb-4967f99474a4040ba150eb04995de06342252718.tar.gz
YQ Connector: prepare code base for S3 integration
1. The Connector code base has been rewritten with Go generics so that adding a new data source (in particular S3 + csv) reuses as much of the existing code as possible (preserving the block-slicing logic, traffic accounting, etc.). 2. The Connector API has been extended to support S3, but has not been tested yet.
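The commit message describes the refactoring only in prose. The sketch below is not taken from the YDB Connector sources; every name in it (DataSource, Row, ReadAll, csvObjects) is invented for illustration. It shows one way Go generics can keep the shared logic (block slicing here; traffic accounting would live in the same place) in a single function while each data source, including an S3 + csv source, only implements a small interface.

package main

import "fmt"

// Row is a placeholder for one record produced by any data source.
type Row []any

// DataSource abstracts a concrete backend; S is the source-specific split type
// (a table shard, an S3 object key, and so on).
type DataSource[S any] interface {
	Splits() []S                 // how the source is partitioned for reading
	Read(split S) ([]Row, error) // rows of a single split
}

// ReadAll holds the code shared by every source, so a new source only implements DataSource.
func ReadAll[S any](src DataSource[S], blockSize int) ([][]Row, error) {
	var blocks [][]Row
	for _, split := range src.Splits() {
		rows, err := src.Read(split)
		if err != nil {
			return nil, err
		}
		// Slice the rows of this split into fixed-size blocks.
		for start := 0; start < len(rows); start += blockSize {
			end := start + blockSize
			if end > len(rows) {
				end = len(rows)
			}
			blocks = append(blocks, rows[start:end])
		}
	}
	return blocks, nil
}

// csvObjects is a toy "S3 + csv" source: each split is one object key.
type csvObjects struct{ keys []string }

func (c csvObjects) Splits() []string { return c.keys }

func (c csvObjects) Read(key string) ([]Row, error) {
	// A real implementation would fetch the object with the S3 client and parse csv.
	return []Row{{key, 1}, {key, 2}, {key, 3}}, nil
}

func main() {
	blocks, err := ReadAll[string](csvObjects{keys: []string{"a.csv", "b.csv"}}, 2)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(blocks), "blocks") // 4 blocks: 2+1 rows per object
}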
Diffstat (limited to 'vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go')
-rw-r--r--  vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go  413
1 file changed, 413 insertions, 0 deletions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
new file mode 100644
index 0000000000..e21b4c171a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
@@ -0,0 +1,413 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ "github.com/aws/aws-sdk-go-v2/internal/v4a"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the configuration of the website that is specified in the website
+// subresource. To configure a bucket as a website, you can add this subresource on
+// the bucket with website configuration information such as the file name of the
+// index document and any redirect rules. For more information, see Hosting
+// Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
+// . This PUT action requires the S3:PutBucketWebsite permission. By default, only
+// the bucket owner can configure the website attached to a bucket; however, bucket
+// owners can allow other users to set the website configuration by writing a
+// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect
+// all website requests sent to the bucket's website endpoint, you add a website
+// configuration with the following elements. Because all requests are sent to
+// another website, you don't need to provide an index document name for the bucket.
+// - WebsiteConfiguration
+// - RedirectAllRequestsTo
+// - HostName
+// - Protocol
+//
+// If you want granular control over redirects, you can use the following elements
+// to add routing rules that describe conditions for redirecting requests and
+// information about the redirect destination. In this case, the website
+// configuration must provide an index document for the bucket, because some
+// requests might not be redirected.
+// - WebsiteConfiguration
+// - IndexDocument
+// - Suffix
+// - ErrorDocument
+// - Key
+// - RoutingRules
+// - RoutingRule
+// - Condition
+// - HttpErrorCodeReturnedEquals
+// - KeyPrefixEquals
+// - Redirect
+// - Protocol
+// - HostName
+// - ReplaceKeyPrefixWith
+// - ReplaceKeyWith
+// - HttpRedirectCode
+//
+// Amazon S3 has a limitation of 50 routing rules per website configuration. If
+// you require more than 50 routing rules, you can use object redirect. For more
+// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
+// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB.
+func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) {
+ if params == nil {
+ params = &PutBucketWebsiteInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketWebsite", params, optFns, c.addOperationPutBucketWebsiteMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketWebsiteOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketWebsiteInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for the request.
+ //
+ // This member is required.
+ WebsiteConfiguration *types.WebsiteConfiguration
+
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not using
+ // the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
+ // HTTP status code 400 Bad Request . For more information, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
+ // ignores any provided ChecksumAlgorithm parameter.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The base64-encoded 128-bit MD5 digest of the data. You must use this header as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt)
+ // . For requests made using the Amazon Web Services Command Line Interface (CLI)
+ // or Amazon Web Services SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+type PutBucketWebsiteOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketWebsiteResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketWebsite(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketWebsiteUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketWebsiteInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "PutBucketWebsite",
+ }
+}
+
+// getPutBucketWebsiteRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketWebsiteInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketWebsiteRequestAlgorithmMember,
+ RequireChecksum: true,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketWebsiteBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketWebsiteInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketWebsiteBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+type opPutBucketWebsiteResolveEndpointMiddleware struct {
+ EndpointResolver EndpointResolverV2
+ BuiltInResolver builtInParameterResolver
+}
+
+func (*opPutBucketWebsiteResolveEndpointMiddleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *opPutBucketWebsiteResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ input, ok := in.Parameters.(*PutBucketWebsiteInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+
+ if m.EndpointResolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := EndpointParameters{}
+
+ m.BuiltInResolver.ResolveBuiltIns(&params)
+
+ params.Bucket = input.Bucket
+
+ var resolvedEndpoint smithyendpoints.Endpoint
+ resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL = &resolvedEndpoint.URI
+
+ for k := range resolvedEndpoint.Headers {
+ req.Header.Set(
+ k,
+ resolvedEndpoint.Headers.Get(k),
+ )
+ }
+
+ authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
+ if err != nil {
+ var nfe *internalauth.NoAuthenticationSchemesFoundError
+ if errors.As(err, &nfe) {
+ // if no auth scheme is found, default to sigv4
+ signingName := "s3"
+ signingRegion := m.BuiltInResolver.(*builtInResolver).Region
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
+ ctx = s3cust.SetSignerVersion(ctx, internalauth.SigV4)
+ }
+ var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
+ if errors.As(err, &ue) {
+ return out, metadata, fmt.Errorf(
+ "This operation requests signer version(s) %v but the client only supports %v",
+ ue.UnsupportedSchemes,
+ internalauth.SupportedSchemes,
+ )
+ }
+ }
+
+ for _, authScheme := range authSchemes {
+ switch authScheme.(type) {
+ case *internalauth.AuthenticationSchemeV4:
+ v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
+ var signingName, signingRegion string
+ if v4Scheme.SigningName == nil {
+ signingName = "s3"
+ } else {
+ signingName = *v4Scheme.SigningName
+ }
+ if v4Scheme.SigningRegion == nil {
+ signingRegion = m.BuiltInResolver.(*builtInResolver).Region
+ } else {
+ signingRegion = *v4Scheme.SigningRegion
+ }
+ if v4Scheme.DisableDoubleEncoding != nil {
+ // The signer sets an equivalent value at client initialization time.
+ // Setting this context value will cause the signer to extract it
+ // and override the value set at client initialization time.
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
+ ctx = s3cust.SetSignerVersion(ctx, v4Scheme.Name)
+ break
+ case *internalauth.AuthenticationSchemeV4A:
+ v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
+ if v4aScheme.SigningName == nil {
+ v4aScheme.SigningName = aws.String("s3")
+ }
+ if v4aScheme.DisableDoubleEncoding != nil {
+ // The signer sets an equivalent value at client initialization time.
+ // Setting this context value will cause the signer to extract it
+ // and override the value set at client initialization time.
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
+ ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
+ ctx = s3cust.SetSignerVersion(ctx, v4a.Version)
+ break
+ case *internalauth.AuthenticationSchemeNone:
+ break
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+func addPutBucketWebsiteResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
+ return stack.Serialize.Insert(&opPutBucketWebsiteResolveEndpointMiddleware{
+ EndpointResolver: options.EndpointResolverV2,
+ BuiltInResolver: &builtInResolver{
+ Region: options.Region,
+ UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
+ UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
+ Endpoint: options.BaseEndpoint,
+ ForcePathStyle: options.UsePathStyle,
+ Accelerate: options.UseAccelerate,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ UseArnRegion: options.UseARNRegion,
+ },
+ }, "ResolveEndpoint", middleware.After)
+}