package etcd

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/textproto"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/k3s-io/k3s/pkg/daemons/config"
	"github.com/k3s-io/k3s/pkg/util"
	"github.com/k3s-io/k3s/pkg/version"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
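
// Keys for the user metadata attached to uploaded objects. They are
// canonicalized up front so that lookups against the UserMetadata map
// returned by minio match when snapshots are listed back.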
var (
	clusterIDKey = textproto.CanonicalMIMEHeaderKey(version.Program + "-cluster-id")
	tokenHashKey = textproto.CanonicalMIMEHeaderKey(version.Program + "-token-hash")
	nodeNameKey  = textproto.CanonicalMIMEHeaderKey(version.Program + "-node-name")
)

// S3 maintains state for S3 functionality.
type S3 struct {
	config    *config.Control
	client    *minio.Client
	clusterID string
	tokenHash string
	nodeName  string
}

// NewS3 creates a new S3 snapshot handler, storing the config.Control
// pointer and initializing a new minio client.
func NewS3(ctx context.Context, config *config.Control) (*S3, error) {
	if config.EtcdS3BucketName == "" {
		return nil, errors.New("s3 bucket name was not set")
	}
	tr := http.DefaultTransport

	switch {
	case config.EtcdS3EndpointCA != "":
		trCA, err := setTransportCA(tr, config.EtcdS3EndpointCA, config.EtcdS3SkipSSLVerify)
		if err != nil {
			return nil, err
		}
		tr = trCA
	case config.EtcdS3 && config.EtcdS3SkipSSLVerify:
		tr.(*http.Transport).TLSClientConfig = &tls.Config{
			InsecureSkipVerify: config.EtcdS3SkipSSLVerify,
		}
	}

	var creds *credentials.Credentials
	if len(config.EtcdS3AccessKey) == 0 && len(config.EtcdS3SecretKey) == 0 {
		creds = credentials.NewIAM("") // for running on ec2 instance
	} else {
		creds = credentials.NewStaticV4(config.EtcdS3AccessKey, config.EtcdS3SecretKey, "")
	}

	opt := minio.Options{
		Creds:        creds,
		Secure:       !config.EtcdS3Insecure,
		Region:       config.EtcdS3Region,
		Transport:    tr,
		BucketLookup: bucketLookupType(config.EtcdS3Endpoint),
	}
	c, err := minio.New(config.EtcdS3Endpoint, &opt)
	if err != nil {
		return nil, err
	}

	logrus.Infof("Checking if S3 bucket %s exists", config.EtcdS3BucketName)

	ctx, cancel := context.WithTimeout(ctx, config.EtcdS3Timeout)
	defer cancel()

	exists, err := c.BucketExists(ctx, config.EtcdS3BucketName)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, fmt.Errorf("bucket %s does not exist", config.EtcdS3BucketName)
	}
	logrus.Infof("S3 bucket %s exists", config.EtcdS3BucketName)

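	// config.Runtime.Core is populated elsewhere once the core controllers are
	// available; wait for it here, as it is needed to look up the cluster ID below.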
	for config.Runtime.Core == nil {
		runtime.Gosched()
	}

	// cluster id hack: see https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/mVGobfD4TpY/nkdbkX1iBwAJ
	var clusterID string
	if ns, err := config.Runtime.Core.Core().V1().Namespace().Get(metav1.NamespaceSystem, metav1.GetOptions{}); err != nil {
		logrus.Warnf("Failed to set cluster ID: %v", err)
	} else {
		clusterID = string(ns.UID)
	}

	tokenHash, err := util.GetTokenHash(config)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get server token hash for etcd snapshot")
	}

	return &S3{
		config:    config,
		client:    c,
		clusterID: clusterID,
		tokenHash: tokenHash,
		nodeName:  os.Getenv("NODE_NAME"),
	}, nil
}

// upload uploads the given snapshot to the configured S3
// compatible backend.
func (s *S3) upload(ctx context.Context, snapshot string, extraMetadata *v1.ConfigMap, now time.Time) (*snapshotFile, error) {
	logrus.Infof("Uploading snapshot to s3://%s/%s", s.config.EtcdS3BucketName, snapshot)

	basename := filepath.Base(snapshot)
	metadata := filepath.Join(filepath.Dir(snapshot), "..", metadataDir, basename)
	snapshotKey := path.Join(s.config.EtcdS3Folder, basename)
	metadataKey := path.Join(s.config.EtcdS3Folder, metadataDir, basename)

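	// Record the snapshot attempt up front; status, size, and token hash are
	// filled in below based on whether the upload succeeds.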
	sf := &snapshotFile{
		Name:     basename,
		Location: fmt.Sprintf("s3://%s/%s", s.config.EtcdS3BucketName, snapshotKey),
		NodeName: "s3",
		CreatedAt: &metav1.Time{
			Time: now,
		},
		S3: &s3Config{
			Endpoint:      s.config.EtcdS3Endpoint,
			EndpointCA:    s.config.EtcdS3EndpointCA,
			SkipSSLVerify: s.config.EtcdS3SkipSSLVerify,
			Bucket:        s.config.EtcdS3BucketName,
			Region:        s.config.EtcdS3Region,
			Folder:        s.config.EtcdS3Folder,
			Insecure:      s.config.EtcdS3Insecure,
		},
		Compressed:     strings.HasSuffix(snapshot, compressedExtension),
		metadataSource: extraMetadata,
		nodeSource:     s.nodeName,
	}

	uploadInfo, err := s.uploadSnapshot(ctx, snapshotKey, snapshot)
	if err != nil {
		sf.Status = failedSnapshotStatus
		sf.Message = base64.StdEncoding.EncodeToString([]byte(err.Error()))
	} else {
		sf.Status = successfulSnapshotStatus
		sf.Size = uploadInfo.Size
		sf.tokenHash = s.tokenHash
	}

	if _, err := s.uploadSnapshotMetadata(ctx, metadataKey, metadata); err != nil {
		logrus.Warnf("Failed to upload snapshot metadata to S3: %v", err)
	} else {
		logrus.Infof("Uploaded snapshot metadata s3://%s/%s", s.config.EtcdS3BucketName, metadata)
	}

	return sf, err
}

// uploadSnapshot uploads the snapshot file to S3 using the minio API.
func (s *S3) uploadSnapshot(ctx context.Context, key, path string) (info minio.UploadInfo, err error) {
	opts := minio.PutObjectOptions{
		NumThreads: 2,
		UserMetadata: map[string]string{
			clusterIDKey: s.clusterID,
			nodeNameKey:  s.nodeName,
			tokenHashKey: s.tokenHash,
		},
	}
	if strings.HasSuffix(key, compressedExtension) {
		opts.ContentType = "application/zip"
	} else {
		opts.ContentType = "application/octet-stream"
	}
	ctx, cancel := context.WithTimeout(ctx, s.config.EtcdS3Timeout)
	defer cancel()

	return s.client.FPutObject(ctx, s.config.EtcdS3BucketName, key, path, opts)
}

// uploadSnapshotMetadata uploads the snapshot metadata file to S3 using the minio API.
// The upload is silently skipped if the metadata file does not exist.
func (s *S3) uploadSnapshotMetadata(ctx context.Context, key, path string) (info minio.UploadInfo, err error) {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return minio.UploadInfo{}, nil
		}
		return minio.UploadInfo{}, err
	}

	opts := minio.PutObjectOptions{
		NumThreads:  2,
		ContentType: "application/json",
		UserMetadata: map[string]string{
			clusterIDKey: s.clusterID,
			nodeNameKey:  s.nodeName,
		},
	}
	ctx, cancel := context.WithTimeout(ctx, s.config.EtcdS3Timeout)
	defer cancel()

	return s.client.FPutObject(ctx, s.config.EtcdS3BucketName, key, path, opts)
}

// Download downloads the snapshot specified by ClusterResetRestorePath
// from the configured S3 compatible backend.
func (s *S3) Download(ctx context.Context) error {
	snapshotKey := path.Join(s.config.EtcdS3Folder, s.config.ClusterResetRestorePath)
	metadataKey := path.Join(s.config.EtcdS3Folder, metadataDir, s.config.ClusterResetRestorePath)
	snapshotDir, err := snapshotDir(s.config, true)
	if err != nil {
		return errors.Wrap(err, "failed to get the snapshot dir")
	}
	snapshotFile := filepath.Join(snapshotDir, s.config.ClusterResetRestorePath)
	metadataFile := filepath.Join(snapshotDir, "..", metadataDir, s.config.ClusterResetRestorePath)

	logrus.Debugf("Downloading snapshot from s3://%s/%s", s.config.EtcdS3BucketName, snapshotKey)
	if err := s.downloadSnapshot(ctx, snapshotKey, snapshotFile); err != nil {
		return err
	}
	if err := s.downloadSnapshotMetadata(ctx, metadataKey, metadataFile); err != nil {
		return err
	}

	s.config.ClusterResetRestorePath = snapshotFile
	return nil
}

// downloadSnapshot downloads the snapshot file from S3 using the minio API.
func (s *S3) downloadSnapshot(ctx context.Context, key, file string) error {
	ctx, cancel := context.WithTimeout(ctx, s.config.EtcdS3Timeout)
	defer cancel()
	defer os.Chmod(file, 0600)

	return s.client.FGetObject(ctx, s.config.EtcdS3BucketName, key, file, minio.GetObjectOptions{})
}

// downloadSnapshotMetadata downloads the snapshot metadata file from S3 using the minio API.
// No error is returned if the metadata file does not exist, as it is optional.
func (s *S3) downloadSnapshotMetadata(ctx context.Context, key, file string) error {
	logrus.Debugf("Downloading snapshot metadata from s3://%s/%s", s.config.EtcdS3BucketName, key)
	ctx, cancel := context.WithTimeout(ctx, s.config.EtcdS3Timeout)
	defer cancel()
	defer os.Chmod(file, 0600)

	err := s.client.FGetObject(ctx, s.config.EtcdS3BucketName, key, file, minio.GetObjectOptions{})
	if resp := minio.ToErrorResponse(err); resp.StatusCode == http.StatusNotFound {
		return nil
	}
	return err
}

// snapshotPrefix returns the prefix used in the
// naming of the snapshots.
func (s *S3) snapshotPrefix() string {
	return path.Join(s.config.EtcdS3Folder, s.config.EtcdSnapshotName)
}

// snapshotRetention prunes snapshots in the configured S3 compatible backend for this specific node.
func (s *S3) snapshotRetention(ctx context.Context) error {
	if s.config.EtcdSnapshotRetention < 1 {
		return nil
	}
	logrus.Infof("Applying snapshot retention=%d to snapshots stored in s3://%s/%s", s.config.EtcdSnapshotRetention, s.config.EtcdS3BucketName, s.snapshotPrefix())

	var snapshotFiles []minio.ObjectInfo

	toCtx, cancel := context.WithTimeout(ctx, s.config.EtcdS3Timeout)
	defer cancel()

	opts := minio.ListObjectsOptions{
		Prefix:    s.snapshotPrefix(),
		Recursive: true,
	}
	for info := range s.client.ListObjects(toCtx, s.config.EtcdS3BucketName, opts) {
		if info.Err != nil {
			return info.Err
		}

		// skip metadata
		if path.Base(path.Dir(info.Key)) == metadataDir {
			continue
		}

		snapshotFiles = append(snapshotFiles, info)
	}

	if len(snapshotFiles) <= s.config.EtcdSnapshotRetention {
		return nil
	}

	// sort newest-first so we can prune entries past the retention count
	sort.Slice(snapshotFiles, func(i, j int) bool {
		return snapshotFiles[j].LastModified.Before(snapshotFiles[i].LastModified)
	})

	for _, df := range snapshotFiles[s.config.EtcdSnapshotRetention:] {
		logrus.Infof("Removing S3 snapshot: s3://%s/%s", s.config.EtcdS3BucketName, df.Key)
		if err := s.client.RemoveObject(toCtx, s.config.EtcdS3BucketName, df.Key, minio.RemoveObjectOptions{}); err != nil {
			return err
		}
		metadataKey := path.Join(path.Dir(df.Key), metadataDir, path.Base(df.Key))
		if err := s.client.RemoveObject(toCtx, s.config.EtcdS3BucketName, metadataKey, minio.RemoveObjectOptions{}); err != nil {
			if isNotExist(err) {
				return nil
			}
			return err
		}
	}

	return nil
}
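
// deleteSnapshot removes the named snapshot from the S3 bucket, along with its
// corresponding metadata object if one exists.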
func (s *S3) deleteSnapshot(ctx context.Context, key string) error {
	ctx, cancel := context.WithTimeout(ctx, s.config.EtcdS3Timeout)
	defer cancel()

	key = path.Join(s.config.EtcdS3Folder, key)
	err := s.client.RemoveObject(ctx, s.config.EtcdS3BucketName, key, minio.RemoveObjectOptions{})
	if err == nil || isNotExist(err) {
		metadataKey := path.Join(path.Dir(key), metadataDir, path.Base(key))
		if merr := s.client.RemoveObject(ctx, s.config.EtcdS3BucketName, metadataKey, minio.RemoveObjectOptions{}); merr != nil && !isNotExist(merr) {
			err = merr
		}
	}

	return err
}

// listSnapshots provides a list of currently stored
// snapshots in S3 along with their relevant
// metadata.
func (s *S3) listSnapshots(ctx context.Context) (map[string]snapshotFile, error) {
	snapshots := map[string]snapshotFile{}
	metadatas := []string{}
	ctx, cancel := context.WithTimeout(ctx, s.config.EtcdS3Timeout)
	defer cancel()

	opts := minio.ListObjectsOptions{
		Prefix:       s.config.EtcdS3Folder,
		Recursive:    true,
		WithMetadata: true,
	}

	objects := s.client.ListObjects(ctx, s.config.EtcdS3BucketName, opts)
	for obj := range objects {
		if obj.Err != nil {
			return nil, obj.Err
		}
		if obj.Size == 0 {
			continue
		}

		if o, err := s.client.StatObject(ctx, s.config.EtcdS3BucketName, obj.Key, minio.StatObjectOptions{}); err != nil {
			logrus.Warnf("Failed to get object metadata: %v", err)
		} else {
			obj = o
		}

		filename := path.Base(obj.Key)
		if path.Base(path.Dir(obj.Key)) == metadataDir {
			metadatas = append(metadatas, obj.Key)
			continue
		}

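		// Snapshot names end in a unix timestamp; fall back to the object's
		// modification time if the timestamp cannot be parsed from the name.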
		basename, compressed := strings.CutSuffix(filename, compressedExtension)
		ts, err := strconv.ParseInt(basename[strings.LastIndexByte(basename, '-')+1:], 10, 64)
		if err != nil {
			ts = obj.LastModified.Unix()
		}

		sf := snapshotFile{
			Name:     filename,
			Location: fmt.Sprintf("s3://%s/%s", s.config.EtcdS3BucketName, obj.Key),
			NodeName: "s3",
			CreatedAt: &metav1.Time{
				Time: time.Unix(ts, 0),
			},
			Size: obj.Size,
			S3: &s3Config{
				Endpoint:      s.config.EtcdS3Endpoint,
				EndpointCA:    s.config.EtcdS3EndpointCA,
				SkipSSLVerify: s.config.EtcdS3SkipSSLVerify,
				Bucket:        s.config.EtcdS3BucketName,
				Region:        s.config.EtcdS3Region,
				Folder:        s.config.EtcdS3Folder,
				Insecure:      s.config.EtcdS3Insecure,
			},
			Status:     successfulSnapshotStatus,
			Compressed: compressed,
			nodeSource: obj.UserMetadata[nodeNameKey],
			tokenHash:  obj.UserMetadata[tokenHashKey],
		}
		sfKey := generateSnapshotConfigMapKey(sf)
		snapshots[sfKey] = sf
	}

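	// Second pass: attach the contents of any metadata objects to the snapshots
	// they belong to, base64-encoded.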
	for _, metadataKey := range metadatas {
		filename := path.Base(metadataKey)
		sfKey := generateSnapshotConfigMapKey(snapshotFile{Name: filename, NodeName: "s3"})
		if sf, ok := snapshots[sfKey]; ok {
			logrus.Debugf("Loading snapshot metadata from s3://%s/%s", s.config.EtcdS3BucketName, metadataKey)
			if obj, err := s.client.GetObject(ctx, s.config.EtcdS3BucketName, metadataKey, minio.GetObjectOptions{}); err != nil {
				logrus.Warnf("Failed to get snapshot metadata: %v", err)
			} else {
				if m, err := ioutil.ReadAll(obj); err != nil {
					logrus.Warnf("Failed to read snapshot metadata: %v", err)
				} else {
					sf.Metadata = base64.StdEncoding.EncodeToString(m)
					snapshots[sfKey] = sf
				}
			}
		}
	}

	return snapshots, nil
}
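
// readS3EndpointCA reads the configured endpoint CA, accepting either inline
// base64-encoded PEM data or a path to a PEM file on disk.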
func readS3EndpointCA(endpointCA string) ([]byte, error) {
	ca, err := base64.StdEncoding.DecodeString(endpointCA)
	if err != nil {
		return os.ReadFile(endpointCA)
	}
	return ca, nil
}
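
// setTransportCA installs the configured endpoint CA bundle as the root CA
// pool on the transport's TLS client configuration.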
func setTransportCA(tr http.RoundTripper, endpointCA string, insecureSkipVerify bool) (http.RoundTripper, error) {
	ca, err := readS3EndpointCA(endpointCA)
	if err != nil {
		return tr, err
	}
	if !isValidCertificate(ca) {
		return tr, errors.New("endpoint-ca is not a valid x509 certificate")
	}

	certPool := x509.NewCertPool()
	certPool.AppendCertsFromPEM(ca)
	tr.(*http.Transport).TLSClientConfig = &tls.Config{
		RootCAs:            certPool,
		InsecureSkipVerify: insecureSkipVerify,
	}

	return tr, nil
}

// isValidCertificate checks to see if the given
// byte slice is a valid x509 certificate.
func isValidCertificate(c []byte) bool {
	p, _ := pem.Decode(c)
	if p == nil {
		return false
	}
	if _, err := x509.ParseCertificates(p.Bytes); err != nil {
		return false
	}
	return true
}
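
// bucketLookupType returns the bucket lookup style to use for the given
// endpoint. Aliyun OSS endpoints are forced to DNS-style bucket lookup for
// backwards compatibility with RKE1; all other endpoints use auto-detection.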
func bucketLookupType(endpoint string) minio.BucketLookupType {
	if strings.Contains(endpoint, "aliyun") { // backwards compat with RKE1
		return minio.BucketLookupDNS
	}
	return minio.BucketLookupAuto
}