mirror of
https://github.com/k3s-io/k3s.git
synced 2024-06-07 19:41:36 +00:00
Etcd Snapshot/Restore to/from S3 Compatible Backends (#2902)
* Add functionality for etcd snapshot/restore to and from S3 compatible backends. * Update etcd restore functionality to extract and write certificates and configs from snapshot.
This commit is contained in:
parent
1bf04b6a50
commit
4d1f9eda9d
1
go.mod
1
go.mod
@ -88,6 +88,7 @@ require (
|
||||
github.com/kubernetes-sigs/cri-tools v0.0.0-00010101000000-000000000000
|
||||
github.com/lib/pq v1.8.0
|
||||
github.com/mattn/go-sqlite3 v1.14.4
|
||||
github.com/minio/minio-go/v7 v7.0.7
|
||||
github.com/natefinch/lumberjack v2.0.0+incompatible
|
||||
github.com/opencontainers/runc v1.0.0-rc92
|
||||
github.com/opencontainers/selinux v1.6.0
|
||||
|
26
go.sum
26
go.sum
@ -131,6 +131,7 @@ github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0 h1:WW2B2uxx9KWF6bGlHqhm8Okiafwwx7Y2kcpn8lCpjgo=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
|
||||
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
@ -257,6 +258,7 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM
|
||||
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
|
||||
@ -425,6 +427,7 @@ github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyyc
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=
|
||||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
@ -503,6 +506,7 @@ github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
@ -581,6 +585,9 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
|
||||
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
|
||||
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
|
||||
github.com/knative/build v0.6.0/go.mod h1:/sU74ZQkwlYA5FwYDJhYTy61i/Kn+5eWfln2jDbw3Qo=
|
||||
github.com/knative/pkg v0.0.0-20190514205332-5e4512dcb2ca/go.mod h1:7Ijfhw7rfB+H9VtosIsDYvZQ+qYTz7auK3fHW/5z4ww=
|
||||
github.com/knative/serving v0.6.1/go.mod h1:ljvMfwQy2qanaM/8xnBSK4Mz3Vv2NawC2fo5kFRJS1A=
|
||||
@ -622,9 +629,13 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7
|
||||
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/mattn/go-sqlite3 v1.14.4 h1:4rQjbDxdu9fSgI/r3KN72G3c2goxknAqHHgPWWs8UlI=
|
||||
github.com/mattn/go-sqlite3 v1.14.4/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
|
||||
@ -638,10 +649,18 @@ github.com/miekg/dns v1.1.4 h1:rCMZsU2ScVSYcAsOXgmC6+AKOK+6pmQTOcw03nfwYV0=
|
||||
github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 h1:PS1dLCGtD8bb9RPKJrc8bS7qHL6JnW1CZvwzH9dPoUs=
|
||||
github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY=
|
||||
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
|
||||
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
|
||||
github.com/minio/minio-go/v7 v7.0.7 h1:Qld/xb8C1Pwbu0jU46xAceyn9xXKCMW+3XfNbpmTB70=
|
||||
github.com/minio/minio-go/v7 v7.0.7/go.mod h1:pEZBUa+L2m9oECoIA6IcSK8bv/qggtQVLovjeKK5jYc=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sio v0.2.1/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
|
||||
@ -790,6 +809,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rootless-containers/rootlesskit v0.10.0 h1:62HHP8s8qYYcolEtAsuo4GU6qau6pWmcQ1Te+TZTFds=
|
||||
github.com/rootless-containers/rootlesskit v0.10.0/go.mod h1:OZQfuRPb+2MA1p+hmjHmSmDRv9SdTzlQ3taNA/0d7XM=
|
||||
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc=
|
||||
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
|
||||
github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
@ -815,7 +836,10 @@ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
@ -1084,6 +1108,8 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
|
||||
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
|
@ -26,7 +26,7 @@ import (
|
||||
"github.com/rancher/k3s/pkg/cli/cmds"
|
||||
"github.com/rancher/k3s/pkg/clientaccess"
|
||||
"github.com/rancher/k3s/pkg/daemons/config"
|
||||
"github.com/rancher/k3s/pkg/daemons/control"
|
||||
"github.com/rancher/k3s/pkg/daemons/control/deps"
|
||||
"github.com/rancher/k3s/pkg/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/util/json"
|
||||
@ -375,7 +375,7 @@ func get(envInfo *cmds.Agent, proxy proxy.Proxy) (*config.Node, error) {
|
||||
}
|
||||
|
||||
kubeconfigKubelet := filepath.Join(envInfo.DataDir, "agent", "kubelet.kubeconfig")
|
||||
if err := control.KubeConfig(kubeconfigKubelet, proxy.APIServerURL(), serverCAFile, clientKubeletCert, clientKubeletKey); err != nil {
|
||||
if err := deps.KubeConfig(kubeconfigKubelet, proxy.APIServerURL(), serverCAFile, clientKubeletCert, clientKubeletKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -386,7 +386,7 @@ func get(envInfo *cmds.Agent, proxy proxy.Proxy) (*config.Node, error) {
|
||||
}
|
||||
|
||||
kubeconfigKubeproxy := filepath.Join(envInfo.DataDir, "agent", "kubeproxy.kubeconfig")
|
||||
if err := control.KubeConfig(kubeconfigKubeproxy, proxy.APIServerURL(), serverCAFile, clientKubeProxyCert, clientKubeProxyKey); err != nil {
|
||||
if err := deps.KubeConfig(kubeconfigKubeproxy, proxy.APIServerURL(), serverCAFile, clientKubeProxyCert, clientKubeProxyKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -397,7 +397,7 @@ func get(envInfo *cmds.Agent, proxy proxy.Proxy) (*config.Node, error) {
|
||||
}
|
||||
|
||||
kubeconfigK3sController := filepath.Join(envInfo.DataDir, "agent", version.Program+"controller.kubeconfig")
|
||||
if err := control.KubeConfig(kubeconfigK3sController, proxy.APIServerURL(), serverCAFile, clientK3sControllerCert, clientK3sControllerKey); err != nil {
|
||||
if err := deps.KubeConfig(kubeconfigK3sController, proxy.APIServerURL(), serverCAFile, clientK3sControllerCert, clientK3sControllerKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -34,6 +34,55 @@ func NewEtcdSnapshotCommand(action func(*cli.Context) error) cli.Command {
|
||||
Usage: "(db) Directory to save etcd on-demand snapshot. (default: ${data-dir}/db/snapshots)",
|
||||
Destination: &ServerConfig.EtcdSnapshotDir,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "s3",
|
||||
Usage: "(db) Enable backup to S3",
|
||||
Destination: &ServerConfig.EtcdS3,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "s3-endpoint",
|
||||
Usage: "(db) S3 endpoint url",
|
||||
Destination: &ServerConfig.EtcdS3Endpoint,
|
||||
Value: "s3.amazonaws.com",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "s3-endpoint-ca",
|
||||
Usage: "(db) S3 custom CA cert to connect to S3 endpoint",
|
||||
Destination: &ServerConfig.EtcdS3EndpointCA,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "s3-skip-ssl-verify",
|
||||
Usage: "(db) Disables S3 SSL certificate validation",
|
||||
Destination: &ServerConfig.EtcdS3SkipSSLVerify,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "s3-access-key",
|
||||
Usage: "(db) S3 access key",
|
||||
EnvVar: "AWS_ACCESS_KEY_ID",
|
||||
Destination: &ServerConfig.EtcdS3AccessKey,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "s3-secret-key",
|
||||
Usage: "(db) S3 secret key",
|
||||
EnvVar: "AWS_SECRET_ACCESS_KEY",
|
||||
Destination: &ServerConfig.EtcdS3SecretKey,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "s3-bucket",
|
||||
Usage: "(db) S3 bucket name",
|
||||
Destination: &ServerConfig.EtcdS3BucketName,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "s3-region",
|
||||
Usage: "(db) S3 region / bucket location (optional)",
|
||||
Destination: &ServerConfig.EtcdS3Region,
|
||||
Value: "us-east-1",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "s3-folder",
|
||||
Usage: "(db) S3 folder",
|
||||
Destination: &ServerConfig.EtcdS3Folder,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -69,6 +69,15 @@ type Server struct {
|
||||
EtcdSnapshotDir string
|
||||
EtcdSnapshotCron string
|
||||
EtcdSnapshotRetention int
|
||||
EtcdS3 bool
|
||||
EtcdS3Endpoint string
|
||||
EtcdS3EndpointCA string
|
||||
EtcdS3SkipSSLVerify bool
|
||||
EtcdS3AccessKey string
|
||||
EtcdS3SecretKey string
|
||||
EtcdS3BucketName string
|
||||
EtcdS3Region string
|
||||
EtcdS3Folder string
|
||||
}
|
||||
|
||||
var ServerConfig Server
|
||||
@ -255,6 +264,55 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
|
||||
Usage: "(db) Directory to save db snapshots. (Default location: ${data-dir}/db/snapshots)",
|
||||
Destination: &ServerConfig.EtcdSnapshotDir,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "etcd-s3",
|
||||
Usage: "(db) Enable backup to S3",
|
||||
Destination: &ServerConfig.EtcdS3,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "etcd-s3-endpoint",
|
||||
Usage: "(db) S3 endpoint url",
|
||||
Destination: &ServerConfig.EtcdS3Endpoint,
|
||||
Value: "s3.amazonaws.com",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "etcd-s3-endpoint-ca",
|
||||
Usage: "(db) S3 custom CA cert to connect to S3 endpoint",
|
||||
Destination: &ServerConfig.EtcdS3EndpointCA,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "etcd-s3-skip-ssl-verify",
|
||||
Usage: "(db) Disables S3 SSL certificate validation",
|
||||
Destination: &ServerConfig.EtcdS3SkipSSLVerify,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "etcd-s3-access-key",
|
||||
Usage: "(db) S3 access key",
|
||||
EnvVar: "AWS_ACCESS_KEY_ID",
|
||||
Destination: &ServerConfig.EtcdS3AccessKey,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "etcd-s3-secret-key",
|
||||
Usage: "(db) S3 secret key",
|
||||
EnvVar: "AWS_SECRET_ACCESS_KEY",
|
||||
Destination: &ServerConfig.EtcdS3SecretKey,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "etcd-s3-bucket",
|
||||
Usage: "(db) S3 bucket name",
|
||||
Destination: &ServerConfig.EtcdS3BucketName,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "etcd-s3-region",
|
||||
Usage: "(db) S3 region / bucket location (optional)",
|
||||
Destination: &ServerConfig.EtcdS3Region,
|
||||
Value: "us-east-1",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "etcd-s3-folder",
|
||||
Usage: "(db) S3 folder",
|
||||
Destination: &ServerConfig.EtcdS3Folder,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "default-local-storage-path",
|
||||
Usage: "(storage) Default local storage path for local provisioner storage class",
|
||||
|
@ -37,6 +37,15 @@ func run(app *cli.Context, cfg *cmds.Server) error {
|
||||
serverConfig.ControlConfig.EtcdSnapshotName = cfg.EtcdSnapshotName
|
||||
serverConfig.ControlConfig.EtcdSnapshotDir = cfg.EtcdSnapshotDir
|
||||
serverConfig.ControlConfig.EtcdSnapshotRetention = 0 // disable retention check
|
||||
serverConfig.ControlConfig.EtcdS3 = cfg.EtcdS3
|
||||
serverConfig.ControlConfig.EtcdS3Endpoint = cfg.EtcdS3Endpoint
|
||||
serverConfig.ControlConfig.EtcdS3EndpointCA = cfg.EtcdS3EndpointCA
|
||||
serverConfig.ControlConfig.EtcdS3SkipSSLVerify = cfg.EtcdS3SkipSSLVerify
|
||||
serverConfig.ControlConfig.EtcdS3AccessKey = cfg.EtcdS3AccessKey
|
||||
serverConfig.ControlConfig.EtcdS3SecretKey = cfg.EtcdS3SecretKey
|
||||
serverConfig.ControlConfig.EtcdS3BucketName = cfg.EtcdS3BucketName
|
||||
serverConfig.ControlConfig.EtcdS3Region = cfg.EtcdS3Region
|
||||
serverConfig.ControlConfig.EtcdS3Folder = cfg.EtcdS3Folder
|
||||
serverConfig.ControlConfig.Runtime = &config.ControlRuntime{}
|
||||
serverConfig.ControlConfig.Runtime.ETCDServerCA = filepath.Join(dataDir, "tls", "etcd", "server-ca.crt")
|
||||
serverConfig.ControlConfig.Runtime.ClientETCDCert = filepath.Join(dataDir, "tls", "etcd", "client.crt")
|
||||
|
@ -126,11 +126,30 @@ func run(app *cli.Context, cfg *cmds.Server) error {
|
||||
serverConfig.ControlConfig.EtcdSnapshotRetention = cfg.EtcdSnapshotRetention
|
||||
serverConfig.ControlConfig.EtcdDisableSnapshots = cfg.EtcdDisableSnapshots
|
||||
serverConfig.ControlConfig.EtcdExposeMetrics = cfg.EtcdExposeMetrics
|
||||
serverConfig.ControlConfig.EtcdS3 = cfg.EtcdS3
|
||||
serverConfig.ControlConfig.EtcdS3Endpoint = cfg.EtcdS3Endpoint
|
||||
serverConfig.ControlConfig.EtcdS3EndpointCA = cfg.EtcdS3EndpointCA
|
||||
serverConfig.ControlConfig.EtcdS3SkipSSLVerify = cfg.EtcdS3SkipSSLVerify
|
||||
serverConfig.ControlConfig.EtcdS3AccessKey = cfg.EtcdS3AccessKey
|
||||
serverConfig.ControlConfig.EtcdS3SecretKey = cfg.EtcdS3SecretKey
|
||||
serverConfig.ControlConfig.EtcdS3BucketName = cfg.EtcdS3BucketName
|
||||
serverConfig.ControlConfig.EtcdS3Region = cfg.EtcdS3Region
|
||||
serverConfig.ControlConfig.EtcdS3Folder = cfg.EtcdS3Folder
|
||||
|
||||
if cfg.ClusterResetRestorePath != "" && !cfg.ClusterReset {
|
||||
return errors.New("invalid flag use. --cluster-reset required with --cluster-reset-restore-path")
|
||||
}
|
||||
|
||||
// make sure components are disabled so we only perform a restore
|
||||
// and bail out
|
||||
if cfg.ClusterResetRestorePath != "" && cfg.ClusterReset {
|
||||
serverConfig.ControlConfig.ClusterInit = true
|
||||
serverConfig.ControlConfig.DisableAPIServer = true
|
||||
serverConfig.ControlConfig.DisableControllerManager = true
|
||||
serverConfig.ControlConfig.DisableScheduler = true
|
||||
serverConfig.ControlConfig.DisableCCM = true
|
||||
}
|
||||
|
||||
serverConfig.ControlConfig.ClusterReset = cfg.ClusterReset
|
||||
serverConfig.ControlConfig.ClusterResetRestorePath = cfg.ClusterResetRestorePath
|
||||
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -48,6 +49,53 @@ func (c *Cluster) testClusterDB(ctx context.Context) (<-chan struct{}, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// cleanCerts removes existing certificatates previously
|
||||
// generated for use by the cluster.
|
||||
func (c *Cluster) cleanCerts() {
|
||||
certs := []string{filepath.Join(c.config.DataDir, "tls", "client-ca.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-ca.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "server-ca.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "server-ca.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "request-header-ca.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "request-header-ca.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "service.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-admin.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-admin.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-controller.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-controller.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-cloud-controller.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-cloud-controller.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-scheduler.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-scheduler.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-kube-apiserver.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-kube-apiserver.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-kube-proxy.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-kube-proxy.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-"+version.Program+"-controller.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-"+version.Program+"-controller.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "serving-kube-apiserver.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "serving-kube-apiserver.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-kubelet.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "serving-kubelet.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "serving-kubelet.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "client-auth-proxy.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "server-ca.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "server-ca.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "peer-ca.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "peer-ca.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "server-client.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "server-client.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "peer-server-client.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "peer-server-client.key"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "client.crt"),
|
||||
filepath.Join(c.config.DataDir, "tls", "etcd", "client.key"),
|
||||
}
|
||||
|
||||
for _, cert := range certs {
|
||||
os.Remove(cert)
|
||||
}
|
||||
}
|
||||
|
||||
// start starts the database, unless a cluster reset has been requested, in which case
|
||||
// it does that instead.
|
||||
func (c *Cluster) start(ctx context.Context) error {
|
||||
@ -64,7 +112,13 @@ func (c *Cluster) start(ctx context.Context) error {
|
||||
} else {
|
||||
return fmt.Errorf("cluster-reset was successfully performed, please remove the cluster-reset flag and start %s normally, if you need to perform another cluster reset, you must first manually delete the %s file", version.Program, resetFile)
|
||||
}
|
||||
return c.managedDB.Reset(ctx)
|
||||
|
||||
rebootstrap := func() error {
|
||||
return c.storageBootstrap(ctx)
|
||||
}
|
||||
if err := c.managedDB.Reset(ctx, rebootstrap, c.cleanCerts); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// removing the reset file and ignore error if the file doesn't exist
|
||||
os.Remove(resetFile)
|
||||
|
@ -16,7 +16,7 @@ var (
|
||||
type Driver interface {
|
||||
IsInitialized(ctx context.Context, config *config.Control) (bool, error)
|
||||
Register(ctx context.Context, config *config.Control, handler http.Handler) (http.Handler, error)
|
||||
Reset(ctx context.Context) error
|
||||
Reset(ctx context.Context, reboostrap func() error, cleanCerts func()) error
|
||||
Start(ctx context.Context, clientAccessInfo *clientaccess.Info) error
|
||||
Test(ctx context.Context) error
|
||||
Restore(ctx context.Context) error
|
||||
|
@ -145,6 +145,15 @@ type Control struct {
|
||||
EtcdSnapshotDir string
|
||||
EtcdSnapshotCron string
|
||||
EtcdSnapshotRetention int
|
||||
EtcdS3 bool
|
||||
EtcdS3Endpoint string
|
||||
EtcdS3EndpointCA string
|
||||
EtcdS3SkipSSLVerify bool
|
||||
EtcdS3AccessKey string
|
||||
EtcdS3SecretKey string
|
||||
EtcdS3BucketName string
|
||||
EtcdS3Region string
|
||||
EtcdS3Folder string
|
||||
|
||||
BindAddress string
|
||||
SANs []string
|
||||
|
650
pkg/daemons/control/deps/deps.go
Normal file
650
pkg/daemons/control/deps/deps.go
Normal file
@ -0,0 +1,650 @@
|
||||
package deps
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/x509"
|
||||
b64 "encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
"github.com/rancher/k3s/pkg/clientaccess"
|
||||
"github.com/rancher/k3s/pkg/daemons/config"
|
||||
"github.com/rancher/k3s/pkg/passwd"
|
||||
"github.com/rancher/k3s/pkg/token"
|
||||
"github.com/rancher/k3s/pkg/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
|
||||
"k8s.io/kubernetes/pkg/controlplane"
|
||||
)
|
||||
|
||||
const (
	// ipsecTokenSize is the length of the random IPSEC pre-shared key
	// generated by genEncryptedNetworkInfo.
	ipsecTokenSize = 48
	// aescbcKeySize is a 32-byte key size — presumably the AES-CBC key
	// used by genEncryptionConfig; confirm against that function.
	aescbcKeySize = 32

	// RequestHeaderCN is the common name used for the auth-proxy
	// request-header client certificate.
	RequestHeaderCN = "system:auth-proxy"
)
|
||||
|
||||
var (
	// kubeconfigTemplate renders the kubeconfig file written by
	// KubeConfig, pointing at a single local cluster/user pair.
	// NOTE(review): the YAML indentation inside the literal was
	// reconstructed from the standard kubeconfig layout because the
	// extracted source lost leading whitespace — verify against the
	// original file before relying on exact byte output.
	kubeconfigTemplate = template.Must(template.New("kubeconfig").Parse(`apiVersion: v1
clusters:
- cluster:
    server: {{.URL}}
    certificate-authority: {{.CACert}}
  name: local
contexts:
- context:
    cluster: local
    namespace: default
    user: user
  name: Default
current-context: Default
kind: Config
preferences: {}
users:
- name: user
  user:
    client-certificate: {{.ClientCert}}
    client-key: {{.ClientKey}}
`))
)
|
||||
|
||||
func migratePassword(p *passwd.Passwd) error {
|
||||
server, _ := p.Pass("server")
|
||||
node, _ := p.Pass("node")
|
||||
if server == "" && node != "" {
|
||||
return p.EnsureUser("server", version.Program+":server", node)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func KubeConfig(dest, url, caCert, clientCert, clientKey string) error {
|
||||
data := struct {
|
||||
URL string
|
||||
CACert string
|
||||
ClientCert string
|
||||
ClientKey string
|
||||
}{
|
||||
URL: url,
|
||||
CACert: caCert,
|
||||
ClientCert: clientCert,
|
||||
ClientKey: clientKey,
|
||||
}
|
||||
|
||||
output, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer output.Close()
|
||||
|
||||
return kubeconfigTemplate.Execute(output, &data)
|
||||
}
|
||||
|
||||
// GenServerDeps is responsible for generating the cluster dependencies
|
||||
// needed to successfully bootstrap a cluster.
|
||||
func GenServerDeps(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
if err := genCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genServiceAccount(runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genUsers(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genEncryptedNetworkInfo(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genEncryptionConfig(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return readTokens(runtime)
|
||||
}
|
||||
|
||||
func readTokens(runtime *config.ControlRuntime) error {
|
||||
tokens, err := passwd.Read(runtime.PasswdFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if nodeToken, ok := tokens.Pass("node"); ok {
|
||||
runtime.AgentToken = "node:" + nodeToken
|
||||
}
|
||||
if serverToken, ok := tokens.Pass("server"); ok {
|
||||
runtime.ServerToken = "server:" + serverToken
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getNodePass(config *config.Control, serverPass string) string {
|
||||
if config.AgentToken == "" {
|
||||
if _, passwd, ok := clientaccess.ParseUsernamePassword(serverPass); ok {
|
||||
return passwd
|
||||
}
|
||||
return serverPass
|
||||
}
|
||||
return config.AgentToken
|
||||
}
|
||||
|
||||
func genUsers(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
passwd, err := passwd.Read(runtime.PasswdFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := migratePassword(passwd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serverPass, err := getServerPass(passwd, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodePass := getNodePass(config, serverPass)
|
||||
|
||||
if err := passwd.EnsureUser("node", version.Program+":agent", nodePass); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := passwd.EnsureUser("server", version.Program+":server", serverPass); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return passwd.Write(runtime.PasswdFile)
|
||||
}
|
||||
|
||||
func genEncryptedNetworkInfo(controlConfig *config.Control, runtime *config.ControlRuntime) error {
|
||||
if s, err := os.Stat(runtime.IPSECKey); err == nil && s.Size() > 0 {
|
||||
psk, err := ioutil.ReadFile(runtime.IPSECKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
controlConfig.IPSECPSK = strings.TrimSpace(string(psk))
|
||||
return nil
|
||||
}
|
||||
|
||||
psk, err := token.Random(ipsecTokenSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
controlConfig.IPSECPSK = psk
|
||||
if err := ioutil.WriteFile(runtime.IPSECKey, []byte(psk+"\n"), 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getServerPass(passwd *passwd.Passwd, config *config.Control) (string, error) {
|
||||
var (
|
||||
err error
|
||||
)
|
||||
|
||||
serverPass := config.Token
|
||||
if serverPass == "" {
|
||||
serverPass, _ = passwd.Pass("server")
|
||||
}
|
||||
if serverPass == "" {
|
||||
serverPass, err = token.Random(16)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return serverPass, nil
|
||||
}
|
||||
|
||||
func genCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
if err := genClientCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := genServerCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := genRequestHeaderCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
return genETCDCerts(config, runtime)
|
||||
}
|
||||
|
||||
func getSigningCertFactory(regen bool, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCertFile, caKeyFile string) signedCertFactory {
|
||||
return func(commonName string, organization []string, certFile, keyFile string) (bool, error) {
|
||||
return createClientCertKey(regen, commonName, organization, altNames, extKeyUsage, caCertFile, caKeyFile, certFile, keyFile)
|
||||
}
|
||||
}
|
||||
|
||||
func genClientCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
regen, err := createSigningCertKey(version.Program+"-client", runtime.ClientCA, runtime.ClientCAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
factory := getSigningCertFactory(regen, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, runtime.ClientCA, runtime.ClientCAKey)
|
||||
|
||||
var certGen bool
|
||||
apiEndpoint := fmt.Sprintf("https://127.0.0.1:%d", config.APIServerPort)
|
||||
|
||||
certGen, err = factory("system:admin", []string{"system:masters"}, runtime.ClientAdminCert, runtime.ClientAdminKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigAdmin, apiEndpoint, runtime.ServerCA, runtime.ClientAdminCert, runtime.ClientAdminKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certGen, err = factory("system:kube-controller-manager", nil, runtime.ClientControllerCert, runtime.ClientControllerKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigController, apiEndpoint, runtime.ServerCA, runtime.ClientControllerCert, runtime.ClientControllerKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certGen, err = factory("system:kube-scheduler", nil, runtime.ClientSchedulerCert, runtime.ClientSchedulerKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigScheduler, apiEndpoint, runtime.ServerCA, runtime.ClientSchedulerCert, runtime.ClientSchedulerKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certGen, err = factory("kube-apiserver", nil, runtime.ClientKubeAPICert, runtime.ClientKubeAPIKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigAPIServer, apiEndpoint, runtime.ServerCA, runtime.ClientKubeAPICert, runtime.ClientKubeAPIKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err = factory("system:kube-proxy", nil, runtime.ClientKubeProxyCert, runtime.ClientKubeProxyKey); err != nil {
|
||||
return err
|
||||
}
|
||||
// This user (system:k3s-controller by default) must be bound to a role in rolebindings.yaml or the downstream equivalent
|
||||
if _, err = factory("system:"+version.Program+"-controller", nil, runtime.ClientK3sControllerCert, runtime.ClientK3sControllerKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, _, err := certutil.LoadOrGenerateKeyFile(runtime.ClientKubeletKey, regen); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
certGen, err = factory("cloud-controller-manager", nil, runtime.ClientCloudControllerCert, runtime.ClientCloudControllerKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigCloudController, apiEndpoint, runtime.ServerCA, runtime.ClientCloudControllerCert, runtime.ClientCloudControllerKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genServerCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
regen, err := createServerSigningCertKey(config, runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, apiServerServiceIP, err := controlplane.ServiceIPRange(*config.ServiceIPRange)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
altNames := &certutil.AltNames{
|
||||
DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"},
|
||||
IPs: []net.IP{apiServerServiceIP},
|
||||
}
|
||||
|
||||
addSANs(altNames, config.SANs)
|
||||
|
||||
if _, err := createClientCertKey(regen, "kube-apiserver", nil,
|
||||
altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
runtime.ServerCA, runtime.ServerCAKey,
|
||||
runtime.ServingKubeAPICert, runtime.ServingKubeAPIKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, _, err := certutil.LoadOrGenerateKeyFile(runtime.ServingKubeletKey, regen); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genETCDCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
regen, err := createSigningCertKey("etcd-server", runtime.ETCDServerCA, runtime.ETCDServerCAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
altNames := &certutil.AltNames{
|
||||
DNSNames: []string{"localhost"},
|
||||
}
|
||||
addSANs(altNames, config.SANs)
|
||||
|
||||
if _, err := createClientCertKey(regen, "etcd-server", nil,
|
||||
altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
runtime.ETCDServerCA, runtime.ETCDServerCAKey,
|
||||
runtime.ServerETCDCert, runtime.ServerETCDKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := createClientCertKey(regen, "etcd-client", nil,
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
runtime.ETCDServerCA, runtime.ETCDServerCAKey,
|
||||
runtime.ClientETCDCert, runtime.ClientETCDKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
regen, err = createSigningCertKey("etcd-peer", runtime.ETCDPeerCA, runtime.ETCDPeerCAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := createClientCertKey(regen, "etcd-peer", nil,
|
||||
altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
runtime.ETCDPeerCA, runtime.ETCDPeerCAKey,
|
||||
runtime.PeerServerClientETCDCert, runtime.PeerServerClientETCDKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genRequestHeaderCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
regen, err := createSigningCertKey(version.Program+"-request-header", runtime.RequestHeaderCA, runtime.RequestHeaderCAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := createClientCertKey(regen, RequestHeaderCN, nil,
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
runtime.RequestHeaderCA, runtime.RequestHeaderCAKey,
|
||||
runtime.ClientAuthProxyCert, runtime.ClientAuthProxyKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type signedCertFactory = func(commonName string, organization []string, certFile, keyFile string) (bool, error)
|
||||
|
||||
func createServerSigningCertKey(config *config.Control, runtime *config.ControlRuntime) (bool, error) {
|
||||
TokenCA := filepath.Join(config.DataDir, "tls", "token-ca.crt")
|
||||
TokenCAKey := filepath.Join(config.DataDir, "tls", "token-ca.key")
|
||||
|
||||
if exists(TokenCA, TokenCAKey) && !exists(runtime.ServerCA) && !exists(runtime.ServerCAKey) {
|
||||
logrus.Infof("Upgrading token-ca files to server-ca")
|
||||
if err := os.Link(TokenCA, runtime.ServerCA); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.Link(TokenCAKey, runtime.ServerCAKey); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
return createSigningCertKey(version.Program+"-server", runtime.ServerCA, runtime.ServerCAKey)
|
||||
}
|
||||
|
||||
func addSANs(altNames *certutil.AltNames, sans []string) {
|
||||
for _, san := range sans {
|
||||
ip := net.ParseIP(san)
|
||||
if ip == nil {
|
||||
altNames.DNSNames = append(altNames.DNSNames, san)
|
||||
} else {
|
||||
altNames.IPs = append(altNames.IPs, ip)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sansChanged(certFile string, sans *certutil.AltNames) bool {
|
||||
if sans == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
certBytes, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
certificates, err := certutil.ParseCertsPEM(certBytes)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(certificates) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if !sets.NewString(certificates[0].DNSNames...).HasAll(sans.DNSNames...) {
|
||||
return true
|
||||
}
|
||||
|
||||
ips := sets.NewString()
|
||||
for _, ip := range certificates[0].IPAddresses {
|
||||
ips.Insert(ip.String())
|
||||
}
|
||||
|
||||
for _, ip := range sans.IPs {
|
||||
if !ips.Has(ip.String()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func createClientCertKey(regen bool, commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCertFile, caKeyFile, certFile, keyFile string) (bool, error) {
|
||||
caBytes, err := ioutil.ReadFile(caCertFile)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AppendCertsFromPEM(caBytes)
|
||||
|
||||
// check for certificate expiration
|
||||
if !regen {
|
||||
regen = expired(certFile, pool)
|
||||
}
|
||||
|
||||
if !regen {
|
||||
regen = sansChanged(certFile, altNames)
|
||||
}
|
||||
|
||||
if !regen {
|
||||
if exists(certFile, keyFile) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
caKeyBytes, err := ioutil.ReadFile(caKeyFile)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
caCert, err := certutil.ParseCertsPEM(caBytes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
keyBytes, _, err := certutil.LoadOrGenerateKeyFile(keyFile, regen)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
key, err := certutil.ParsePrivateKeyPEM(keyBytes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
cfg := certutil.Config{
|
||||
CommonName: commonName,
|
||||
Organization: organization,
|
||||
Usages: extKeyUsage,
|
||||
}
|
||||
if altNames != nil {
|
||||
cfg.AltNames = *altNames
|
||||
}
|
||||
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCert[0], caKey.(crypto.Signer))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, certutil.WriteCert(certFile, append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCert[0])...))
|
||||
}
|
||||
|
||||
func exists(files ...string) bool {
|
||||
for _, file := range files {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func genServiceAccount(runtime *config.ControlRuntime) error {
|
||||
_, keyErr := os.Stat(runtime.ServiceKey)
|
||||
if keyErr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
key, err := certutil.NewPrivateKey()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return certutil.WriteKey(runtime.ServiceKey, certutil.EncodePrivateKeyPEM(key))
|
||||
}
|
||||
|
||||
func createSigningCertKey(prefix, certFile, keyFile string) (bool, error) {
|
||||
if exists(certFile, keyFile) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
caKeyBytes, _, err := certutil.LoadOrGenerateKeyFile(keyFile, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
cfg := certutil.Config{
|
||||
CommonName: fmt.Sprintf("%s-ca@%d", prefix, time.Now().Unix()),
|
||||
}
|
||||
|
||||
cert, err := certutil.NewSelfSignedCACert(cfg, caKey.(crypto.Signer))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := certutil.WriteCert(certFile, certutil.EncodeCertPEM(cert)); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func expired(certFile string, pool *x509.CertPool) bool {
|
||||
certBytes, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
certificates, err := certutil.ParseCertsPEM(certBytes)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
_, err = certificates[0].Verify(x509.VerifyOptions{
|
||||
Roots: pool,
|
||||
KeyUsages: []x509.ExtKeyUsage{
|
||||
x509.ExtKeyUsageAny,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
return certutil.IsCertExpired(certificates[0], config.CertificateRenewDays)
|
||||
}
|
||||
|
||||
func genEncryptionConfig(controlConfig *config.Control, runtime *config.ControlRuntime) error {
|
||||
if !controlConfig.EncryptSecrets {
|
||||
return nil
|
||||
}
|
||||
if s, err := os.Stat(runtime.EncryptionConfig); err == nil && s.Size() > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
aescbcKey := make([]byte, aescbcKeySize, aescbcKeySize)
|
||||
_, err := cryptorand.Read(aescbcKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedKey := b64.StdEncoding.EncodeToString(aescbcKey)
|
||||
|
||||
encConfig := apiserverconfigv1.EncryptionConfiguration{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "EncryptionConfiguration",
|
||||
APIVersion: "apiserver.config.k8s.io/v1",
|
||||
},
|
||||
Resources: []apiserverconfigv1.ResourceConfiguration{
|
||||
{
|
||||
Resources: []string{"secrets"},
|
||||
Providers: []apiserverconfigv1.ProviderConfiguration{
|
||||
{
|
||||
AESCBC: &apiserverconfigv1.AESConfiguration{
|
||||
Keys: []apiserverconfigv1.Key{
|
||||
{
|
||||
Name: "aescbckey",
|
||||
Secret: encodedKey,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Identity: &apiserverconfigv1.IdentityConfiguration{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
jsonfile, err := json.Marshal(encConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(runtime.EncryptionConfig, jsonfile, 0600)
|
||||
}
|
@ -2,13 +2,6 @@ package control
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/x509"
|
||||
b64 "encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
@ -16,24 +9,17 @@ import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
"github.com/rancher/k3s/pkg/clientaccess"
|
||||
"github.com/rancher/k3s/pkg/cluster"
|
||||
"github.com/rancher/k3s/pkg/daemons/config"
|
||||
"github.com/rancher/k3s/pkg/daemons/control/deps"
|
||||
"github.com/rancher/k3s/pkg/daemons/executor"
|
||||
"github.com/rancher/k3s/pkg/passwd"
|
||||
"github.com/rancher/k3s/pkg/token"
|
||||
"github.com/rancher/k3s/pkg/version"
|
||||
"github.com/rancher/wrangler-api/pkg/generated/controllers/rbac"
|
||||
"github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
@ -41,7 +27,6 @@ import (
|
||||
ccmapp "k8s.io/cloud-provider/app"
|
||||
ccmopt "k8s.io/cloud-provider/options"
|
||||
app2 "k8s.io/controller-manager/app"
|
||||
"k8s.io/kubernetes/pkg/controlplane"
|
||||
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
|
||||
"k8s.io/kubernetes/pkg/proxy/util"
|
||||
|
||||
@ -51,36 +36,7 @@ import (
|
||||
_ "k8s.io/component-base/metrics/prometheus/restclient"
|
||||
)
|
||||
|
||||
var (
|
||||
localhostIP = net.ParseIP("127.0.0.1")
|
||||
requestHeaderCN = "system:auth-proxy"
|
||||
kubeconfigTemplate = template.Must(template.New("kubeconfig").Parse(`apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
server: {{.URL}}
|
||||
certificate-authority: {{.CACert}}
|
||||
name: local
|
||||
contexts:
|
||||
- context:
|
||||
cluster: local
|
||||
namespace: default
|
||||
user: user
|
||||
name: Default
|
||||
current-context: Default
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: user
|
||||
user:
|
||||
client-certificate: {{.ClientCert}}
|
||||
client-key: {{.ClientKey}}
|
||||
`))
|
||||
)
|
||||
|
||||
const (
|
||||
ipsecTokenSize = 48
|
||||
aescbcKeySize = 32
|
||||
)
|
||||
var localhostIP = net.ParseIP("127.0.0.1")
|
||||
|
||||
func Server(ctx context.Context, cfg *config.Control) error {
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
@ -217,7 +173,7 @@ func apiServer(ctx context.Context, cfg *config.Control, runtime *config.Control
|
||||
argsMap["kubelet-client-certificate"] = runtime.ClientKubeAPICert
|
||||
argsMap["kubelet-client-key"] = runtime.ClientKubeAPIKey
|
||||
argsMap["requestheader-client-ca-file"] = runtime.RequestHeaderCA
|
||||
argsMap["requestheader-allowed-names"] = requestHeaderCN
|
||||
argsMap["requestheader-allowed-names"] = deps.RequestHeaderCN
|
||||
argsMap["proxy-client-cert-file"] = runtime.ClientAuthProxyCert
|
||||
argsMap["proxy-client-key-file"] = runtime.ClientAuthProxyKey
|
||||
argsMap["requestheader-extra-headers-prefix"] = "X-Remote-Extra-"
|
||||
@ -234,6 +190,7 @@ func apiServer(ctx context.Context, cfg *config.Control, runtime *config.Control
|
||||
args := config.GetArgsList(argsMap, cfg.ExtraAPIArgs)
|
||||
|
||||
logrus.Infof("Running kube-apiserver %s", config.ArgString(args))
|
||||
|
||||
return executor.APIServer(ctx, runtime.ETCDReady, args)
|
||||
}
|
||||
|
||||
@ -349,27 +306,7 @@ func prepare(ctx context.Context, config *config.Control, runtime *config.Contro
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genServiceAccount(runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genUsers(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genEncryptedNetworkInfo(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := genEncryptionConfig(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := readTokens(runtime); err != nil {
|
||||
if err := deps.GenServerDeps(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -382,467 +319,6 @@ func prepare(ctx context.Context, config *config.Control, runtime *config.Contro
|
||||
return nil
|
||||
}
|
||||
|
||||
func readTokens(runtime *config.ControlRuntime) error {
|
||||
tokens, err := passwd.Read(runtime.PasswdFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if nodeToken, ok := tokens.Pass("node"); ok {
|
||||
runtime.AgentToken = "node:" + nodeToken
|
||||
}
|
||||
if serverToken, ok := tokens.Pass("server"); ok {
|
||||
runtime.ServerToken = "server:" + serverToken
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genEncryptedNetworkInfo(controlConfig *config.Control, runtime *config.ControlRuntime) error {
|
||||
if s, err := os.Stat(runtime.IPSECKey); err == nil && s.Size() > 0 {
|
||||
psk, err := ioutil.ReadFile(runtime.IPSECKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
controlConfig.IPSECPSK = strings.TrimSpace(string(psk))
|
||||
return nil
|
||||
}
|
||||
|
||||
psk, err := token.Random(ipsecTokenSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
controlConfig.IPSECPSK = psk
|
||||
if err := ioutil.WriteFile(runtime.IPSECKey, []byte(psk+"\n"), 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func migratePassword(p *passwd.Passwd) error {
|
||||
server, _ := p.Pass("server")
|
||||
node, _ := p.Pass("node")
|
||||
if server == "" && node != "" {
|
||||
return p.EnsureUser("server", version.Program+":server", node)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getServerPass(passwd *passwd.Passwd, config *config.Control) (string, error) {
|
||||
var (
|
||||
err error
|
||||
)
|
||||
|
||||
serverPass := config.Token
|
||||
if serverPass == "" {
|
||||
serverPass, _ = passwd.Pass("server")
|
||||
}
|
||||
if serverPass == "" {
|
||||
serverPass, err = token.Random(16)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return serverPass, nil
|
||||
}
|
||||
|
||||
func getNodePass(config *config.Control, serverPass string) string {
|
||||
if config.AgentToken == "" {
|
||||
if _, passwd, ok := clientaccess.ParseUsernamePassword(serverPass); ok {
|
||||
return passwd
|
||||
}
|
||||
return serverPass
|
||||
}
|
||||
return config.AgentToken
|
||||
}
|
||||
|
||||
func genUsers(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
passwd, err := passwd.Read(runtime.PasswdFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := migratePassword(passwd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serverPass, err := getServerPass(passwd, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodePass := getNodePass(config, serverPass)
|
||||
|
||||
if err := passwd.EnsureUser("node", version.Program+":agent", nodePass); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := passwd.EnsureUser("server", version.Program+":server", serverPass); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return passwd.Write(runtime.PasswdFile)
|
||||
}
|
||||
|
||||
func genCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
if err := genClientCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := genServerCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := genRequestHeaderCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := genETCDCerts(config, runtime); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type signedCertFactory = func(commonName string, organization []string, certFile, keyFile string) (bool, error)
|
||||
|
||||
func getSigningCertFactory(regen bool, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCertFile, caKeyFile string) signedCertFactory {
|
||||
return func(commonName string, organization []string, certFile, keyFile string) (bool, error) {
|
||||
return createClientCertKey(regen, commonName, organization, altNames, extKeyUsage, caCertFile, caKeyFile, certFile, keyFile)
|
||||
}
|
||||
}
|
||||
|
||||
func genClientCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
regen, err := createSigningCertKey(version.Program+"-client", runtime.ClientCA, runtime.ClientCAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
factory := getSigningCertFactory(regen, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, runtime.ClientCA, runtime.ClientCAKey)
|
||||
|
||||
var certGen bool
|
||||
apiEndpoint := fmt.Sprintf("https://127.0.0.1:%d", config.APIServerPort)
|
||||
|
||||
certGen, err = factory("system:admin", []string{"system:masters"}, runtime.ClientAdminCert, runtime.ClientAdminKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigAdmin, apiEndpoint, runtime.ServerCA, runtime.ClientAdminCert, runtime.ClientAdminKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certGen, err = factory("system:kube-controller-manager", nil, runtime.ClientControllerCert, runtime.ClientControllerKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigController, apiEndpoint, runtime.ServerCA, runtime.ClientControllerCert, runtime.ClientControllerKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certGen, err = factory("system:kube-scheduler", nil, runtime.ClientSchedulerCert, runtime.ClientSchedulerKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigScheduler, apiEndpoint, runtime.ServerCA, runtime.ClientSchedulerCert, runtime.ClientSchedulerKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certGen, err = factory("kube-apiserver", nil, runtime.ClientKubeAPICert, runtime.ClientKubeAPIKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigAPIServer, apiEndpoint, runtime.ServerCA, runtime.ClientKubeAPICert, runtime.ClientKubeAPIKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err = factory("system:kube-proxy", nil, runtime.ClientKubeProxyCert, runtime.ClientKubeProxyKey); err != nil {
|
||||
return err
|
||||
}
|
||||
// This user (system:k3s-controller by default) must be bound to a role in rolebindings.yaml or the downstream equivalent
|
||||
if _, err = factory("system:"+version.Program+"-controller", nil, runtime.ClientK3sControllerCert, runtime.ClientK3sControllerKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, _, err := certutil.LoadOrGenerateKeyFile(runtime.ClientKubeletKey, regen); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
certGen, err = factory("cloud-controller-manager", nil, runtime.ClientCloudControllerCert, runtime.ClientCloudControllerKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if certGen {
|
||||
if err := KubeConfig(runtime.KubeConfigCloudController, apiEndpoint, runtime.ServerCA, runtime.ClientCloudControllerCert, runtime.ClientCloudControllerKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createServerSigningCertKey(config *config.Control, runtime *config.ControlRuntime) (bool, error) {
|
||||
TokenCA := filepath.Join(config.DataDir, "tls", "token-ca.crt")
|
||||
TokenCAKey := filepath.Join(config.DataDir, "tls", "token-ca.key")
|
||||
|
||||
if exists(TokenCA, TokenCAKey) && !exists(runtime.ServerCA) && !exists(runtime.ServerCAKey) {
|
||||
logrus.Infof("Upgrading token-ca files to server-ca")
|
||||
if err := os.Link(TokenCA, runtime.ServerCA); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.Link(TokenCAKey, runtime.ServerCAKey); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
return createSigningCertKey(version.Program+"-server", runtime.ServerCA, runtime.ServerCAKey)
|
||||
}
|
||||
|
||||
func addSANs(altNames *certutil.AltNames, sans []string) {
|
||||
for _, san := range sans {
|
||||
ip := net.ParseIP(san)
|
||||
if ip == nil {
|
||||
altNames.DNSNames = append(altNames.DNSNames, san)
|
||||
} else {
|
||||
altNames.IPs = append(altNames.IPs, ip)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genServerCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
regen, err := createServerSigningCertKey(config, runtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, apiServerServiceIP, err := controlplane.ServiceIPRange(*config.ServiceIPRange)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
altNames := &certutil.AltNames{
|
||||
DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"},
|
||||
IPs: []net.IP{apiServerServiceIP},
|
||||
}
|
||||
|
||||
addSANs(altNames, config.SANs)
|
||||
|
||||
if _, err := createClientCertKey(regen, "kube-apiserver", nil,
|
||||
altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
runtime.ServerCA, runtime.ServerCAKey,
|
||||
runtime.ServingKubeAPICert, runtime.ServingKubeAPIKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, _, err := certutil.LoadOrGenerateKeyFile(runtime.ServingKubeletKey, regen); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genETCDCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
regen, err := createSigningCertKey("etcd-server", runtime.ETCDServerCA, runtime.ETCDServerCAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
altNames := &certutil.AltNames{
|
||||
DNSNames: []string{"localhost"},
|
||||
}
|
||||
addSANs(altNames, config.SANs)
|
||||
|
||||
if _, err := createClientCertKey(regen, "etcd-server", nil,
|
||||
altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
runtime.ETCDServerCA, runtime.ETCDServerCAKey,
|
||||
runtime.ServerETCDCert, runtime.ServerETCDKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := createClientCertKey(regen, "etcd-client", nil,
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
runtime.ETCDServerCA, runtime.ETCDServerCAKey,
|
||||
runtime.ClientETCDCert, runtime.ClientETCDKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
regen, err = createSigningCertKey("etcd-peer", runtime.ETCDPeerCA, runtime.ETCDPeerCAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := createClientCertKey(regen, "etcd-peer", nil,
|
||||
altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
runtime.ETCDPeerCA, runtime.ETCDPeerCAKey,
|
||||
runtime.PeerServerClientETCDCert, runtime.PeerServerClientETCDKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genRequestHeaderCerts(config *config.Control, runtime *config.ControlRuntime) error {
|
||||
regen, err := createSigningCertKey(version.Program+"-request-header", runtime.RequestHeaderCA, runtime.RequestHeaderCAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := createClientCertKey(regen, requestHeaderCN, nil,
|
||||
nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
runtime.RequestHeaderCA, runtime.RequestHeaderCAKey,
|
||||
runtime.ClientAuthProxyCert, runtime.ClientAuthProxyKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createClientCertKey(regen bool, commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCertFile, caKeyFile, certFile, keyFile string) (bool, error) {
|
||||
caBytes, err := ioutil.ReadFile(caCertFile)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
pool.AppendCertsFromPEM(caBytes)
|
||||
|
||||
// check for certificate expiration
|
||||
if !regen {
|
||||
regen = expired(certFile, pool)
|
||||
}
|
||||
|
||||
if !regen {
|
||||
regen = sansChanged(certFile, altNames)
|
||||
}
|
||||
|
||||
if !regen {
|
||||
if exists(certFile, keyFile) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
caKeyBytes, err := ioutil.ReadFile(caKeyFile)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
caCert, err := certutil.ParseCertsPEM(caBytes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
keyBytes, _, err := certutil.LoadOrGenerateKeyFile(keyFile, regen)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
key, err := certutil.ParsePrivateKeyPEM(keyBytes)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
cfg := certutil.Config{
|
||||
CommonName: commonName,
|
||||
Organization: organization,
|
||||
Usages: extKeyUsage,
|
||||
}
|
||||
if altNames != nil {
|
||||
cfg.AltNames = *altNames
|
||||
}
|
||||
cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCert[0], caKey.(crypto.Signer))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, certutil.WriteCert(certFile, append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCert[0])...))
|
||||
}
|
||||
|
||||
func exists(files ...string) bool {
|
||||
for _, file := range files {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// genServiceAccount generates the service-account signing key if one does
// not already exist at runtime.ServiceKey. An existing key file is left
// untouched so service-account tokens stay valid across restarts.
func genServiceAccount(runtime *config.ControlRuntime) error {
	_, keyErr := os.Stat(runtime.ServiceKey)
	if keyErr == nil {
		// key already present; nothing to do
		return nil
	}

	key, err := certutil.NewPrivateKey()
	if err != nil {
		return err
	}

	return certutil.WriteKey(runtime.ServiceKey, certutil.EncodePrivateKeyPEM(key))
}
|
||||
|
||||
// createSigningCertKey ensures a self-signed CA certificate/key pair exists
// at certFile/keyFile. If both files already exist they are left untouched
// and (false, nil) is returned; otherwise a key is loaded or generated and a
// self-signed CA cert with CN "<prefix>-ca@<unix-timestamp>" is written,
// returning (true, nil) on success.
func createSigningCertKey(prefix, certFile, keyFile string) (bool, error) {
	if exists(certFile, keyFile) {
		return false, nil
	}

	// reuse an existing key file if present, otherwise generate a new one
	caKeyBytes, _, err := certutil.LoadOrGenerateKeyFile(keyFile, false)
	if err != nil {
		return false, err
	}

	caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes)
	if err != nil {
		return false, err
	}

	cfg := certutil.Config{
		CommonName: fmt.Sprintf("%s-ca@%d", prefix, time.Now().Unix()),
	}

	cert, err := certutil.NewSelfSignedCACert(cfg, caKey.(crypto.Signer))
	if err != nil {
		return false, err
	}

	if err := certutil.WriteCert(certFile, certutil.EncodeCertPEM(cert)); err != nil {
		return false, err
	}
	return true, nil
}
|
||||
|
||||
// KubeConfig renders the package kubeconfig template with the given API
// server URL and certificate/key paths, writing the result to dest.
// The destination file is created (or truncated) before rendering.
func KubeConfig(dest, url, caCert, clientCert, clientKey string) error {
	data := struct {
		URL        string
		CACert     string
		ClientCert string
		ClientKey  string
	}{
		URL:        url,
		CACert:     caCert,
		ClientCert: clientCert,
		ClientKey:  clientKey,
	}

	output, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer output.Close()

	return kubeconfigTemplate.Execute(output, &data)
}
|
||||
|
||||
func setupStorageBackend(argsMap map[string]string, cfg *config.Control) {
|
||||
argsMap["storage-backend"] = "etcd3"
|
||||
// specify the endpoints
|
||||
@ -861,64 +337,6 @@ func setupStorageBackend(argsMap map[string]string, cfg *config.Control) {
|
||||
}
|
||||
}
|
||||
|
||||
func sansChanged(certFile string, sans *certutil.AltNames) bool {
|
||||
if sans == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
certBytes, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
certificates, err := certutil.ParseCertsPEM(certBytes)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(certificates) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if !sets.NewString(certificates[0].DNSNames...).HasAll(sans.DNSNames...) {
|
||||
return true
|
||||
}
|
||||
|
||||
ips := sets.NewString()
|
||||
for _, ip := range certificates[0].IPAddresses {
|
||||
ips.Insert(ip.String())
|
||||
}
|
||||
|
||||
for _, ip := range sans.IPs {
|
||||
if !ips.Has(ip.String()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func expired(certFile string, pool *x509.CertPool) bool {
|
||||
certBytes, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
certificates, err := certutil.ParseCertsPEM(certBytes)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
_, err = certificates[0].Verify(x509.VerifyOptions{
|
||||
Roots: pool,
|
||||
KeyUsages: []x509.ExtKeyUsage{
|
||||
x509.ExtKeyUsageAny,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
return certutil.IsCertExpired(certificates[0], config.CertificateRenewDays)
|
||||
}
|
||||
|
||||
func cloudControllerManager(ctx context.Context, cfg *config.Control, runtime *config.ControlRuntime) {
|
||||
// Reference: https://github.com/kubernetes/kubernetes/pull/93764
|
||||
// The above-linked change made the in-tree cloud-controller-manager command much more flexible
|
||||
@ -1061,51 +479,3 @@ func promise(f func() error) <-chan error {
|
||||
}()
|
||||
return c
|
||||
}
|
||||
|
||||
// genEncryptionConfig writes a secrets-at-rest EncryptionConfiguration for
// the apiserver when secrets encryption is enabled. It is a no-op when
// encryption is disabled or a non-empty config file already exists (so the
// key stays stable across restarts). The generated config holds one freshly
// generated random AES-CBC key named "aescbckey" plus an identity provider
// fallback, and is written with mode 0600 to runtime.EncryptionConfig.
func genEncryptionConfig(controlConfig *config.Control, runtime *config.ControlRuntime) error {
	if !controlConfig.EncryptSecrets {
		return nil
	}
	if s, err := os.Stat(runtime.EncryptionConfig); err == nil && s.Size() > 0 {
		// keep an existing config so previously encrypted secrets stay readable
		return nil
	}

	aescbcKey := make([]byte, aescbcKeySize, aescbcKeySize)
	_, err := cryptorand.Read(aescbcKey)
	if err != nil {
		return err
	}
	encodedKey := b64.StdEncoding.EncodeToString(aescbcKey)

	encConfig := apiserverconfigv1.EncryptionConfiguration{
		TypeMeta: metav1.TypeMeta{
			Kind:       "EncryptionConfiguration",
			APIVersion: "apiserver.config.k8s.io/v1",
		},
		Resources: []apiserverconfigv1.ResourceConfiguration{
			{
				Resources: []string{"secrets"},
				Providers: []apiserverconfigv1.ProviderConfiguration{
					{
						AESCBC: &apiserverconfigv1.AESConfiguration{
							Keys: []apiserverconfigv1.Key{
								{
									Name:   "aescbckey",
									Secret: encodedKey,
								},
							},
						},
					},
					{
						// identity fallback allows reading secrets written
						// before encryption was enabled
						Identity: &apiserverconfigv1.IdentityConfiguration{},
					},
				},
			},
		},
	}
	jsonfile, err := json.Marshal(encConfig)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(runtime.EncryptionConfig, jsonfile, 0600)
}
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
certutil "github.com/rancher/dynamiclistener/cert"
|
||||
"github.com/rancher/k3s/pkg/clientaccess"
|
||||
"github.com/rancher/k3s/pkg/daemons/config"
|
||||
"github.com/rancher/k3s/pkg/daemons/control/deps"
|
||||
"github.com/rancher/k3s/pkg/daemons/executor"
|
||||
"github.com/rancher/k3s/pkg/version"
|
||||
"github.com/robfig/cron/v3"
|
||||
@ -41,6 +42,7 @@ type ETCD struct {
|
||||
runtime *config.ControlRuntime
|
||||
address string
|
||||
cron *cron.Cron
|
||||
s3 *s3
|
||||
}
|
||||
|
||||
type learnerProgress struct {
|
||||
@ -135,12 +137,12 @@ func walDir(config *config.Control) string {
|
||||
return filepath.Join(etcdDBDir(config), "member", "wal")
|
||||
}
|
||||
|
||||
// nameFile returns the path to etcdDBDir/name
|
||||
// nameFile returns the path to etcdDBDir/name.
|
||||
func nameFile(config *config.Control) string {
|
||||
return filepath.Join(etcdDBDir(config), "name")
|
||||
}
|
||||
|
||||
// ResetFile returns the path to etcdDBDir/reset-flag
|
||||
// ResetFile returns the path to etcdDBDir/reset-flag.
|
||||
func ResetFile(config *config.Control) string {
|
||||
return filepath.Join(config.DataDir, "db", "reset-flag")
|
||||
}
|
||||
@ -159,7 +161,7 @@ func (e *ETCD) IsInitialized(ctx context.Context, config *config.Control) (bool,
|
||||
}
|
||||
|
||||
// Reset resets an etcd node
|
||||
func (e *ETCD) Reset(ctx context.Context) error {
|
||||
func (e *ETCD) Reset(ctx context.Context, rebootstrap func() error, cleanCerts func()) error {
|
||||
// Wait for etcd to come up as a new single-node cluster, then exit
|
||||
go func() {
|
||||
t := time.NewTicker(5 * time.Second)
|
||||
@ -171,6 +173,18 @@ func (e *ETCD) Reset(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
// storageBootstrap() - runtime structure has been written with correct certificate data
|
||||
if err := rebootstrap(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
cleanCerts()
|
||||
|
||||
// call functions to rewrite them from daemons/control/server.go (prepare())
|
||||
if err := deps.GenServerDeps(e.config, e.runtime); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if len(members.Members) == 1 && members.Members[0].Name == e.name {
|
||||
logrus.Infof("Etcd is running, restart without --cluster-reset flag now. Backup and delete ${datadir}/server/db on each peer etcd server and rejoin the nodes")
|
||||
os.Exit(0)
|
||||
@ -181,6 +195,21 @@ func (e *ETCD) Reset(ctx context.Context) error {
|
||||
|
||||
// If asked to restore from a snapshot, do so
|
||||
if e.config.ClusterResetRestorePath != "" {
|
||||
if e.config.EtcdS3 {
|
||||
if e.s3 == nil {
|
||||
s3, err := newS3(ctx, e.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.s3 = s3
|
||||
}
|
||||
logrus.Infof("Retrieving etcd snapshot %s from S3", e.config.ClusterResetRestorePath)
|
||||
if err := e.s3.download(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("S3 download complete for %s", e.config.ClusterResetRestorePath)
|
||||
}
|
||||
|
||||
info, err := os.Stat(e.config.ClusterResetRestorePath)
|
||||
if os.IsNotExist(err) {
|
||||
return fmt.Errorf("etcd: snapshot path does not exist: %s", e.config.ClusterResetRestorePath)
|
||||
@ -798,6 +827,29 @@ func (e *ETCD) Snapshot(ctx context.Context, config *config.Control) error {
|
||||
return errors.Wrap(err, "failed to save snapshot")
|
||||
}
|
||||
|
||||
if e.config.EtcdS3 {
|
||||
logrus.Infof("Saving etcd snapshot %s to S3", snapshotName)
|
||||
if e.s3 == nil {
|
||||
s3, err := newS3(ctx, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.s3 = s3
|
||||
}
|
||||
if err := e.s3.upload(ctx, snapshotPath); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("S3 upload complete for %s", snapshotName)
|
||||
|
||||
if e.config.EtcdSnapshotRetention >= 1 {
|
||||
if err := e.s3.snapshotRetention(ctx); err != nil {
|
||||
return errors.Wrap(err, "failed to apply s3 snapshot retention")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if we need to perform a retention check
|
||||
if e.config.EtcdSnapshotRetention >= 1 {
|
||||
if err := snapshotRetention(e.config.EtcdSnapshotRetention, snapshotDir); err != nil {
|
||||
@ -870,7 +922,15 @@ func snapshotRetention(retention int, snapshotDir string) error {
|
||||
sort.Slice(snapshotFiles, func(i, j int) bool {
|
||||
return snapshotFiles[i].Name() < snapshotFiles[j].Name()
|
||||
})
|
||||
return os.Remove(filepath.Join(snapshotDir, snapshotFiles[0].Name()))
|
||||
|
||||
delCount := len(snapshotFiles) - retention
|
||||
for _, df := range snapshotFiles[:delCount] {
|
||||
if err := os.Remove(filepath.Join(snapshotDir, df.Name())); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// backupDirWithRetention will move the dir to a backup dir
|
||||
|
232
pkg/etcd/s3.go
Normal file
232
pkg/etcd/s3.go
Normal file
@ -0,0 +1,232 @@
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/k3s/pkg/daemons/config"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// s3 maintains state for S3 functionality.
type s3 struct {
	config *config.Control // control config carrying the EtcdS3* settings
	client *minio.Client   // initialized client for the configured endpoint
}
|
||||
|
||||
// newS3 creates a new value of type s3 pointer with a
// copy of the config.Control pointer and initializes
// a new Minio client. It validates that the configured
// bucket exists before returning.
func newS3(ctx context.Context, config *config.Control) (*s3, error) {
	tr := http.DefaultTransport
	if config.EtcdS3EndpointCA != "" {
		trCA, err := setTransportCA(tr, config.EtcdS3EndpointCA, config.EtcdS3SkipSSLVerify)
		if err != nil {
			return nil, err
		}
		tr = trCA
	}

	// no explicit credentials means we expect instance-profile (IAM) auth
	var creds *credentials.Credentials
	if len(config.EtcdS3AccessKey) == 0 && len(config.EtcdS3SecretKey) == 0 {
		creds = credentials.NewIAM("") // for running on ec2 instance
	} else {
		creds = credentials.NewStaticV4(config.EtcdS3AccessKey, config.EtcdS3SecretKey, "")
	}

	opt := minio.Options{
		Creds:        creds,
		Secure:       true,
		Region:       config.EtcdS3Region,
		Transport:    tr,
		BucketLookup: bucketLookupType(config.EtcdS3Endpoint),
	}
	c, err := minio.New(config.EtcdS3Endpoint, &opt)
	if err != nil {
		return nil, err
	}

	// fail fast if the bucket is missing rather than at upload/download time
	logrus.Infof("Checking if S3 bucket %s exists", config.EtcdS3BucketName)
	exists, err := c.BucketExists(ctx, config.EtcdS3BucketName)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, fmt.Errorf("bucket: %s does not exist", config.EtcdS3BucketName)
	}
	logrus.Infof("S3 bucket %s exists", config.EtcdS3BucketName)

	return &s3{
		config: config,
		client: c,
	}, nil
}
|
||||
|
||||
// upload uploads the given snapshot to the configured S3
|
||||
// compatible backend.
|
||||
func (s *s3) upload(ctx context.Context, snapshot string) error {
|
||||
basename := filepath.Base(snapshot)
|
||||
var snapshotFileName string
|
||||
if s.config.EtcdS3Folder != "" {
|
||||
snapshotFileName = filepath.Join(s.config.EtcdS3Folder, basename)
|
||||
} else {
|
||||
snapshotFileName = basename
|
||||
}
|
||||
|
||||
opts := minio.PutObjectOptions{
|
||||
ContentType: "application/zip",
|
||||
NumThreads: 2,
|
||||
}
|
||||
if _, err := s.client.FPutObject(ctx, s.config.EtcdS3BucketName, snapshotFileName, snapshot, opts); err != nil {
|
||||
logrus.Errorf("Error received in attempt to upload snapshot to S3: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// download downloads the given snapshot from the configured S3
|
||||
// compatible backend.
|
||||
func (s *s3) download(ctx context.Context) error {
|
||||
var remotePath string
|
||||
if s.config.EtcdS3Folder != "" {
|
||||
remotePath = filepath.Join(s.config.EtcdS3Folder, s.config.ClusterResetRestorePath)
|
||||
} else {
|
||||
remotePath = s.config.ClusterResetRestorePath
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieving snapshot: %s", remotePath)
|
||||
r, err := s.client.GetObject(ctx, s.config.EtcdS3BucketName, remotePath, minio.GetObjectOptions{})
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
snapshotDir, err := snapshotDir(s.config)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get the snapshot dir")
|
||||
}
|
||||
|
||||
fullSnapshotPath := filepath.Join(snapshotDir, s.config.ClusterResetRestorePath)
|
||||
sf, err := os.Create(fullSnapshotPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sf.Close()
|
||||
|
||||
stat, err := r.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.CopyN(sf, r, stat.Size); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.config.ClusterResetRestorePath = fullSnapshotPath
|
||||
|
||||
return os.Chmod(fullSnapshotPath, 0600)
|
||||
}
|
||||
|
||||
// snapshotRetention lists the snapshot objects under the configured prefix
// (EtcdS3Folder + snapshotPrefix) and deletes the oldest ones — by lexically
// sorted key, which matches the timestamped snapshot naming — until at most
// EtcdSnapshotRetention objects remain in the bucket.
func (s *s3) snapshotRetention(ctx context.Context) error {
	var snapshotFiles []minio.ObjectInfo

	var prefix string
	if s.config.EtcdS3Folder != "" {
		prefix = filepath.Join(s.config.EtcdS3Folder, snapshotPrefix)
	} else {
		prefix = snapshotPrefix
	}

	loo := minio.ListObjectsOptions{
		Recursive: true,
		Prefix:    prefix,
	}
	for info := range s.client.ListObjects(ctx, s.config.EtcdS3BucketName, loo) {
		if info.Err != nil {
			return info.Err
		}
		snapshotFiles = append(snapshotFiles, info)
	}

	// nothing to prune while we are at or below the retention limit
	if len(snapshotFiles) <= s.config.EtcdSnapshotRetention {
		return nil
	}

	// sort ascending by key so the oldest snapshots come first
	sort.Slice(snapshotFiles, func(i, j int) bool {
		return snapshotFiles[i].Key < snapshotFiles[j].Key
	})

	delCount := len(snapshotFiles) - s.config.EtcdSnapshotRetention
	for _, df := range snapshotFiles[:delCount] {
		logrus.Debugf("Removing snapshot: %s", df.Key)
		if err := s.client.RemoveObject(ctx, s.config.EtcdS3BucketName, df.Key, minio.RemoveObjectOptions{}); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func readS3EndpointCA(endpointCA string) ([]byte, error) {
|
||||
ca, err := base64.StdEncoding.DecodeString(endpointCA)
|
||||
if err != nil {
|
||||
return ioutil.ReadFile(endpointCA)
|
||||
}
|
||||
return ca, nil
|
||||
}
|
||||
|
||||
func setTransportCA(tr http.RoundTripper, endpointCA string, insecureSkipVerify bool) (http.RoundTripper, error) {
|
||||
ca, err := readS3EndpointCA(endpointCA)
|
||||
if err != nil {
|
||||
return tr, err
|
||||
}
|
||||
if !isValidCertificate(ca) {
|
||||
return tr, errors.New("endpoint-ca is not a valid x509 certificate")
|
||||
}
|
||||
|
||||
certPool := x509.NewCertPool()
|
||||
certPool.AppendCertsFromPEM(ca)
|
||||
|
||||
tr.(*http.Transport).TLSClientConfig = &tls.Config{
|
||||
RootCAs: certPool,
|
||||
InsecureSkipVerify: insecureSkipVerify,
|
||||
}
|
||||
|
||||
return tr, nil
|
||||
}
|
||||
|
||||
// isValidCertificate checks to see if the given
|
||||
// byte slice is a valid x509 certificate.
|
||||
func isValidCertificate(c []byte) bool {
|
||||
p, _ := pem.Decode(c)
|
||||
if p == nil {
|
||||
return false
|
||||
}
|
||||
if _, err := x509.ParseCertificates(p.Bytes); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func bucketLookupType(endpoint string) minio.BucketLookupType {
|
||||
if strings.Contains(endpoint, "aliyun") { // backwards compt with RKE1
|
||||
return minio.BucketLookupDNS
|
||||
}
|
||||
return minio.BucketLookupAuto
|
||||
}
|
24
vendor/github.com/klauspost/cpuid/.gitignore
generated
vendored
Normal file
24
vendor/github.com/klauspost/cpuid/.gitignore
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
46
vendor/github.com/klauspost/cpuid/.travis.yml
generated
vendored
Normal file
46
vendor/github.com/klauspost/cpuid/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
language: go
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
- windows
|
||||
|
||||
arch:
|
||||
- amd64
|
||||
- arm64
|
||||
|
||||
go:
|
||||
- 1.12.x
|
||||
- 1.13.x
|
||||
- 1.14.x
|
||||
- master
|
||||
|
||||
script:
|
||||
- go vet ./...
|
||||
- go test -race ./...
|
||||
- go test -tags=noasm ./...
|
||||
|
||||
stages:
|
||||
- gofmt
|
||||
- test
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: 'master'
|
||||
fast_finish: true
|
||||
include:
|
||||
- stage: gofmt
|
||||
go: 1.14.x
|
||||
os: linux
|
||||
arch: amd64
|
||||
script:
|
||||
- diff <(gofmt -d .) <(printf "")
|
||||
- diff <(gofmt -d ./private) <(printf "")
|
||||
- go install github.com/klauspost/asmfmt/cmd/asmfmt
|
||||
- diff <(asmfmt -d .) <(printf "")
|
||||
- stage: i386
|
||||
go: 1.14.x
|
||||
os: linux
|
||||
arch: amd64
|
||||
script:
|
||||
- GOOS=linux GOARCH=386 go test .
|
35
vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
generated
vendored
Normal file
35
vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2015- Klaus Post & Contributors.
|
||||
Email: klauspost@gmail.com
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
22
vendor/github.com/klauspost/cpuid/LICENSE
generated
vendored
Normal file
22
vendor/github.com/klauspost/cpuid/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Klaus Post
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
191
vendor/github.com/klauspost/cpuid/README.md
generated
vendored
Normal file
191
vendor/github.com/klauspost/cpuid/README.md
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
# cpuid
|
||||
Package cpuid provides information about the CPU running the current program.
|
||||
|
||||
CPU features are detected on startup, and kept for fast access through the life of the application.
|
||||
Currently x86 / x64 (AMD64/i386) and ARM (ARM64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
|
||||
|
||||
You can access the CPU information by accessing the shared CPU variable of the cpuid library.
|
||||
|
||||
Package home: https://github.com/klauspost/cpuid
|
||||
|
||||
[![GoDoc][1]][2] [![Build Status][3]][4]
|
||||
|
||||
[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
|
||||
[2]: https://godoc.org/github.com/klauspost/cpuid
|
||||
[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master
|
||||
[4]: https://travis-ci.org/klauspost/cpuid
|
||||
|
||||
# features
|
||||
|
||||
## x86 CPU Instructions
|
||||
* **CMOV** (i686 CMOV)
|
||||
* **NX** (NX (No-Execute) bit)
|
||||
* **AMD3DNOW** (AMD 3DNOW)
|
||||
* **AMD3DNOWEXT** (AMD 3DNowExt)
|
||||
* **MMX** (standard MMX)
|
||||
* **MMXEXT** (SSE integer functions or AMD MMX ext)
|
||||
* **SSE** (SSE functions)
|
||||
* **SSE2** (P4 SSE functions)
|
||||
* **SSE3** (Prescott SSE3 functions)
|
||||
* **SSSE3** (Conroe SSSE3 functions)
|
||||
* **SSE4** (Penryn SSE4.1 functions)
|
||||
* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
|
||||
* **SSE42** (Nehalem SSE4.2 functions)
|
||||
* **AVX** (AVX functions)
|
||||
* **AVX2** (AVX2 functions)
|
||||
* **FMA3** (Intel FMA 3)
|
||||
* **FMA4** (Bulldozer FMA4 functions)
|
||||
* **XOP** (Bulldozer XOP functions)
|
||||
* **F16C** (Half-precision floating-point conversion)
|
||||
* **BMI1** (Bit Manipulation Instruction Set 1)
|
||||
* **BMI2** (Bit Manipulation Instruction Set 2)
|
||||
* **TBM** (AMD Trailing Bit Manipulation)
|
||||
* **LZCNT** (LZCNT instruction)
|
||||
* **POPCNT** (POPCNT instruction)
|
||||
* **AESNI** (Advanced Encryption Standard New Instructions)
|
||||
* **CLMUL** (Carry-less Multiplication)
|
||||
* **HTT** (Hyperthreading (enabled))
|
||||
* **HLE** (Hardware Lock Elision)
|
||||
* **RTM** (Restricted Transactional Memory)
|
||||
* **RDRAND** (RDRAND instruction is available)
|
||||
* **RDSEED** (RDSEED instruction is available)
|
||||
* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
|
||||
* **SHA** (Intel SHA Extensions)
|
||||
* **AVX512F** (AVX-512 Foundation)
|
||||
* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
|
||||
* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
|
||||
* **AVX512PF** (AVX-512 Prefetch Instructions)
|
||||
* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
|
||||
* **AVX512CD** (AVX-512 Conflict Detection Instructions)
|
||||
* **AVX512BW** (AVX-512 Byte and Word Instructions)
|
||||
* **AVX512VL** (AVX-512 Vector Length Extensions)
|
||||
* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
|
||||
* **AVX512VBMI2** (AVX-512 Vector Bit Manipulation Instructions, Version 2)
|
||||
* **AVX512VNNI** (AVX-512 Vector Neural Network Instructions)
|
||||
* **AVX512VPOPCNTDQ** (AVX-512 Vector Population Count Doubleword and Quadword)
|
||||
* **GFNI** (Galois Field New Instructions)
|
||||
* **VAES** (Vector AES)
|
||||
* **AVX512BITALG** (AVX-512 Bit Algorithms)
|
||||
* **VPCLMULQDQ** (Carry-Less Multiplication Quadword)
|
||||
* **AVX512BF16** (AVX-512 BFLOAT16 Instructions)
|
||||
* **AVX512VP2INTERSECT** (AVX-512 Intersect for D/Q)
|
||||
* **MPX** (Intel MPX (Memory Protection Extensions))
|
||||
* **ERMS** (Enhanced REP MOVSB/STOSB)
|
||||
* **RDTSCP** (RDTSCP Instruction)
|
||||
* **CX16** (CMPXCHG16B Instruction)
|
||||
* **SGX** (Software Guard Extensions, with activation details)
|
||||
* **VMX** (Virtual Machine Extensions)
|
||||
|
||||
## Performance
|
||||
* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
|
||||
* **SSE2SLOW** (SSE2 is supported, but usually not faster)
|
||||
* **SSE3SLOW** (SSE3 is supported, but usually not faster)
|
||||
* **ATOM** (Atom processor, some SSSE3 instructions are slower)
|
||||
* **Cache line** (Probable size of a cache line).
|
||||
* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
|
||||
|
||||
## ARM CPU features
|
||||
|
||||
# ARM FEATURE DETECTION DISABLED!
|
||||
|
||||
See [#52](https://github.com/klauspost/cpuid/issues/52).
|
||||
|
||||
Currently only `arm64` platforms are implemented.
|
||||
|
||||
* **FP** Single-precision and double-precision floating point
|
||||
* **ASIMD** Advanced SIMD
|
||||
* **EVTSTRM** Generic timer
|
||||
* **AES** AES instructions
|
||||
* **PMULL** Polynomial Multiply instructions (PMULL/PMULL2)
|
||||
* **SHA1** SHA-1 instructions (SHA1C, etc)
|
||||
* **SHA2** SHA-2 instructions (SHA256H, etc)
|
||||
* **CRC32** CRC32/CRC32C instructions
|
||||
* **ATOMICS** Large System Extensions (LSE)
|
||||
* **FPHP** Half-precision floating point
|
||||
* **ASIMDHP** Advanced SIMD half-precision floating point
|
||||
* **ARMCPUID** Some CPU ID registers readable at user-level
|
||||
* **ASIMDRDM** Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
|
||||
* **JSCVT** Javascript-style double->int convert (FJCVTZS)
|
||||
* **FCMA** Floating point complex number addition and multiplication
|
||||
* **LRCPC** Weaker release consistency (LDAPR, etc)
|
||||
* **DCPOP** Data cache clean to Point of Persistence (DC CVAP)
|
||||
* **SHA3** SHA-3 instructions (EOR3, RAXI, XAR, BCAX)
|
||||
* **SM3** SM3 instructions
|
||||
* **SM4** SM4 instructions
|
||||
* **ASIMDDP** SIMD Dot Product
|
||||
* **SHA512** SHA512 instructions
|
||||
* **SVE** Scalable Vector Extension
|
||||
* **GPA** Generic Pointer Authentication
|
||||
|
||||
## Cpu Vendor/VM
|
||||
* **Intel**
|
||||
* **AMD**
|
||||
* **VIA**
|
||||
* **Transmeta**
|
||||
* **NSC**
|
||||
* **KVM** (Kernel-based Virtual Machine)
|
||||
* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
|
||||
* **VMware**
|
||||
* **XenHVM**
|
||||
* **Bhyve**
|
||||
* **Hygon**
|
||||
|
||||
# installing
|
||||
|
||||
```go get github.com/klauspost/cpuid```
|
||||
|
||||
# example
|
||||
|
||||
```Go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/klauspost/cpuid"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Print basic CPU information:
|
||||
fmt.Println("Name:", cpuid.CPU.BrandName)
|
||||
fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
|
||||
fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
|
||||
fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
|
||||
fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
|
||||
fmt.Println("Features:", cpuid.CPU.Features)
|
||||
fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
|
||||
fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
|
||||
fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
|
||||
fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
|
||||
fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
|
||||
|
||||
// Test if we have a specific feature:
|
||||
if cpuid.CPU.SSE() {
|
||||
fmt.Println("We have Streaming SIMD Extensions")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Sample output:
|
||||
```
|
||||
>go run main.go
|
||||
Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
|
||||
PhysicalCores: 2
|
||||
ThreadsPerCore: 2
|
||||
LogicalCores: 4
|
||||
Family 6 Model: 42
|
||||
Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
|
||||
Cacheline bytes: 64
|
||||
We have Streaming SIMD Extensions
|
||||
```
|
||||
|
||||
# private package
|
||||
|
||||
In the "private" folder you can find an autogenerated version of the library you can include in your own packages.
|
||||
|
||||
For this purpose all exports are removed, and functions and constants are lowercased.
|
||||
|
||||
This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.
|
||||
|
||||
# license
|
||||
|
||||
This code is published under an MIT license. See LICENSE file for more information.
|
1504
vendor/github.com/klauspost/cpuid/cpuid.go
generated
vendored
Normal file
1504
vendor/github.com/klauspost/cpuid/cpuid.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
42
vendor/github.com/klauspost/cpuid/cpuid_386.s
generated
vendored
Normal file
42
vendor/github.com/klauspost/cpuid/cpuid_386.s
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
//+build 386,!gccgo,!noasm,!appengine
|
||||
|
||||
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmCpuid(SB), 7, $0
|
||||
XORL CX, CX
|
||||
MOVL op+0(FP), AX
|
||||
CPUID
|
||||
MOVL AX, eax+4(FP)
|
||||
MOVL BX, ebx+8(FP)
|
||||
MOVL CX, ecx+12(FP)
|
||||
MOVL DX, edx+16(FP)
|
||||
RET
|
||||
|
||||
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmCpuidex(SB), 7, $0
|
||||
MOVL op+0(FP), AX
|
||||
MOVL op2+4(FP), CX
|
||||
CPUID
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL BX, ebx+12(FP)
|
||||
MOVL CX, ecx+16(FP)
|
||||
MOVL DX, edx+20(FP)
|
||||
RET
|
||||
|
||||
// func xgetbv(index uint32) (eax, edx uint32)
|
||||
TEXT ·asmXgetbv(SB), 7, $0
|
||||
MOVL index+0(FP), CX
|
||||
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
|
||||
MOVL AX, eax+4(FP)
|
||||
MOVL DX, edx+8(FP)
|
||||
RET
|
||||
|
||||
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmRdtscpAsm(SB), 7, $0
|
||||
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
|
||||
MOVL AX, eax+0(FP)
|
||||
MOVL BX, ebx+4(FP)
|
||||
MOVL CX, ecx+8(FP)
|
||||
MOVL DX, edx+12(FP)
|
||||
RET
|
42
vendor/github.com/klauspost/cpuid/cpuid_amd64.s
generated
vendored
Normal file
42
vendor/github.com/klauspost/cpuid/cpuid_amd64.s
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
//+build amd64,!gccgo,!noasm,!appengine
|
||||
|
||||
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmCpuid(SB), 7, $0
|
||||
XORQ CX, CX
|
||||
MOVL op+0(FP), AX
|
||||
CPUID
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL BX, ebx+12(FP)
|
||||
MOVL CX, ecx+16(FP)
|
||||
MOVL DX, edx+20(FP)
|
||||
RET
|
||||
|
||||
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmCpuidex(SB), 7, $0
|
||||
MOVL op+0(FP), AX
|
||||
MOVL op2+4(FP), CX
|
||||
CPUID
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL BX, ebx+12(FP)
|
||||
MOVL CX, ecx+16(FP)
|
||||
MOVL DX, edx+20(FP)
|
||||
RET
|
||||
|
||||
// func asmXgetbv(index uint32) (eax, edx uint32)
|
||||
TEXT ·asmXgetbv(SB), 7, $0
|
||||
MOVL index+0(FP), CX
|
||||
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL DX, edx+12(FP)
|
||||
RET
|
||||
|
||||
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·asmRdtscpAsm(SB), 7, $0
|
||||
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
|
||||
MOVL AX, eax+0(FP)
|
||||
MOVL BX, ebx+4(FP)
|
||||
MOVL CX, ecx+8(FP)
|
||||
MOVL DX, edx+12(FP)
|
||||
RET
|
26
vendor/github.com/klauspost/cpuid/cpuid_arm64.s
generated
vendored
Normal file
26
vendor/github.com/klauspost/cpuid/cpuid_arm64.s
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
//+build arm64,!gccgo
|
||||
|
||||
// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt
|
||||
|
||||
// func getMidr
|
||||
TEXT ·getMidr(SB), 7, $0
|
||||
WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */
|
||||
MOVD R0, midr+0(FP)
|
||||
RET
|
||||
|
||||
// func getProcFeatures
|
||||
TEXT ·getProcFeatures(SB), 7, $0
|
||||
WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */
|
||||
MOVD R0, procFeatures+0(FP)
|
||||
RET
|
||||
|
||||
// func getInstAttributes
|
||||
TEXT ·getInstAttributes(SB), 7, $0
|
||||
WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */
|
||||
WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */
|
||||
MOVD R0, instAttrReg0+0(FP)
|
||||
MOVD R1, instAttrReg1+8(FP)
|
||||
RET
|
||||
|
219
vendor/github.com/klauspost/cpuid/detect_arm64.go
generated
vendored
Normal file
219
vendor/github.com/klauspost/cpuid/detect_arm64.go
generated
vendored
Normal file
@ -0,0 +1,219 @@
|
||||
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
//+build arm64,!gccgo,!noasm,!appengine
|
||||
|
||||
package cpuid
|
||||
|
||||
func getMidr() (midr uint64)
|
||||
func getProcFeatures() (procFeatures uint64)
|
||||
func getInstAttributes() (instAttrReg0, instAttrReg1 uint64)
|
||||
|
||||
func initCPU() {
|
||||
cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
|
||||
rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
}
|
||||
|
||||
func addInfo(c *CPUInfo) {
|
||||
// ARM64 disabled for now.
|
||||
if true {
|
||||
return
|
||||
}
|
||||
// midr := getMidr()
|
||||
|
||||
// MIDR_EL1 - Main ID Register
|
||||
// x--------------------------------------------------x
|
||||
// | Name | bits | visible |
|
||||
// |--------------------------------------------------|
|
||||
// | Implementer | [31-24] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | Variant | [23-20] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | Architecture | [19-16] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | PartNum | [15-4] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | Revision | [3-0] | y |
|
||||
// x--------------------------------------------------x
|
||||
|
||||
// fmt.Printf(" implementer: 0x%02x\n", (midr>>24)&0xff)
|
||||
// fmt.Printf(" variant: 0x%01x\n", (midr>>20)&0xf)
|
||||
// fmt.Printf("architecture: 0x%01x\n", (midr>>16)&0xf)
|
||||
// fmt.Printf(" part num: 0x%03x\n", (midr>>4)&0xfff)
|
||||
// fmt.Printf(" revision: 0x%01x\n", (midr>>0)&0xf)
|
||||
|
||||
procFeatures := getProcFeatures()
|
||||
|
||||
// ID_AA64PFR0_EL1 - Processor Feature Register 0
|
||||
// x--------------------------------------------------x
|
||||
// | Name | bits | visible |
|
||||
// |--------------------------------------------------|
|
||||
// | DIT | [51-48] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | SVE | [35-32] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | GIC | [27-24] | n |
|
||||
// |--------------------------------------------------|
|
||||
// | AdvSIMD | [23-20] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | FP | [19-16] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | EL3 | [15-12] | n |
|
||||
// |--------------------------------------------------|
|
||||
// | EL2 | [11-8] | n |
|
||||
// |--------------------------------------------------|
|
||||
// | EL1 | [7-4] | n |
|
||||
// |--------------------------------------------------|
|
||||
// | EL0 | [3-0] | n |
|
||||
// x--------------------------------------------------x
|
||||
|
||||
var f ArmFlags
|
||||
// if procFeatures&(0xf<<48) != 0 {
|
||||
// fmt.Println("DIT")
|
||||
// }
|
||||
if procFeatures&(0xf<<32) != 0 {
|
||||
f |= SVE
|
||||
}
|
||||
if procFeatures&(0xf<<20) != 15<<20 {
|
||||
f |= ASIMD
|
||||
if procFeatures&(0xf<<20) == 1<<20 {
|
||||
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1
|
||||
// 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic.
|
||||
f |= FPHP
|
||||
f |= ASIMDHP
|
||||
}
|
||||
}
|
||||
if procFeatures&(0xf<<16) != 0 {
|
||||
f |= FP
|
||||
}
|
||||
|
||||
instAttrReg0, instAttrReg1 := getInstAttributes()
|
||||
|
||||
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
|
||||
//
|
||||
// ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0
|
||||
// x--------------------------------------------------x
|
||||
// | Name | bits | visible |
|
||||
// |--------------------------------------------------|
|
||||
// | TS | [55-52] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | FHM | [51-48] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | DP | [47-44] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | SM4 | [43-40] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | SM3 | [39-36] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | SHA3 | [35-32] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | RDM | [31-28] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | ATOMICS | [23-20] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | CRC32 | [19-16] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | SHA2 | [15-12] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | SHA1 | [11-8] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | AES | [7-4] | y |
|
||||
// x--------------------------------------------------x
|
||||
|
||||
// if instAttrReg0&(0xf<<52) != 0 {
|
||||
// fmt.Println("TS")
|
||||
// }
|
||||
// if instAttrReg0&(0xf<<48) != 0 {
|
||||
// fmt.Println("FHM")
|
||||
// }
|
||||
if instAttrReg0&(0xf<<44) != 0 {
|
||||
f |= ASIMDDP
|
||||
}
|
||||
if instAttrReg0&(0xf<<40) != 0 {
|
||||
f |= SM4
|
||||
}
|
||||
if instAttrReg0&(0xf<<36) != 0 {
|
||||
f |= SM3
|
||||
}
|
||||
if instAttrReg0&(0xf<<32) != 0 {
|
||||
f |= SHA3
|
||||
}
|
||||
if instAttrReg0&(0xf<<28) != 0 {
|
||||
f |= ASIMDRDM
|
||||
}
|
||||
if instAttrReg0&(0xf<<20) != 0 {
|
||||
f |= ATOMICS
|
||||
}
|
||||
if instAttrReg0&(0xf<<16) != 0 {
|
||||
f |= CRC32
|
||||
}
|
||||
if instAttrReg0&(0xf<<12) != 0 {
|
||||
f |= SHA2
|
||||
}
|
||||
if instAttrReg0&(0xf<<12) == 2<<12 {
|
||||
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
|
||||
// 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented.
|
||||
f |= SHA512
|
||||
}
|
||||
if instAttrReg0&(0xf<<8) != 0 {
|
||||
f |= SHA1
|
||||
}
|
||||
if instAttrReg0&(0xf<<4) != 0 {
|
||||
f |= AES
|
||||
}
|
||||
if instAttrReg0&(0xf<<4) == 2<<4 {
|
||||
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
|
||||
// 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities.
|
||||
f |= PMULL
|
||||
}
|
||||
|
||||
// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1
|
||||
//
|
||||
// ID_AA64ISAR1_EL1 - Instruction set attribute register 1
|
||||
// x--------------------------------------------------x
|
||||
// | Name | bits | visible |
|
||||
// |--------------------------------------------------|
|
||||
// | GPI | [31-28] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | GPA | [27-24] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | LRCPC | [23-20] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | FCMA | [19-16] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | JSCVT | [15-12] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | API | [11-8] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | APA | [7-4] | y |
|
||||
// |--------------------------------------------------|
|
||||
// | DPB | [3-0] | y |
|
||||
// x--------------------------------------------------x
|
||||
|
||||
// if instAttrReg1&(0xf<<28) != 0 {
|
||||
// fmt.Println("GPI")
|
||||
// }
|
||||
if instAttrReg1&(0xf<<28) != 24 {
|
||||
f |= GPA
|
||||
}
|
||||
if instAttrReg1&(0xf<<20) != 0 {
|
||||
f |= LRCPC
|
||||
}
|
||||
if instAttrReg1&(0xf<<16) != 0 {
|
||||
f |= FCMA
|
||||
}
|
||||
if instAttrReg1&(0xf<<12) != 0 {
|
||||
f |= JSCVT
|
||||
}
|
||||
// if instAttrReg1&(0xf<<8) != 0 {
|
||||
// fmt.Println("API")
|
||||
// }
|
||||
// if instAttrReg1&(0xf<<4) != 0 {
|
||||
// fmt.Println("APA")
|
||||
// }
|
||||
if instAttrReg1&(0xf<<0) != 0 {
|
||||
f |= DCPOP
|
||||
}
|
||||
c.Arm = f
|
||||
}
|
33
vendor/github.com/klauspost/cpuid/detect_intel.go
generated
vendored
Normal file
33
vendor/github.com/klauspost/cpuid/detect_intel.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
//+build 386,!gccgo,!noasm amd64,!gccgo,!noasm,!appengine
|
||||
|
||||
package cpuid
|
||||
|
||||
func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
|
||||
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
|
||||
func asmXgetbv(index uint32) (eax, edx uint32)
|
||||
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
|
||||
|
||||
func initCPU() {
|
||||
cpuid = asmCpuid
|
||||
cpuidex = asmCpuidex
|
||||
xgetbv = asmXgetbv
|
||||
rdtscpAsm = asmRdtscpAsm
|
||||
}
|
||||
|
||||
func addInfo(c *CPUInfo) {
|
||||
c.maxFunc = maxFunctionID()
|
||||
c.maxExFunc = maxExtendedFunction()
|
||||
c.BrandName = brandName()
|
||||
c.CacheLine = cacheLine()
|
||||
c.Family, c.Model = familyModel()
|
||||
c.Features = support()
|
||||
c.SGX = hasSGX(c.Features&SGX != 0, c.Features&SGXLC != 0)
|
||||
c.ThreadsPerCore = threadsPerCore()
|
||||
c.LogicalCores = logicalCores()
|
||||
c.PhysicalCores = physicalCores()
|
||||
c.VendorID, c.VendorString = vendorID()
|
||||
c.Hz = hertz(c.BrandName)
|
||||
c.cacheSize()
|
||||
}
|
14
vendor/github.com/klauspost/cpuid/detect_ref.go
generated
vendored
Normal file
14
vendor/github.com/klauspost/cpuid/detect_ref.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
|
||||
|
||||
//+build !amd64,!386,!arm64 gccgo noasm appengine
|
||||
|
||||
package cpuid
|
||||
|
||||
func initCPU() {
|
||||
cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
|
||||
rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
|
||||
}
|
||||
|
||||
func addInfo(info *CPUInfo) {}
|
3
vendor/github.com/klauspost/cpuid/go.mod
generated
vendored
Normal file
3
vendor/github.com/klauspost/cpuid/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
module github.com/klauspost/cpuid
|
||||
|
||||
go 1.12
|
202
vendor/github.com/minio/md5-simd/LICENSE
generated
vendored
Normal file
202
vendor/github.com/minio/md5-simd/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
196
vendor/github.com/minio/md5-simd/README.md
generated
vendored
Normal file
196
vendor/github.com/minio/md5-simd/README.md
generated
vendored
Normal file
@ -0,0 +1,196 @@
|
||||
|
||||
# md5-simd
|
||||
|
||||
This is a SIMD accelerated MD5 package, allowing up to either 8 (AVX2) or 16 (AVX512) independent MD5 sums to be calculated on a single CPU core.
|
||||
|
||||
It was originally based on the [md5vec](https://github.com/igneous-systems/md5vec) repository by Igneous Systems, but has been made more flexible by amongst others supporting different message sizes per lane and adding AVX512.
|
||||
|
||||
`md5-simd` integrates a similar mechanism as described in [minio/sha256-simd](https://github.com/minio/sha256-simd#support-for-avx512) for making it easy for clients to take advantages of the parallel nature of the MD5 calculation. This will result in reduced overall CPU load.
|
||||
|
||||
It is important to understand that `md5-simd` **does not speed up** a single threaded MD5 hash sum.
|
||||
Rather it allows multiple __independent__ MD5 sums to be computed in parallel on the same CPU core,
|
||||
thereby making more efficient usage of the computing resources.
|
||||
|
||||
## Usage
|
||||
|
||||
[![Documentation](https://godoc.org/github.com/minio/md5-simd?status.svg)](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc)
|
||||
|
||||
|
||||
In order to use `md5-simd`, you must first create an `Server` which can be
|
||||
used to instantiate one or more objects for MD5 hashing.
|
||||
|
||||
These objects conform to the regular [`hash.Hash`](https://pkg.go.dev/hash?tab=doc#Hash) interface
|
||||
and as such the normal Write/Reset/Sum functionality works as expected.
|
||||
|
||||
As an example:
|
||||
```
|
||||
// Create server
|
||||
server := md5simd.NewServer()
|
||||
defer server.Close()
|
||||
|
||||
// Create hashing object (conforming to hash.Hash)
|
||||
md5Hash := server.NewHash()
|
||||
defer md5Hash.Close()
|
||||
|
||||
// Write one (or more) blocks
|
||||
md5Hash.Write(block)
|
||||
|
||||
// Return digest
|
||||
digest := md5Hash.Sum([]byte{})
|
||||
```
|
||||
|
||||
To keep performance both a [Server](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Server)
|
||||
and individual [Hasher](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Hasher) should
|
||||
be closed using the `Close()` function when no longer needed.
|
||||
|
||||
A Hasher can efficiently be re-used by using [`Reset()`](https://pkg.go.dev/hash?tab=doc#Hash) functionality.
|
||||
|
||||
In case your system does not support the instructions required it will fall back to using `crypto/md5` for hashing.
|
||||
|
||||
## Limitations
|
||||
|
||||
As explained above `md5-simd` does not speed up an individual MD5 hash sum computation,
|
||||
unless some hierarchical tree construct is used but this will result in different outcomes.
|
||||
Running a single hash on a server results in approximately half the throughput.
|
||||
|
||||
Instead, it allows running multiple MD5 calculations in parallel on a single CPU core.
|
||||
This can be beneficial in e.g. multi-threaded server applications where many go-routines
|
||||
are dealing with many requests and multiple MD5 calculations can be packed/scheduled for parallel execution on a single core.
|
||||
|
||||
This will result in a lower overall CPU usage as compared to using the standard `crypto/md5`
|
||||
functionality where each MD5 hash computation will consume a single thread (core).
|
||||
|
||||
It is best to test and measure the overall CPU usage in a representative usage scenario in your application
|
||||
to get an overall understanding of the benefits of `md5-simd` as compared to `crypto/md5`, ideally under heavy CPU load.
|
||||
|
||||
Also note that `md5-simd` is best meant to work with large objects,
|
||||
so if your application only hashes small objects of a few kilobytes
|
||||
you may be better of by using `crypto/md5`.
|
||||
|
||||
## Performance
|
||||
|
||||
For the best performance writes should be a multiple of 64 bytes, ideally a multiple of 32KB.
|
||||
To help with that a [`buffered := bufio.NewWriterSize(hasher, 32<<10)`](https://golang.org/pkg/bufio/#NewWriterSize)
|
||||
can be inserted if you are unsure of the sizes of the writes.
|
||||
Remember to [flush](https://golang.org/pkg/bufio/#Writer.Flush) `buffered` before reading the hash.
|
||||
|
||||
A single 'server' can process 16 streams concurrently with 1 core (AVX-512) or 2 cores (AVX2).
|
||||
In situations where it is likely that more than 16 streams are fully loaded it may be beneficial
|
||||
to use multiple servers.
|
||||
|
||||
The following chart compares the multi-core performance between `crypto/md5` vs the AVX2 vs the AVX512 code:
|
||||
|
||||
![md5-performance-overview](chart/Multi-core-MD5-Aggregated-Hashing-Performance.png)
|
||||
|
||||
Compared to `crypto/md5`, the AVX2 version is up to 4x faster:
|
||||
|
||||
```
|
||||
$ benchcmp crypto-md5.txt avx2.txt
|
||||
benchmark old MB/s new MB/s speedup
|
||||
BenchmarkParallel/32KB-4 2229.22 7370.50 3.31x
|
||||
BenchmarkParallel/64KB-4 2233.61 8248.46 3.69x
|
||||
BenchmarkParallel/128KB-4 2235.43 8660.74 3.87x
|
||||
BenchmarkParallel/256KB-4 2236.39 8863.87 3.96x
|
||||
BenchmarkParallel/512KB-4 2238.05 8985.39 4.01x
|
||||
BenchmarkParallel/1MB-4 2233.56 9042.62 4.05x
|
||||
BenchmarkParallel/2MB-4 2224.11 9014.46 4.05x
|
||||
BenchmarkParallel/4MB-4 2199.78 8993.61 4.09x
|
||||
BenchmarkParallel/8MB-4 2182.48 8748.22 4.01x
|
||||
```
|
||||
|
||||
Compared to `crypto/md5`, the AVX512 is up to 8x faster (for larger block sizes):
|
||||
|
||||
```
|
||||
$ benchcmp crypto-md5.txt avx512.txt
|
||||
benchmark old MB/s new MB/s speedup
|
||||
BenchmarkParallel/32KB-4 2229.22 11605.78 5.21x
|
||||
BenchmarkParallel/64KB-4 2233.61 14329.65 6.42x
|
||||
BenchmarkParallel/128KB-4 2235.43 16166.39 7.23x
|
||||
BenchmarkParallel/256KB-4 2236.39 15570.09 6.96x
|
||||
BenchmarkParallel/512KB-4 2238.05 16705.83 7.46x
|
||||
BenchmarkParallel/1MB-4 2233.56 16941.95 7.59x
|
||||
BenchmarkParallel/2MB-4 2224.11 17136.01 7.70x
|
||||
BenchmarkParallel/4MB-4 2199.78 17218.61 7.83x
|
||||
BenchmarkParallel/8MB-4 2182.48 17252.88 7.91x
|
||||
```
|
||||
|
||||
These measurements were performed on AWS EC2 instance of type `c5.xlarge` equipped with a Xeon Platinum 8124M CPU at 3.0 GHz.
|
||||
|
||||
|
||||
## Operation
|
||||
|
||||
To make operation as easy as possible there is a “Server” coordinating everything. The server keeps track of individual hash states and updates them as new data comes in. This can be visualized as follows:
|
||||
|
||||
![server-architecture](chart/server-architecture.png)
|
||||
|
||||
The data is sent to the server from each hash input in blocks of up to 32KB per round. In our testing we found this to be the block size that yielded the best results.
|
||||
|
||||
Whenever there is data available the server will collect data for up to 16 hashes and process all 16 lanes in parallel. This means that if 16 hashes have data available all the lanes will be filled. However since that may not be the case, the server will fill less lanes and do a round anyway. Lanes can also be partially filled if less than 32KB of data is written.
|
||||
|
||||
![server-lanes-example](chart/server-lanes-example.png)
|
||||
|
||||
In this example 4 lanes are fully filled and 2 lanes are partially filled. In this case the black areas will simply be masked out from the results and ignored. This is also why calculating a single hash on a server will not result in any speedup and hash writes should be a multiple of 32KB for the best performance.
|
||||
|
||||
For AVX512 all 16 calculations will be done on a single core, on AVX2 on 2 cores if there is data for more than 8 lanes.
|
||||
So for optimal usage there should be data available for all 16 hashes. It may be perfectly reasonable to use more than 16 concurrent hashes.
|
||||
|
||||
|
||||
## Design & Tech
|
||||
|
||||
md5-simd has both an AVX2 (8-lane parallel), and an AVX512 (16-lane parallel version) algorithm to accelerate the computation with the following function definitions:
|
||||
```
|
||||
//go:noescape
|
||||
func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
|
||||
|
||||
//go:noescape
|
||||
func block16(state *uint32, ptrs *int64, mask uint64, n int)
|
||||
```
|
||||
|
||||
The AVX2 version is based on the [md5vec](https://github.com/igneous-systems/md5vec) repository and is essentially unchanged except for minor (cosmetic) changes.
|
||||
|
||||
The AVX512 version is derived from the AVX2 version but adds some further optimizations and simplifications.
|
||||
|
||||
### Caching in upper ZMM registers
|
||||
|
||||
The AVX2 version passes in a `cache8` block of memory (about 0.5 KB) for temporary storage of intermediate results during `ROUND1` which are subsequently used during `ROUND2` through to `ROUND4`.
|
||||
|
||||
Since AVX512 has double the amount of registers (32 ZMM registers as compared to 16 YMM registers), it is possible to use the upper 16 ZMM registers for keeping the intermediate states on the CPU. As such, there is no need to pass in a corresponding `cache16` into the AVX512 block function.
|
||||
|
||||
### Direct loading using 64-bit pointers
|
||||
|
||||
The AVX2 version uses the `VPGATHERDD` instruction (for YMM) to do a parallel load of 8 lanes using (8 independent) 32-bit offsets. Since there is no control over how the 8 slices that are passed into the (Golang) `blockMd5` function are laid out into memory, it is not possible to derive a "base" address and corresponding offsets (all within 32-bits) for all 8 slices.
|
||||
|
||||
As such the AVX2 version uses an interim buffer to collect the byte slices to be hashed from all 8 input slices and passes this buffer along with (fixed) 32-bit offsets into the assembly code.
|
||||
|
||||
For the AVX512 version this interim buffer is not needed since the AVX512 code uses a pair of `VPGATHERQD` instructions to directly dereference 64-bit pointers (from a base register address that is initialized to zero).
|
||||
|
||||
Note that two load (gather) instructions are needed because the AVX512 version processes 16-lanes in parallel, requiring 16 times 64-bit = 1024 bits in total to be loaded. A simple `VALIGND` and `VPORD` are subsequently used to merge the lower and upper halves together into a single ZMM register (that contains 16 lanes of 32-bit DWORDS).
|
||||
|
||||
### Masking support
|
||||
|
||||
Due to the fact that pointers are passed directly from the Golang slices, we need to protect against NULL pointers.
|
||||
For this a 16-bit mask is passed to the AVX512 assembly code which is used during the `VPGATHERQD` instructions to mask out lanes that could otherwise result in segment violations.
|
||||
|
||||
### Minor optimizations
|
||||
|
||||
The `roll` macro (three instructions on AVX2) is no longer needed for AVX512 and is replaced by a single `VPROLD` instruction.
|
||||
|
||||
Also several logical operations from the various ROUNDS of the AVX2 version could be combined into a single instruction using ternary logic (with the `VPTERNLOGD` instruction), resulting in a further simplification and speed-up.
|
||||
|
||||
## Low level block function performance
|
||||
|
||||
The benchmark below shows the (single thread) maximum performance of the `block()` function for AVX2 (having 8 lanes) and AVX512 (having 16 lanes). Also the baseline single-core performance from the standard `crypto/md5` package is shown for comparison.
|
||||
|
||||
```
|
||||
BenchmarkCryptoMd5-4 687.66 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkBlock8-4 4144.80 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkBlock16-4 8228.88 MB/s 0 B/op 0 allocs/op
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
`md5-simd` is released under the Apache License v2.0. You can find the complete text in the file LICENSE.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome, please send PRs for any enhancements.
|
132
vendor/github.com/minio/md5-simd/block-generic.go
generated
vendored
Normal file
132
vendor/github.com/minio/md5-simd/block-generic.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Code generated by go run gen.go -output md5block.go; DO NOT EDIT.
|
||||
|
||||
package md5simd
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// digest holds the running MD5 state for the generic (pure Go) block
// implementation. Only s is read and written by blockGeneric below; the
// remaining fields mirror crypto/md5's digest and are maintained by code
// outside this file.
type digest struct {
	s   [4]uint32       // chaining state words A, B, C, D
	x   [BlockSize]byte // staging buffer for a partial input block (managed elsewhere)
	nx  int             // number of bytes currently buffered in x (managed elsewhere)
	len uint64          // total number of bytes hashed so far (managed elsewhere)
}
||||
|
||||
// blockGeneric applies the standard MD5 compression function (RFC 1321)
// to every complete 64-byte block in p, updating dig.s in place.
// Any trailing bytes beyond the last full BlockSize boundary are ignored
// by this function (callers buffer them in dig.x).
func blockGeneric(dig *digest, p []byte) {
	// load state
	a, b, c, d := dig.s[0], dig.s[1], dig.s[2], dig.s[3]

	for i := 0; i <= len(p)-BlockSize; i += BlockSize {
		// eliminate bounds checks on p
		q := p[i:]
		q = q[:BlockSize:BlockSize]

		// save current state
		aa, bb, cc, dd := a, b, c, d

		// load input block (16 little-endian 32-bit words)
		x0 := binary.LittleEndian.Uint32(q[4*0x0:])
		x1 := binary.LittleEndian.Uint32(q[4*0x1:])
		x2 := binary.LittleEndian.Uint32(q[4*0x2:])
		x3 := binary.LittleEndian.Uint32(q[4*0x3:])
		x4 := binary.LittleEndian.Uint32(q[4*0x4:])
		x5 := binary.LittleEndian.Uint32(q[4*0x5:])
		x6 := binary.LittleEndian.Uint32(q[4*0x6:])
		x7 := binary.LittleEndian.Uint32(q[4*0x7:])
		x8 := binary.LittleEndian.Uint32(q[4*0x8:])
		x9 := binary.LittleEndian.Uint32(q[4*0x9:])
		xa := binary.LittleEndian.Uint32(q[4*0xa:])
		xb := binary.LittleEndian.Uint32(q[4*0xb:])
		xc := binary.LittleEndian.Uint32(q[4*0xc:])
		xd := binary.LittleEndian.Uint32(q[4*0xd:])
		xe := binary.LittleEndian.Uint32(q[4*0xe:])
		xf := binary.LittleEndian.Uint32(q[4*0xf:])

		// round 1: F(b,c,d) = d ^ (b & (c ^ d)), computed as ((c^d)&b)^d
		a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x0+0xd76aa478, 7)
		d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x1+0xe8c7b756, 12)
		c = d + bits.RotateLeft32((((a^b)&d)^b)+c+x2+0x242070db, 17)
		b = c + bits.RotateLeft32((((d^a)&c)^a)+b+x3+0xc1bdceee, 22)
		a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x4+0xf57c0faf, 7)
		d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x5+0x4787c62a, 12)
		c = d + bits.RotateLeft32((((a^b)&d)^b)+c+x6+0xa8304613, 17)
		b = c + bits.RotateLeft32((((d^a)&c)^a)+b+x7+0xfd469501, 22)
		a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x8+0x698098d8, 7)
		d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x9+0x8b44f7af, 12)
		c = d + bits.RotateLeft32((((a^b)&d)^b)+c+xa+0xffff5bb1, 17)
		b = c + bits.RotateLeft32((((d^a)&c)^a)+b+xb+0x895cd7be, 22)
		a = b + bits.RotateLeft32((((c^d)&b)^d)+a+xc+0x6b901122, 7)
		d = a + bits.RotateLeft32((((b^c)&a)^c)+d+xd+0xfd987193, 12)
		c = d + bits.RotateLeft32((((a^b)&d)^b)+c+xe+0xa679438e, 17)
		b = c + bits.RotateLeft32((((d^a)&c)^a)+b+xf+0x49b40821, 22)

		// round 2: G(b,c,d) = c ^ (d & (b ^ c)), computed as ((b^c)&d)^c
		a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x1+0xf61e2562, 5)
		d = a + bits.RotateLeft32((((a^b)&c)^b)+d+x6+0xc040b340, 9)
		c = d + bits.RotateLeft32((((d^a)&b)^a)+c+xb+0x265e5a51, 14)
		b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x0+0xe9b6c7aa, 20)
		a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x5+0xd62f105d, 5)
		d = a + bits.RotateLeft32((((a^b)&c)^b)+d+xa+0x02441453, 9)
		c = d + bits.RotateLeft32((((d^a)&b)^a)+c+xf+0xd8a1e681, 14)
		b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x4+0xe7d3fbc8, 20)
		a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x9+0x21e1cde6, 5)
		d = a + bits.RotateLeft32((((a^b)&c)^b)+d+xe+0xc33707d6, 9)
		c = d + bits.RotateLeft32((((d^a)&b)^a)+c+x3+0xf4d50d87, 14)
		b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x8+0x455a14ed, 20)
		a = b + bits.RotateLeft32((((b^c)&d)^c)+a+xd+0xa9e3e905, 5)
		d = a + bits.RotateLeft32((((a^b)&c)^b)+d+x2+0xfcefa3f8, 9)
		c = d + bits.RotateLeft32((((d^a)&b)^a)+c+x7+0x676f02d9, 14)
		b = c + bits.RotateLeft32((((c^d)&a)^d)+b+xc+0x8d2a4c8a, 20)

		// round 3: H(b,c,d) = b ^ c ^ d
		a = b + bits.RotateLeft32((b^c^d)+a+x5+0xfffa3942, 4)
		d = a + bits.RotateLeft32((a^b^c)+d+x8+0x8771f681, 11)
		c = d + bits.RotateLeft32((d^a^b)+c+xb+0x6d9d6122, 16)
		b = c + bits.RotateLeft32((c^d^a)+b+xe+0xfde5380c, 23)
		a = b + bits.RotateLeft32((b^c^d)+a+x1+0xa4beea44, 4)
		d = a + bits.RotateLeft32((a^b^c)+d+x4+0x4bdecfa9, 11)
		c = d + bits.RotateLeft32((d^a^b)+c+x7+0xf6bb4b60, 16)
		b = c + bits.RotateLeft32((c^d^a)+b+xa+0xbebfbc70, 23)
		a = b + bits.RotateLeft32((b^c^d)+a+xd+0x289b7ec6, 4)
		d = a + bits.RotateLeft32((a^b^c)+d+x0+0xeaa127fa, 11)
		c = d + bits.RotateLeft32((d^a^b)+c+x3+0xd4ef3085, 16)
		b = c + bits.RotateLeft32((c^d^a)+b+x6+0x04881d05, 23)
		a = b + bits.RotateLeft32((b^c^d)+a+x9+0xd9d4d039, 4)
		d = a + bits.RotateLeft32((a^b^c)+d+xc+0xe6db99e5, 11)
		c = d + bits.RotateLeft32((d^a^b)+c+xf+0x1fa27cf8, 16)
		b = c + bits.RotateLeft32((c^d^a)+b+x2+0xc4ac5665, 23)

		// round 4: I(b,c,d) = c ^ (b | ^d)
		a = b + bits.RotateLeft32((c^(b|^d))+a+x0+0xf4292244, 6)
		d = a + bits.RotateLeft32((b^(a|^c))+d+x7+0x432aff97, 10)
		c = d + bits.RotateLeft32((a^(d|^b))+c+xe+0xab9423a7, 15)
		b = c + bits.RotateLeft32((d^(c|^a))+b+x5+0xfc93a039, 21)
		a = b + bits.RotateLeft32((c^(b|^d))+a+xc+0x655b59c3, 6)
		d = a + bits.RotateLeft32((b^(a|^c))+d+x3+0x8f0ccc92, 10)
		c = d + bits.RotateLeft32((a^(d|^b))+c+xa+0xffeff47d, 15)
		b = c + bits.RotateLeft32((d^(c|^a))+b+x1+0x85845dd1, 21)
		a = b + bits.RotateLeft32((c^(b|^d))+a+x8+0x6fa87e4f, 6)
		d = a + bits.RotateLeft32((b^(a|^c))+d+xf+0xfe2ce6e0, 10)
		c = d + bits.RotateLeft32((a^(d|^b))+c+x6+0xa3014314, 15)
		b = c + bits.RotateLeft32((d^(c|^a))+b+xd+0x4e0811a1, 21)
		a = b + bits.RotateLeft32((c^(b|^d))+a+x4+0xf7537e82, 6)
		d = a + bits.RotateLeft32((b^(a|^c))+d+xb+0xbd3af235, 10)
		c = d + bits.RotateLeft32((a^(d|^b))+c+x2+0x2ad7d2bb, 15)
		b = c + bits.RotateLeft32((d^(c|^a))+b+x9+0xeb86d391, 21)

		// add saved state
		a += aa
		b += bb
		c += cc
		d += dd
	}

	// save state
	dig.s[0], dig.s[1], dig.s[2], dig.s[3] = a, b, c, d
}
|
227
vendor/github.com/minio/md5-simd/block16_amd64.s
generated
vendored
Normal file
227
vendor/github.com/minio/md5-simd/block16_amd64.s
generated
vendored
Normal file
@ -0,0 +1,227 @@
|
||||
// Copyright (c) 2020 MinIO Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// This is the AVX512 implementation of the MD5 block function (16-way parallel)
|
||||
|
||||
// prep(index): masked 16-lane gather of dword `index` from each lane's
// buffer (32-bit offsets in ptrs, relative to base) into mem.
// kmask is copied into ktmp first because VPGATHERDD clears the mask
// register as lanes complete.
#define prep(index) \
	KMOVQ kmask, ktmp \
	VPGATHERDD index*4(base)(ptrs*1), ktmp, mem

// ROUND1: one MD5 round-1 step; F(b,c,d) folded into a single
// VPTERNLOGD ($0x6C). Prefetches the next message word via prep(index)
// in the middle of the step to hide gather latency.
#define ROUND1(a, b, c, d, index, const, shift) \
	VXORPS c, tmp, tmp \
	VPADDD 64*const(consts), a, a \
	VPADDD mem, a, a \
	VPTERNLOGD $0x6C, b, d, tmp \
	prep(index) \
	VPADDD tmp, a, a \
	VPROLD $shift, a, a \
	VMOVAPD c, tmp \
	VPADDD b, a, a

// ROUND1noload: last round-1 step — identical to ROUND1 but with no
// further message word to gather.
#define ROUND1noload(a, b, c, d, const, shift) \
	VXORPS c, tmp, tmp \
	VPADDD 64*const(consts), a, a \
	VPADDD mem, a, a \
	VPTERNLOGD $0x6C, b, d, tmp \
	VPADDD tmp, a, a \
	VPROLD $shift, a, a \
	VMOVAPD c, tmp \
	VPADDD b, a, a

// ROUND2: round-2 step reading its message word from a cached ZMM
// register (zreg); G(b,c,d) via VANDNPS + VPTERNLOGD ($0xEC).
#define ROUND2(a, b, c, d, zreg, const, shift) \
	VPADDD 64*const(consts), a, a \
	VPADDD zreg, a, a \
	VANDNPS c, tmp, tmp \
	VPTERNLOGD $0xEC, b, tmp, tmp2 \
	VMOVAPD c, tmp \
	VPADDD tmp2, a, a \
	VMOVAPD c, tmp2 \
	VPROLD $shift, a, a \
	VPADDD b, a, a

// ROUND3: round-3 step; H(b,c,d) = b^c^d is a single three-input XOR
// (VPTERNLOGD $0x96).
#define ROUND3(a, b, c, d, zreg, const, shift) \
	VPADDD 64*const(consts), a, a \
	VPADDD zreg, a, a \
	VPTERNLOGD $0x96, b, d, tmp \
	VPADDD tmp, a, a \
	VPROLD $shift, a, a \
	VMOVAPD b, tmp \
	VPADDD b, a, a

// ROUND4: round-4 step; I(b,c,d) via VPTERNLOGD ($0x36), with the
// ^d input for the next step prepared through the ones register.
#define ROUND4(a, b, c, d, zreg, const, shift) \
	VPADDD 64*const(consts), a, a \
	VPADDD zreg, a, a \
	VPTERNLOGD $0x36, b, c, tmp \
	VPADDD tmp, a, a \
	VPROLD $shift, a, a \
	VXORPS c, ones, tmp \
	VPADDD b, a, a

// func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int)
TEXT ·block16(SB),4,$0-40

	MOVQ state+0(FP), BX
	MOVQ base+8(FP), SI
	MOVQ ptrs+16(FP), AX
	KMOVQ mask+24(FP), K1
	MOVQ n+32(FP), DX
	MOVQ ·avx512md5consts+0(SB), DI

	// MD5 working state, one 32-bit lane per slot (16 lanes per ZMM)
#define a Z0
#define b Z1
#define c Z2
#define d Z3

	// saved state for the end-of-block feed-forward addition
#define sa Z4
#define sb Z5
#define sc Z6
#define sd Z7

#define tmp Z8
#define tmp2 Z9
#define ptrs Z10
#define ones Z12
#define mem Z15

#define kmask K1
#define ktmp K3

// ----------------------------------------------------------
// Registers Z16 through to Z31 are used for caching purposes
// ----------------------------------------------------------

#define dig BX
#define count DX
#define base SI
#define consts DI

	// load digest into state registers
	VMOVUPD (dig), a
	VMOVUPD 0x40(dig), b
	VMOVUPD 0x80(dig), c
	VMOVUPD 0xc0(dig), d

	// load source pointers
	VMOVUPD 0x00(AX), ptrs

	// all-ones constant used to synthesize NOT for round 4
	MOVQ $-1, AX
	VPBROADCASTQ AX, ones

loop:
	// snapshot state for the feed-forward at the end of this block
	VMOVAPD a, sa
	VMOVAPD b, sb
	VMOVAPD c, sc
	VMOVAPD d, sd

	prep(0)
	VMOVAPD d, tmp
	VMOVAPD mem, Z16

	// Round 1 also parks each gathered message word in Z16..Z31 so
	// rounds 2-4 read from registers instead of re-gathering.
	ROUND1(a,b,c,d, 1,0x00, 7)
	VMOVAPD mem, Z17
	ROUND1(d,a,b,c, 2,0x01,12)
	VMOVAPD mem, Z18
	ROUND1(c,d,a,b, 3,0x02,17)
	VMOVAPD mem, Z19
	ROUND1(b,c,d,a, 4,0x03,22)
	VMOVAPD mem, Z20
	ROUND1(a,b,c,d, 5,0x04, 7)
	VMOVAPD mem, Z21
	ROUND1(d,a,b,c, 6,0x05,12)
	VMOVAPD mem, Z22
	ROUND1(c,d,a,b, 7,0x06,17)
	VMOVAPD mem, Z23
	ROUND1(b,c,d,a, 8,0x07,22)
	VMOVAPD mem, Z24
	ROUND1(a,b,c,d, 9,0x08, 7)
	VMOVAPD mem, Z25
	ROUND1(d,a,b,c,10,0x09,12)
	VMOVAPD mem, Z26
	ROUND1(c,d,a,b,11,0x0a,17)
	VMOVAPD mem, Z27
	ROUND1(b,c,d,a,12,0x0b,22)
	VMOVAPD mem, Z28
	ROUND1(a,b,c,d,13,0x0c, 7)
	VMOVAPD mem, Z29
	ROUND1(d,a,b,c,14,0x0d,12)
	VMOVAPD mem, Z30
	ROUND1(c,d,a,b,15,0x0e,17)
	VMOVAPD mem, Z31

	ROUND1noload(b,c,d,a, 0x0f,22)

	VMOVAPD d, tmp
	VMOVAPD d, tmp2

	// Rounds 2-4 use the message schedule from the cached registers.
	ROUND2(a,b,c,d, Z17,0x10, 5)
	ROUND2(d,a,b,c, Z22,0x11, 9)
	ROUND2(c,d,a,b, Z27,0x12,14)
	ROUND2(b,c,d,a, Z16,0x13,20)
	ROUND2(a,b,c,d, Z21,0x14, 5)
	ROUND2(d,a,b,c, Z26,0x15, 9)
	ROUND2(c,d,a,b, Z31,0x16,14)
	ROUND2(b,c,d,a, Z20,0x17,20)
	ROUND2(a,b,c,d, Z25,0x18, 5)
	ROUND2(d,a,b,c, Z30,0x19, 9)
	ROUND2(c,d,a,b, Z19,0x1a,14)
	ROUND2(b,c,d,a, Z24,0x1b,20)
	ROUND2(a,b,c,d, Z29,0x1c, 5)
	ROUND2(d,a,b,c, Z18,0x1d, 9)
	ROUND2(c,d,a,b, Z23,0x1e,14)
	ROUND2(b,c,d,a, Z28,0x1f,20)

	VMOVAPD c, tmp

	ROUND3(a,b,c,d, Z21,0x20, 4)
	ROUND3(d,a,b,c, Z24,0x21,11)
	ROUND3(c,d,a,b, Z27,0x22,16)
	ROUND3(b,c,d,a, Z30,0x23,23)
	ROUND3(a,b,c,d, Z17,0x24, 4)
	ROUND3(d,a,b,c, Z20,0x25,11)
	ROUND3(c,d,a,b, Z23,0x26,16)
	ROUND3(b,c,d,a, Z26,0x27,23)
	ROUND3(a,b,c,d, Z29,0x28, 4)
	ROUND3(d,a,b,c, Z16,0x29,11)
	ROUND3(c,d,a,b, Z19,0x2a,16)
	ROUND3(b,c,d,a, Z22,0x2b,23)
	ROUND3(a,b,c,d, Z25,0x2c, 4)
	ROUND3(d,a,b,c, Z28,0x2d,11)
	ROUND3(c,d,a,b, Z31,0x2e,16)
	ROUND3(b,c,d,a, Z18,0x2f,23)

	VXORPS d, ones, tmp

	ROUND4(a,b,c,d, Z16,0x30, 6)
	ROUND4(d,a,b,c, Z23,0x31,10)
	ROUND4(c,d,a,b, Z30,0x32,15)
	ROUND4(b,c,d,a, Z21,0x33,21)
	ROUND4(a,b,c,d, Z28,0x34, 6)
	ROUND4(d,a,b,c, Z19,0x35,10)
	ROUND4(c,d,a,b, Z26,0x36,15)
	ROUND4(b,c,d,a, Z17,0x37,21)
	ROUND4(a,b,c,d, Z24,0x38, 6)
	ROUND4(d,a,b,c, Z31,0x39,10)
	ROUND4(c,d,a,b, Z22,0x3a,15)
	ROUND4(b,c,d,a, Z29,0x3b,21)
	ROUND4(a,b,c,d, Z20,0x3c, 6)
	ROUND4(d,a,b,c, Z27,0x3d,10)
	ROUND4(c,d,a,b, Z18,0x3e,15)
	ROUND4(b,c,d,a, Z25,0x3f,21)

	// feed-forward: add the saved state back in
	VPADDD sa, a, a
	VPADDD sb, b, b
	VPADDD sc, c, c
	VPADDD sd, d, d

	// advance to the next 64-byte block for all lanes
	LEAQ 64(base), base
	SUBQ $64, count
	JNE loop

	// store updated digest
	VMOVUPD a, (dig)
	VMOVUPD b, 0x40(dig)
	VMOVUPD c, 0x80(dig)
	VMOVUPD d, 0xc0(dig)

	VZEROUPPER
	RET
|
279
vendor/github.com/minio/md5-simd/block8_amd64.s
generated
vendored
Normal file
279
vendor/github.com/minio/md5-simd/block8_amd64.s
generated
vendored
Normal file
@ -0,0 +1,279 @@
|
||||
// Copyright (c) 2018 Igneous Systems
|
||||
// MIT License
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
// Copyright (c) 2020 MinIO Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// This is the AVX2 implementation of the MD5 block function (8-way parallel)
|
||||
|
||||
// block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
// (NOTE(review): the original comment said *uint64; the Go declaration
// in block_amd64.go is *uint32.)
TEXT ·block8(SB), 4, $0-40
	MOVQ state+0(FP), BX
	MOVQ base+8(FP), SI
	MOVQ bufs+16(FP), AX
	MOVQ cache+24(FP), CX
	MOVQ n+32(FP), DX
	MOVQ ·avx256md5consts+0(SB), DI

	// Align cache (which is stack allocated by the compiler)
	// to a 256 bit boundary (ymm register alignment)
	// The cache8 type is deliberately oversized to permit this.
	ADDQ $31, CX
	ANDB $-32, CL

	// MD5 working state, one 32-bit lane per slot (8 lanes per YMM)
#define a Y0
#define b Y1
#define c Y2
#define d Y3

	// saved state for the end-of-block feed-forward addition
#define sa Y4
#define sb Y5
#define sc Y6
#define sd Y7

#define tmp Y8
#define tmp2 Y9

#define mask Y10
#define off Y11

#define ones Y12

#define rtmp1 Y13
#define rtmp2 Y14

#define mem Y15

#define dig BX
#define cache CX
#define count DX
#define base SI
#define consts DI

// prepmask: build an all-lanes-active gather mask (off is assumed
// positive, so mask = off > 0 sets every lane).
#define prepmask \
	VXORPS mask, mask, mask \
	VPCMPGTD mask, off, mask

// prep(index): gather dword `index` of each lane's input (32-bit
// offsets in off, relative to base) into mem; VPGATHERDD consumes
// the mask copy in rtmp2.
#define prep(index) \
	VMOVAPD mask, rtmp2 \
	VPGATHERDD rtmp2, index*4(base)(off*1), mem

// load/store(index): spill/reload one gathered message word to the
// 32-byte-aligned stack cache (AVX2 lacks spare registers for caching).
#define load(index) \
	VMOVAPD index*32(cache), mem

#define store(index) \
	VMOVAPD mem, index*32(cache)

// roll: rotate-left by shift (AVX2 has no VPROLD; emulate with
// shift-shift-or).
#define roll(shift, a) \
	VPSLLD $shift, a, rtmp1 \
	VPSRLD $32-shift, a, a \
	VORPS rtmp1, a, a

// ROUND1: one MD5 round-1 step, gathering the next word mid-step.
#define ROUND1(a, b, c, d, index, const, shift) \
	VXORPS c, tmp, tmp \
	VPADDD 32*const(consts), a, a \
	VPADDD mem, a, a \
	VANDPS b, tmp, tmp \
	VXORPS d, tmp, tmp \
	prep(index) \
	VPADDD tmp, a, a \
	roll(shift,a) \
	VMOVAPD c, tmp \
	VPADDD b, a, a

// ROUND1load: round-1 step reading the next word from the stack cache
// instead of gathering.
#define ROUND1load(a, b, c, d, index, const, shift) \
	VXORPS c, tmp, tmp \
	VPADDD 32*const(consts), a, a \
	VPADDD mem, a, a \
	VANDPS b, tmp, tmp \
	VXORPS d, tmp, tmp \
	load(index) \
	VPADDD tmp, a, a \
	roll(shift,a) \
	VMOVAPD c, tmp \
	VPADDD b, a, a

// ROUND2: round-2 step; G(b,c,d) = (b&d) | (c&^d).
#define ROUND2(a, b, c, d, index, const, shift) \
	VPADDD 32*const(consts), a, a \
	VPADDD mem, a, a \
	VANDPS b, tmp2, tmp2 \
	VANDNPS c, tmp, tmp \
	load(index) \
	VORPS tmp, tmp2, tmp2 \
	VMOVAPD c, tmp \
	VPADDD tmp2, a, a \
	VMOVAPD c, tmp2 \
	roll(shift,a) \
	VPADDD b, a, a

// ROUND3: round-3 step; H(b,c,d) = b ^ c ^ d.
#define ROUND3(a, b, c, d, index, const, shift) \
	VPADDD 32*const(consts), a, a \
	VPADDD mem, a, a \
	load(index) \
	VXORPS d, tmp, tmp \
	VXORPS b, tmp, tmp \
	VPADDD tmp, a, a \
	roll(shift,a) \
	VMOVAPD b, tmp \
	VPADDD b, a, a

// ROUND4: round-4 step; I(b,c,d) = c ^ (b | ^d), with ^d prepared via
// the all-ones register.
#define ROUND4(a, b, c, d, index, const, shift) \
	VPADDD 32*const(consts), a, a \
	VPADDD mem, a, a \
	VORPS b, tmp, tmp \
	VXORPS c, tmp, tmp \
	VPADDD tmp, a, a \
	load(index) \
	roll(shift,a) \
	VXORPS c, ones, tmp \
	VPADDD b, a, a

	// load digest into state registers
	VMOVUPD (dig), a
	VMOVUPD 32(dig), b
	VMOVUPD 64(dig), c
	VMOVUPD 96(dig), d

	// load source buffer offsets
	VMOVUPD (AX), off

	prepmask
	VPCMPEQD ones, ones, ones

loop:
	// snapshot state for the feed-forward at the end of this block
	VMOVAPD a, sa
	VMOVAPD b, sb
	VMOVAPD c, sc
	VMOVAPD d, sd

	prep(0)
	VMOVAPD d, tmp
	store(0)

	// Round 1 also spills each gathered word to the stack cache so
	// rounds 2-4 can reload instead of re-gathering.
	ROUND1(a,b,c,d, 1,0x00, 7)
	store(1)
	ROUND1(d,a,b,c, 2,0x01,12)
	store(2)
	ROUND1(c,d,a,b, 3,0x02,17)
	store(3)
	ROUND1(b,c,d,a, 4,0x03,22)
	store(4)
	ROUND1(a,b,c,d, 5,0x04, 7)
	store(5)
	ROUND1(d,a,b,c, 6,0x05,12)
	store(6)
	ROUND1(c,d,a,b, 7,0x06,17)
	store(7)
	ROUND1(b,c,d,a, 8,0x07,22)
	store(8)
	ROUND1(a,b,c,d, 9,0x08, 7)
	store(9)
	ROUND1(d,a,b,c,10,0x09,12)
	store(10)
	ROUND1(c,d,a,b,11,0x0a,17)
	store(11)
	ROUND1(b,c,d,a,12,0x0b,22)
	store(12)
	ROUND1(a,b,c,d,13,0x0c, 7)
	store(13)
	ROUND1(d,a,b,c,14,0x0d,12)
	store(14)
	ROUND1(c,d,a,b,15,0x0e,17)
	store(15)
	ROUND1load(b,c,d,a, 1,0x0f,22)

	VMOVAPD d, tmp
	VMOVAPD d, tmp2

	ROUND2(a,b,c,d, 6,0x10, 5)
	ROUND2(d,a,b,c,11,0x11, 9)
	ROUND2(c,d,a,b, 0,0x12,14)
	ROUND2(b,c,d,a, 5,0x13,20)
	ROUND2(a,b,c,d,10,0x14, 5)
	ROUND2(d,a,b,c,15,0x15, 9)
	ROUND2(c,d,a,b, 4,0x16,14)
	ROUND2(b,c,d,a, 9,0x17,20)
	ROUND2(a,b,c,d,14,0x18, 5)
	ROUND2(d,a,b,c, 3,0x19, 9)
	ROUND2(c,d,a,b, 8,0x1a,14)
	ROUND2(b,c,d,a,13,0x1b,20)
	ROUND2(a,b,c,d, 2,0x1c, 5)
	ROUND2(d,a,b,c, 7,0x1d, 9)
	ROUND2(c,d,a,b,12,0x1e,14)
	ROUND2(b,c,d,a, 0,0x1f,20)

	load(5)
	VMOVAPD c, tmp

	ROUND3(a,b,c,d, 8,0x20, 4)
	ROUND3(d,a,b,c,11,0x21,11)
	ROUND3(c,d,a,b,14,0x22,16)
	ROUND3(b,c,d,a, 1,0x23,23)
	ROUND3(a,b,c,d, 4,0x24, 4)
	ROUND3(d,a,b,c, 7,0x25,11)
	ROUND3(c,d,a,b,10,0x26,16)
	ROUND3(b,c,d,a,13,0x27,23)
	ROUND3(a,b,c,d, 0,0x28, 4)
	ROUND3(d,a,b,c, 3,0x29,11)
	ROUND3(c,d,a,b, 6,0x2a,16)
	ROUND3(b,c,d,a, 9,0x2b,23)
	ROUND3(a,b,c,d,12,0x2c, 4)
	ROUND3(d,a,b,c,15,0x2d,11)
	ROUND3(c,d,a,b, 2,0x2e,16)
	ROUND3(b,c,d,a, 0,0x2f,23)

	load(0)
	VXORPS d, ones, tmp

	ROUND4(a,b,c,d, 7,0x30, 6)
	ROUND4(d,a,b,c,14,0x31,10)
	ROUND4(c,d,a,b, 5,0x32,15)
	ROUND4(b,c,d,a,12,0x33,21)
	ROUND4(a,b,c,d, 3,0x34, 6)
	ROUND4(d,a,b,c,10,0x35,10)
	ROUND4(c,d,a,b, 1,0x36,15)
	ROUND4(b,c,d,a, 8,0x37,21)
	ROUND4(a,b,c,d,15,0x38, 6)
	ROUND4(d,a,b,c, 6,0x39,10)
	ROUND4(c,d,a,b,13,0x3a,15)
	ROUND4(b,c,d,a, 4,0x3b,21)
	ROUND4(a,b,c,d,11,0x3c, 6)
	ROUND4(d,a,b,c, 2,0x3d,10)
	ROUND4(c,d,a,b, 9,0x3e,15)
	ROUND4(b,c,d,a, 0,0x3f,21)

	// feed-forward: add the saved state back in
	VPADDD sa, a, a
	VPADDD sb, b, b
	VPADDD sc, c, c
	VPADDD sd, d, d

	// advance to the next 64-byte block for all lanes
	LEAQ 64(base), base
	SUBQ $64, count
	JNE loop

	// store updated digest
	VMOVUPD a, (dig)
	VMOVUPD b, 32(dig)
	VMOVUPD c, 64(dig)
	VMOVUPD d, 96(dig)

	VZEROUPPER
	RET
|
199
vendor/github.com/minio/md5-simd/block_amd64.go
generated
vendored
Normal file
199
vendor/github.com/minio/md5-simd/block_amd64.go
generated
vendored
Normal file
@ -0,0 +1,199 @@
|
||||
//+build !noasm,!appengine,gc
|
||||
|
||||
// Copyright (c) 2020 MinIO Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package md5simd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/klauspost/cpuid"
|
||||
)
|
||||
|
||||
// hasAVX512 records at init time whether the CPU supports AVX-512F;
// blockMd5_x16 uses it to select the 16-lane kernel over the 8-lane one.
var hasAVX512 bool

// block8 is the AVX2 assembly kernel: 8 parallel MD5 lanes, reading each
// lane's input at the 32-bit offset bufs[i] relative to base, using cache
// as aligned scratch storage, processing n bytes per lane.
//go:noescape
func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)

// block16 is the AVX512 assembly kernel: 16 parallel MD5 lanes, with ptrs
// as 32-bit offsets relative to base and mask selecting active lanes;
// processes n bytes per lane.
//go:noescape
func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int)
|
||||
|
||||
// 8-way 4x uint32 digests in 4 ymm registers
// (ymm0, ymm1, ymm2, ymm3)
//
// vN holds MD5 state word N for each of the 8 lanes, matching the four
// YMM state registers used by block8.
type digest8 struct {
	v0, v1, v2, v3 [8]uint32
}

// Stack cache for 8x64 byte md5.BlockSize bytes.
// Must be 32-byte aligned, so allocate 512+32 and
// align upwards at runtime.
type cache8 [512 + 32]byte
|
||||
|
||||
// MD5 magic numbers for one lane of hashing; inflated
// 8x below at init time. These are the 64 per-step additive constants
// T[i] from RFC 1321.
var md5consts = [64]uint32{
	0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
	0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
	0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
	0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
	0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
	0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
	0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
	0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
	0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
	0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
	0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
	0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
	0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
	0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
	0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
	0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
}

// inflate the consts 8-way for 8x md5 (256 bit ymm registers):
// each constant is repeated 8 times consecutively so a single VPADDD
// applies it to all 8 lanes at once.
var avx256md5consts = func(c []uint32) []uint32 {
	out := make([]uint32, 0, 8*len(c))
	for _, v := range c {
		for j := 0; j < 8; j++ {
			out = append(out, v)
		}
	}
	return out
}(md5consts[:])
|
||||
|
||||
// 16-way 4x uint32 digests in 4 zmm registers
//
// vN holds MD5 state word N for each of the 16 lanes, matching the four
// ZMM state registers used by block16.
type digest16 struct {
	v0, v1, v2, v3 [16]uint32
}
|
||||
|
||||
// inflate the consts 16-way for 16x md5 (512 bit zmm registers)
|
||||
var avx512md5consts = func(c []uint32) []uint32 {
|
||||
inf := make([]uint32, 16*len(c))
|
||||
for i := range c {
|
||||
for j := 0; j < 16; j++ {
|
||||
inf[(i*16)+j] = c[i]
|
||||
}
|
||||
}
|
||||
return inf
|
||||
}(md5consts[:])
|
||||
|
||||
// init detects AVX-512F support once at startup; blockMd5_x16 uses the
// result to choose between the AVX512 and AVX2 kernels.
func init() {
	hasAVX512 = cpuid.CPU.AVX512F()
}
|
||||
|
||||
// Interface function to assembly code.
//
// blockMd5_x16 dispatches one batch of 16 lanes to the assembly kernels.
// On AVX-512 capable CPUs a single call handles all 16 lanes; otherwise
// the batch is split into two 8-lane AVX2 halves. When half is true only
// lanes 0-7 carry data, so one AVX2 call suffices.
func (s *md5Server) blockMd5_x16(d *digest16, input [16][]byte, half bool) {
	if hasAVX512 {
		blockMd5_avx512(d, input, s.allBufs, &s.maskRounds16)
	} else {
		// Split the 16-wide digest state into two 8-wide halves.
		d8a, d8b := digest8{}, digest8{}
		for i := range d8a.v0 {
			j := i + 8
			d8a.v0[i], d8a.v1[i], d8a.v2[i], d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i]
			if !half {
				d8b.v0[i], d8b.v1[i], d8b.v2[i], d8b.v3[i] = d.v0[j], d.v1[j], d.v2[j], d.v3[j]
			}
		}

		// Split the inputs into lanes 0-7 and 8-15.
		i8 := [2][8][]byte{}
		for i := range i8[0] {
			i8[0][i], i8[1][i] = input[i], input[8+i]
		}
		if half {
			blockMd5_avx2(&d8a, i8[0], s.allBufs, &s.maskRounds8a)
		} else {
			// Run the two 8-lane halves concurrently (the "2 cores" case
			// described in the README).
			wg := sync.WaitGroup{}
			wg.Add(2)
			go func() { blockMd5_avx2(&d8a, i8[0], s.allBufs, &s.maskRounds8a); wg.Done() }()
			go func() { blockMd5_avx2(&d8b, i8[1], s.allBufs, &s.maskRounds8b); wg.Done() }()
			wg.Wait()
		}

		// Merge the two 8-wide results back into the 16-wide digest.
		for i := range d8a.v0 {
			j := i + 8
			d.v0[i], d.v1[i], d.v2[i], d.v3[i] = d8a.v0[i], d8a.v1[i], d8a.v2[i], d8a.v3[i]
			if !half {
				d.v0[j], d.v1[j], d.v2[j], d.v3[j] = d8b.v0[i], d8b.v1[i], d8b.v2[i], d8b.v3[i]
			}
		}
	}
}
|
||||
|
||||
// Interface function to AVX512 assembly code.
//
// blockMd5_avx512 feeds up to 16 input lanes to the block16 kernel.
// Every lane's buffer must lie within base (assumed — enforced by the
// caller's buffer allocation, not checked here beyond the 32-bit offset
// test); each lane is described to the assembly as a 32-bit offset from
// the start of base. generateMaskAndRounds16 groups the work into rounds
// so lanes with differing input lengths are masked out as they run dry.
// Empty lanes (len 0) keep offset 0 and are excluded via the mask.
func blockMd5_avx512(s *digest16, input [16][]byte, base []byte, maskRounds *[16]maskRounds) {
	baseMin := uint64(uintptr(unsafe.Pointer(&(base[0]))))
	ptrs := [16]int32{}

	for i := range ptrs {
		if len(input[i]) > 0 {
			if len(input[i]) > internalBlockSize {
				panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i))
			}

			// Offset of this lane's buffer relative to base; must fit in
			// 32 bits since the kernel receives int32 offsets.
			off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin
			if off > math.MaxUint32 {
				panic(fmt.Sprintf("invalid buffer sent with offset %x", off))
			}
			ptrs[i] = int32(off)
		}
	}

	sdup := *s // create copy of initial states to receive intermediate updates

	rounds := generateMaskAndRounds16(input, maskRounds)

	for r := 0; r < rounds; r++ {
		m := maskRounds[r]

		// Process 64*m.rounds bytes per active lane in one assembly call.
		block16(&sdup.v0[0], uintptr(baseMin), &ptrs[0], m.mask, int(64*m.rounds))

		for j := 0; j < len(ptrs); j++ {
			ptrs[j] += int32(64 * m.rounds) // update pointers for next round
			if m.mask&(1<<j) != 0 {         // update digest if still masked as active
				(*s).v0[j], (*s).v1[j], (*s).v2[j], (*s).v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j]
			}
		}
	}
}
|
||||
|
||||
// Interface function to AVX2 assembly code.
//
// blockMd5_avx2 feeds up to 8 input lanes to the block8 kernel, mirroring
// blockMd5_avx512 but with a per-call stack cache for the kernel's spilled
// message schedule. Lanes are described as 32-bit offsets from base.
// NOTE(review): baseMin is biased by -4 here (unlike the AVX512 path) —
// presumably to match the gather addressing in block8_amd64.s; confirm
// against the assembly before changing.
func blockMd5_avx2(s *digest8, input [8][]byte, base []byte, maskRounds *[8]maskRounds) {
	baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) - 4
	ptrs := [8]int32{}

	for i := range ptrs {
		if len(input[i]) > 0 {
			if len(input[i]) > internalBlockSize {
				panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i))
			}

			// Offset of this lane's buffer relative to base; must fit in
			// 32 bits since the kernel receives int32 offsets.
			off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin
			if off > math.MaxUint32 {
				panic(fmt.Sprintf("invalid buffer sent with offset %x", off))
			}
			ptrs[i] = int32(off)
		}
	}

	sdup := *s // create copy of initial states to receive intermediate updates

	rounds := generateMaskAndRounds8(input, maskRounds)

	for r := 0; r < rounds; r++ {
		m := maskRounds[r]
		var cache cache8 // stack storage for block8 tmp state
		block8(&sdup.v0[0], uintptr(baseMin), &ptrs[0], &cache[0], int(64*m.rounds))

		for j := 0; j < len(ptrs); j++ {
			ptrs[j] += int32(64 * m.rounds) // update pointers for next round
			if m.mask&(1<<j) != 0 {         // update digest if still masked as active
				(*s).v0[j], (*s).v1[j], (*s).v2[j], (*s).v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j]
			}
		}
	}
}
|
7
vendor/github.com/minio/md5-simd/go.mod
generated
vendored
Normal file
7
vendor/github.com/minio/md5-simd/go.mod
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
module github.com/minio/md5-simd
|
||||
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/klauspost/cpuid v1.2.3
|
||||
)
|
2
vendor/github.com/minio/md5-simd/go.sum
generated
vendored
Normal file
2
vendor/github.com/minio/md5-simd/go.sum
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs=
|
||||
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
178
vendor/github.com/minio/md5-simd/md5-digest_amd64.go
generated
vendored
Normal file
178
vendor/github.com/minio/md5-simd/md5-digest_amd64.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
//+build !noasm,!appengine,gc
|
||||
|
||||
// Copyright (c) 2020 MinIO Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package md5simd
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// md5Digest - Type for computing MD5 using either AVX2 or AVX512
|
||||
type md5Digest struct {
|
||||
uid uint64
|
||||
blocksCh chan blockInput
|
||||
cycleServer chan uint64
|
||||
x [BlockSize]byte
|
||||
nx int
|
||||
len uint64
|
||||
buffers <-chan []byte
|
||||
}
|
||||
|
||||
// NewHash - initialize instance for Md5 implementation.
|
||||
func (s *md5Server) NewHash() Hasher {
|
||||
uid := atomic.AddUint64(&s.uidCounter, 1)
|
||||
blockCh := make(chan blockInput, buffersPerLane)
|
||||
s.newInput <- newClient{
|
||||
uid: uid,
|
||||
input: blockCh,
|
||||
}
|
||||
return &md5Digest{
|
||||
uid: uid,
|
||||
buffers: s.buffers,
|
||||
blocksCh: blockCh,
|
||||
cycleServer: s.cycle,
|
||||
}
|
||||
}
|
||||
|
||||
// Size - Return size of checksum
|
||||
func (d *md5Digest) Size() int { return Size }
|
||||
|
||||
// BlockSize - Return blocksize of checksum
|
||||
func (d md5Digest) BlockSize() int { return BlockSize }
|
||||
|
||||
func (d *md5Digest) Reset() {
|
||||
if d.blocksCh == nil {
|
||||
panic("reset after close")
|
||||
}
|
||||
d.nx = 0
|
||||
d.len = 0
|
||||
d.sendBlock(blockInput{uid: d.uid, reset: true}, false)
|
||||
}
|
||||
|
||||
// write to digest
|
||||
func (d *md5Digest) Write(p []byte) (nn int, err error) {
|
||||
if d.blocksCh == nil {
|
||||
return 0, errors.New("md5Digest closed")
|
||||
}
|
||||
|
||||
// break input into chunks of maximum internalBlockSize size
|
||||
for {
|
||||
l := len(p)
|
||||
if l > internalBlockSize {
|
||||
l = internalBlockSize
|
||||
}
|
||||
nnn, err := d.write(p[:l])
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
nn += nnn
|
||||
p = p[l:]
|
||||
|
||||
if len(p) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *md5Digest) write(p []byte) (nn int, err error) {
|
||||
|
||||
nn = len(p)
|
||||
d.len += uint64(nn)
|
||||
if d.nx > 0 {
|
||||
n := copy(d.x[d.nx:], p)
|
||||
d.nx += n
|
||||
if d.nx == BlockSize {
|
||||
// Create a copy of the overflow buffer in order to send it async over the channel
|
||||
// (since we will modify the overflow buffer down below with any access beyond multiples of 64)
|
||||
tmp := <-d.buffers
|
||||
tmp = tmp[:BlockSize]
|
||||
copy(tmp, d.x[:])
|
||||
d.sendBlock(blockInput{uid: d.uid, msg: tmp}, len(p)-n < BlockSize)
|
||||
d.nx = 0
|
||||
}
|
||||
p = p[n:]
|
||||
}
|
||||
if len(p) >= BlockSize {
|
||||
n := len(p) &^ (BlockSize - 1)
|
||||
buf := <-d.buffers
|
||||
buf = buf[:n]
|
||||
copy(buf, p)
|
||||
d.sendBlock(blockInput{uid: d.uid, msg: buf}, len(p)-n < BlockSize)
|
||||
p = p[n:]
|
||||
}
|
||||
if len(p) > 0 {
|
||||
d.nx = copy(d.x[:], p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *md5Digest) Close() {
|
||||
if d.blocksCh != nil {
|
||||
close(d.blocksCh)
|
||||
d.blocksCh = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Sum - Return MD5 sum in bytes
|
||||
func (d *md5Digest) Sum(in []byte) (result []byte) {
|
||||
if d.blocksCh == nil {
|
||||
panic("sum after close")
|
||||
}
|
||||
|
||||
trail := <-d.buffers
|
||||
trail = append(trail[:0], d.x[:d.nx]...)
|
||||
|
||||
length := d.len
|
||||
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
|
||||
var tmp [64]byte
|
||||
tmp[0] = 0x80
|
||||
if length%64 < 56 {
|
||||
trail = append(trail, tmp[0:56-length%64]...)
|
||||
} else {
|
||||
trail = append(trail, tmp[0:64+56-length%64]...)
|
||||
}
|
||||
|
||||
// Length in bits.
|
||||
length <<= 3
|
||||
binary.LittleEndian.PutUint64(tmp[:], length) // append length in bits
|
||||
|
||||
trail = append(trail, tmp[0:8]...)
|
||||
if len(trail)%BlockSize != 0 {
|
||||
panic(fmt.Errorf("internal error: sum block was not aligned. len=%d, nx=%d", len(trail), d.nx))
|
||||
}
|
||||
sumCh := make(chan sumResult, 1)
|
||||
d.sendBlock(blockInput{uid: d.uid, msg: trail, sumCh: sumCh}, true)
|
||||
|
||||
sum := <-sumCh
|
||||
|
||||
return append(in, sum.digest[:]...)
|
||||
}
|
||||
|
||||
// sendBlock will send a block for processing.
|
||||
// If cycle is true we will block on cycle, otherwise we will only block
|
||||
// if the block channel is full.
|
||||
func (d *md5Digest) sendBlock(bi blockInput, cycle bool) {
|
||||
if cycle {
|
||||
select {
|
||||
case d.blocksCh <- bi:
|
||||
d.cycleServer <- d.uid
|
||||
}
|
||||
return
|
||||
}
|
||||
// Only block on cycle if we filled the buffer
|
||||
select {
|
||||
case d.blocksCh <- bi:
|
||||
return
|
||||
default:
|
||||
d.cycleServer <- d.uid
|
||||
d.blocksCh <- bi
|
||||
}
|
||||
}
|
307
vendor/github.com/minio/md5-simd/md5-server_amd64.go
generated
vendored
Normal file
307
vendor/github.com/minio/md5-simd/md5-server_amd64.go
generated
vendored
Normal file
@ -0,0 +1,307 @@
|
||||
//+build !noasm,!appengine,gc
|
||||
|
||||
// Copyright (c) 2020 MinIO Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package md5simd
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/klauspost/cpuid"
|
||||
)
|
||||
|
||||
// MD5 initialization constants
|
||||
const (
|
||||
// Lanes is the number of concurrently calculated hashes.
|
||||
Lanes = 16
|
||||
|
||||
init0 = 0x67452301
|
||||
init1 = 0xefcdab89
|
||||
init2 = 0x98badcfe
|
||||
init3 = 0x10325476
|
||||
)
|
||||
|
||||
// md5ServerUID - Does not start at 0 but next multiple of 16 so as to be able to
|
||||
// differentiate with default initialisation value of 0
|
||||
const md5ServerUID = Lanes
|
||||
|
||||
const buffersPerLane = 3
|
||||
|
||||
// Message to send across input channel
|
||||
type blockInput struct {
|
||||
uid uint64
|
||||
msg []byte
|
||||
sumCh chan sumResult
|
||||
reset bool
|
||||
}
|
||||
|
||||
type sumResult struct {
|
||||
digest [Size]byte
|
||||
}
|
||||
|
||||
type lanesInfo [Lanes]blockInput
|
||||
|
||||
// md5Server - Type to implement parallel handling of MD5 invocations
|
||||
type md5Server struct {
|
||||
uidCounter uint64
|
||||
cycle chan uint64 // client with uid has update.
|
||||
newInput chan newClient // Add new client.
|
||||
digests map[uint64][Size]byte // Map of uids to (interim) digest results
|
||||
maskRounds16 [16]maskRounds // Pre-allocated static array for max 16 rounds
|
||||
maskRounds8a [8]maskRounds // Pre-allocated static array for max 8 rounds (1st AVX2 core)
|
||||
maskRounds8b [8]maskRounds // Pre-allocated static array for max 8 rounds (2nd AVX2 core)
|
||||
allBufs []byte // Preallocated buffer.
|
||||
buffers chan []byte // Preallocated buffers, sliced from allBufs.
|
||||
}
|
||||
|
||||
// NewServer - Create new object for parallel processing handling
|
||||
func NewServer() Server {
|
||||
if !cpuid.CPU.AVX2() {
|
||||
return &fallbackServer{}
|
||||
}
|
||||
md5srv := &md5Server{}
|
||||
md5srv.digests = make(map[uint64][Size]byte)
|
||||
md5srv.newInput = make(chan newClient, Lanes)
|
||||
md5srv.cycle = make(chan uint64, Lanes*10)
|
||||
md5srv.uidCounter = md5ServerUID - 1
|
||||
md5srv.allBufs = make([]byte, 32+buffersPerLane*Lanes*internalBlockSize)
|
||||
md5srv.buffers = make(chan []byte, buffersPerLane*Lanes)
|
||||
// Fill buffers.
|
||||
for i := 0; i < buffersPerLane*Lanes; i++ {
|
||||
s := 32 + i*internalBlockSize
|
||||
md5srv.buffers <- md5srv.allBufs[s : s+internalBlockSize : s+internalBlockSize]
|
||||
}
|
||||
|
||||
// Start a single thread for reading from the input channel
|
||||
go md5srv.process(md5srv.newInput)
|
||||
return md5srv
|
||||
}
|
||||
|
||||
type newClient struct {
|
||||
uid uint64
|
||||
input chan blockInput
|
||||
}
|
||||
|
||||
// process - Sole handler for reading from the input channel.
|
||||
func (s *md5Server) process(newClients chan newClient) {
|
||||
// To fill up as many lanes as possible:
|
||||
//
|
||||
// 1. Wait for a cycle id.
|
||||
// 2. If not already in a lane, add, otherwise leave on channel
|
||||
// 3. Start timer
|
||||
// 4. Check if lanes is full, if so, goto 10 (process).
|
||||
// 5. If timeout, goto 10.
|
||||
// 6. Wait for new id (goto 2) or timeout (goto 10).
|
||||
// 10. Process.
|
||||
// 11. Check all input if there is already input, if so add to lanes.
|
||||
// 12. Goto 1
|
||||
|
||||
// lanes contains the lanes.
|
||||
var lanes lanesInfo
|
||||
// lanesFilled contains the number of filled lanes for current cycle.
|
||||
var lanesFilled int
|
||||
// clients contains active clients
|
||||
var clients = make(map[uint64]chan blockInput, Lanes)
|
||||
|
||||
addToLane := func(uid uint64) {
|
||||
cl, ok := clients[uid]
|
||||
if !ok {
|
||||
// Unknown client. Maybe it was already removed.
|
||||
return
|
||||
}
|
||||
// Check if we already have it.
|
||||
for _, lane := range lanes[:lanesFilled] {
|
||||
if lane.uid == uid {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Continue until we get a block or there is nothing on channel
|
||||
for {
|
||||
select {
|
||||
case block, ok := <-cl:
|
||||
if !ok {
|
||||
// Client disconnected
|
||||
delete(clients, block.uid)
|
||||
return
|
||||
}
|
||||
if block.uid != uid {
|
||||
panic(fmt.Errorf("uid mismatch, %d (block) != %d (client)", block.uid, uid))
|
||||
}
|
||||
// If reset message, reset and we're done
|
||||
if block.reset {
|
||||
delete(s.digests, uid)
|
||||
continue
|
||||
}
|
||||
|
||||
// If requesting sum, we will need to maintain state.
|
||||
if block.sumCh != nil {
|
||||
var dig digest
|
||||
d, ok := s.digests[uid]
|
||||
if ok {
|
||||
dig.s[0] = binary.LittleEndian.Uint32(d[0:4])
|
||||
dig.s[1] = binary.LittleEndian.Uint32(d[4:8])
|
||||
dig.s[2] = binary.LittleEndian.Uint32(d[8:12])
|
||||
dig.s[3] = binary.LittleEndian.Uint32(d[12:16])
|
||||
} else {
|
||||
dig.s[0], dig.s[1], dig.s[2], dig.s[3] = init0, init1, init2, init3
|
||||
}
|
||||
|
||||
sum := sumResult{}
|
||||
// Add end block to current digest.
|
||||
blockGeneric(&dig, block.msg)
|
||||
|
||||
binary.LittleEndian.PutUint32(sum.digest[0:], dig.s[0])
|
||||
binary.LittleEndian.PutUint32(sum.digest[4:], dig.s[1])
|
||||
binary.LittleEndian.PutUint32(sum.digest[8:], dig.s[2])
|
||||
binary.LittleEndian.PutUint32(sum.digest[12:], dig.s[3])
|
||||
block.sumCh <- sum
|
||||
if block.msg != nil {
|
||||
s.buffers <- block.msg
|
||||
}
|
||||
continue
|
||||
}
|
||||
if len(block.msg) == 0 {
|
||||
continue
|
||||
}
|
||||
lanes[lanesFilled] = block
|
||||
lanesFilled++
|
||||
return
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
addNewClient := func(cl newClient) {
|
||||
if _, ok := clients[cl.uid]; ok {
|
||||
panic("internal error: duplicate client registration")
|
||||
}
|
||||
clients[cl.uid] = cl.input
|
||||
}
|
||||
|
||||
allLanesFilled := func() bool {
|
||||
return lanesFilled == Lanes || lanesFilled >= len(clients)
|
||||
}
|
||||
|
||||
for {
|
||||
// Step 1.
|
||||
for lanesFilled == 0 {
|
||||
select {
|
||||
case cl, ok := <-newClients:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
addNewClient(cl)
|
||||
// Check if it already sent a payload.
|
||||
addToLane(cl.uid)
|
||||
continue
|
||||
case uid := <-s.cycle:
|
||||
addToLane(uid)
|
||||
}
|
||||
}
|
||||
|
||||
fillLanes:
|
||||
for !allLanesFilled() {
|
||||
select {
|
||||
case cl, ok := <-newClients:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
addNewClient(cl)
|
||||
|
||||
case uid := <-s.cycle:
|
||||
addToLane(uid)
|
||||
default:
|
||||
// Nothing more queued...
|
||||
break fillLanes
|
||||
}
|
||||
}
|
||||
|
||||
// If we did not fill all lanes, check if there is more waiting
|
||||
if !allLanesFilled() {
|
||||
runtime.Gosched()
|
||||
for uid := range clients {
|
||||
addToLane(uid)
|
||||
if allLanesFilled() {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if false {
|
||||
if !allLanesFilled() {
|
||||
fmt.Println("Not all lanes filled", lanesFilled, "of", len(clients))
|
||||
//pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||
} else if true {
|
||||
fmt.Println("all lanes filled")
|
||||
}
|
||||
}
|
||||
// Process the lanes we could collect
|
||||
s.blocks(lanes[:lanesFilled])
|
||||
|
||||
// Clear lanes...
|
||||
lanesFilled = 0
|
||||
// Add all current queued
|
||||
for uid := range clients {
|
||||
addToLane(uid)
|
||||
if allLanesFilled() {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *md5Server) Close() {
|
||||
if s.newInput != nil {
|
||||
close(s.newInput)
|
||||
s.newInput = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Invoke assembly and send results back
|
||||
func (s *md5Server) blocks(lanes []blockInput) {
|
||||
inputs := [16][]byte{}
|
||||
for i := range lanes {
|
||||
inputs[i] = lanes[i].msg
|
||||
}
|
||||
|
||||
// Collect active digests...
|
||||
state := s.getDigests(lanes)
|
||||
// Process all lanes...
|
||||
s.blockMd5_x16(&state, inputs, len(lanes) <= 8)
|
||||
|
||||
for i, lane := range lanes {
|
||||
uid := lane.uid
|
||||
dig := [Size]byte{}
|
||||
binary.LittleEndian.PutUint32(dig[0:], state.v0[i])
|
||||
binary.LittleEndian.PutUint32(dig[4:], state.v1[i])
|
||||
binary.LittleEndian.PutUint32(dig[8:], state.v2[i])
|
||||
binary.LittleEndian.PutUint32(dig[12:], state.v3[i])
|
||||
|
||||
s.digests[uid] = dig
|
||||
if lane.msg != nil {
|
||||
s.buffers <- lane.msg
|
||||
}
|
||||
lanes[i] = blockInput{}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *md5Server) getDigests(lanes []blockInput) (d digest16) {
|
||||
for i, lane := range lanes {
|
||||
a, ok := s.digests[lane.uid]
|
||||
if ok {
|
||||
d.v0[i] = binary.LittleEndian.Uint32(a[0:4])
|
||||
d.v1[i] = binary.LittleEndian.Uint32(a[4:8])
|
||||
d.v2[i] = binary.LittleEndian.Uint32(a[8:12])
|
||||
d.v3[i] = binary.LittleEndian.Uint32(a[12:16])
|
||||
} else {
|
||||
d.v0[i] = init0
|
||||
d.v1[i] = init1
|
||||
d.v2[i] = init2
|
||||
d.v3[i] = init3
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
12
vendor/github.com/minio/md5-simd/md5-server_fallback.go
generated
vendored
Normal file
12
vendor/github.com/minio/md5-simd/md5-server_fallback.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
//+build !amd64 appengine !gc noasm
|
||||
|
||||
// Copyright (c) 2020 MinIO Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package md5simd
|
||||
|
||||
// NewServer - Create new object for parallel processing handling
|
||||
func NewServer() *fallbackServer {
|
||||
return &fallbackServer{}
|
||||
}
|
70
vendor/github.com/minio/md5-simd/md5-util_amd64.go
generated
vendored
Normal file
70
vendor/github.com/minio/md5-simd/md5-util_amd64.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
// Copyright (c) 2020 MinIO Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package md5simd
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Helper struct for sorting blocks based on length
|
||||
type lane struct {
|
||||
len uint
|
||||
pos uint
|
||||
}
|
||||
|
||||
// Helper struct for generating number of rounds in combination with mask for valid lanes
|
||||
type maskRounds struct {
|
||||
mask uint64
|
||||
rounds uint64
|
||||
}
|
||||
|
||||
func generateMaskAndRounds8(input [8][]byte, mr *[8]maskRounds) (rounds int) {
|
||||
// Sort on blocks length small to large
|
||||
var sorted [8]lane
|
||||
for c, inpt := range input {
|
||||
sorted[c] = lane{uint(len(inpt)), uint(c)}
|
||||
}
|
||||
sort.Slice(sorted[:], func(i, j int) bool { return sorted[i].len < sorted[j].len })
|
||||
|
||||
// Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks
|
||||
m, round := uint64(0xff), uint64(0)
|
||||
|
||||
for _, s := range sorted {
|
||||
if s.len > 0 {
|
||||
if uint64(s.len)>>6 > round {
|
||||
mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round}
|
||||
rounds++
|
||||
}
|
||||
round = uint64(s.len) >> 6
|
||||
}
|
||||
m = m & ^(1 << uint(s.pos))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func generateMaskAndRounds16(input [16][]byte, mr *[16]maskRounds) (rounds int) {
|
||||
|
||||
// Sort on blocks length small to large
|
||||
var sorted [16]lane
|
||||
for c, inpt := range input {
|
||||
sorted[c] = lane{uint(len(inpt)), uint(c)}
|
||||
}
|
||||
sort.Slice(sorted[:], func(i, j int) bool { return sorted[i].len < sorted[j].len })
|
||||
|
||||
// Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks
|
||||
m, round := uint64(0xffff), uint64(0)
|
||||
|
||||
for _, s := range sorted {
|
||||
if s.len > 0 {
|
||||
if uint64(s.len)>>6 > round {
|
||||
mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round}
|
||||
rounds++
|
||||
}
|
||||
round = uint64(s.len) >> 6
|
||||
}
|
||||
m = m & ^(1 << uint(s.pos))
|
||||
}
|
||||
return
|
||||
}
|
57
vendor/github.com/minio/md5-simd/md5.go
generated
vendored
Normal file
57
vendor/github.com/minio/md5-simd/md5.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
package md5simd
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"hash"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// The blocksize of MD5 in bytes.
|
||||
BlockSize = 64
|
||||
|
||||
// The size of an MD5 checksum in bytes.
|
||||
Size = 16
|
||||
|
||||
// internalBlockSize is the internal block size.
|
||||
internalBlockSize = 32 << 10
|
||||
)
|
||||
|
||||
type Server interface {
|
||||
NewHash() Hasher
|
||||
Close()
|
||||
}
|
||||
|
||||
type Hasher interface {
|
||||
hash.Hash
|
||||
Close()
|
||||
}
|
||||
|
||||
// md5Wrapper is a wrapper around the builtin hasher.
|
||||
type md5Wrapper struct {
|
||||
hash.Hash
|
||||
}
|
||||
|
||||
var md5Pool = sync.Pool{New: func() interface{} {
|
||||
return md5.New()
|
||||
}}
|
||||
|
||||
// fallbackServer - Fallback when no assembly is available.
|
||||
type fallbackServer struct {
|
||||
}
|
||||
|
||||
// NewHash -- return regular Golang md5 hashing from crypto
|
||||
func (s *fallbackServer) NewHash() Hasher {
|
||||
return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)}
|
||||
}
|
||||
|
||||
func (s *fallbackServer) Close() {
|
||||
}
|
||||
|
||||
func (m *md5Wrapper) Close() {
|
||||
if m.Hash != nil {
|
||||
m.Reset()
|
||||
md5Pool.Put(m.Hash)
|
||||
m.Hash = nil
|
||||
}
|
||||
}
|
4
vendor/github.com/minio/minio-go/v7/.gitignore
generated
vendored
Normal file
4
vendor/github.com/minio/minio-go/v7/.gitignore
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
*~
|
||||
*.test
|
||||
validator
|
||||
golangci-lint
|
16
vendor/github.com/minio/minio-go/v7/.golangci.yml
generated
vendored
Normal file
16
vendor/github.com/minio/minio-go/v7/.golangci.yml
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
linters-settings:
|
||||
misspell:
|
||||
locale: US
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- typecheck
|
||||
- goimports
|
||||
- misspell
|
||||
- govet
|
||||
- golint
|
||||
- ineffassign
|
||||
- gosimple
|
||||
- deadcode
|
||||
- structcheck
|
1
vendor/github.com/minio/minio-go/v7/CNAME
generated
vendored
Normal file
1
vendor/github.com/minio/minio-go/v7/CNAME
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
minio-go.min.io
|
23
vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
generated
vendored
Normal file
23
vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
|
||||
### Developer Guidelines
|
||||
|
||||
``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
|
||||
|
||||
* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
|
||||
- Fork it
|
||||
- Create your feature branch (git checkout -b my-new-feature)
|
||||
- Commit your changes (git commit -am 'Add some feature')
|
||||
- Push to the branch (git push origin my-new-feature)
|
||||
- Create new Pull Request
|
||||
|
||||
* When you're ready to create a pull request, be sure to:
|
||||
- Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
|
||||
- Run `go fmt`
|
||||
- Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
|
||||
- Make sure `go test -race ./...` and `go build` completes.
|
||||
NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables
|
||||
``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...``
|
||||
|
||||
* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project
|
||||
- `minio-go` project is strictly conformant with Golang style
|
||||
- if you happen to observe offending code, please feel free to send a pull request
|
202
vendor/github.com/minio/minio-go/v7/LICENSE
generated
vendored
Normal file
202
vendor/github.com/minio/minio-go/v7/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
35
vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
generated
vendored
Normal file
35
vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
# For maintainers only
|
||||
|
||||
## Responsibilities
|
||||
|
||||
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
|
||||
|
||||
### Making new releases
|
||||
Tag and sign your release commit, additionally this step requires you to have access to MinIO's trusted private key.
|
||||
```sh
|
||||
$ export GNUPGHOME=/media/${USER}/minio/trusted
|
||||
$ git tag -s 4.0.0
|
||||
$ git push
|
||||
$ git push --tags
|
||||
```
|
||||
|
||||
### Update version
|
||||
Once release has been made update `libraryVersion` constant in `api.go` to next to be released version.
|
||||
|
||||
```sh
|
||||
$ grep libraryVersion api.go
|
||||
libraryVersion = "4.0.1"
|
||||
```
|
||||
|
||||
Commit your changes
|
||||
```
|
||||
$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>"
|
||||
```
|
||||
|
||||
### Announce
|
||||
Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@min.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release.
|
||||
|
||||
To generate `changelog`
|
||||
```sh
|
||||
$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
|
||||
```
|
32
vendor/github.com/minio/minio-go/v7/Makefile
generated
vendored
Normal file
32
vendor/github.com/minio/minio-go/v7/Makefile
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
GOPATH := $(shell go env GOPATH)
|
||||
TMPDIR := $(shell mktemp -d)
|
||||
|
||||
all: checks
|
||||
|
||||
.PHONY: examples docs
|
||||
|
||||
checks: lint vet test examples functional-test
|
||||
|
||||
lint:
|
||||
@mkdir -p ${GOPATH}/bin
|
||||
@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
|
||||
@echo "Running $@ check"
|
||||
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
|
||||
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
|
||||
|
||||
vet:
|
||||
@GO111MODULE=on go vet ./...
|
||||
|
||||
test:
|
||||
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
|
||||
|
||||
examples:
|
||||
@$(foreach v,$(wildcard examples/s3/*), go build -o ${TMPDIR}/$(basename $(v)) $(v) || exit 1;)
|
||||
|
||||
functional-test:
|
||||
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
|
||||
|
||||
clean:
|
||||
@echo "Cleaning up all the generated files"
|
||||
@find . -name '*.test' | xargs rm -fv
|
||||
@find . -name '*~' | xargs rm -fv
|
9
vendor/github.com/minio/minio-go/v7/NOTICE
generated
vendored
Normal file
9
vendor/github.com/minio/minio-go/v7/NOTICE
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc.
|
||||
|
||||
This product includes software developed at MinIO, Inc.
|
||||
(https://min.io/).
|
||||
|
||||
The MinIO project contains unmodified/modified subcomponents too with
|
||||
separate copyright notices and license terms. Your use of the source
|
||||
code for these subcomponents is subject to the terms and conditions
|
||||
of Apache License Version 2.0
|
251
vendor/github.com/minio/minio-go/v7/README.md
generated
vendored
Normal file
251
vendor/github.com/minio/minio-go/v7/README.md
generated
vendored
Normal file
@ -0,0 +1,251 @@
|
||||
# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
|
||||
|
||||
The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
|
||||
|
||||
This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
|
||||
|
||||
This document assumes that you have a working [Go development environment](https://golang.org/doc/install).
|
||||
|
||||
## Download from Github
|
||||
```sh
|
||||
GO111MODULE=on go get github.com/minio/minio-go/v7
|
||||
```
|
||||
|
||||
## Initialize MinIO Client
|
||||
MinIO client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.
|
||||
|
||||
| Parameter | Description|
|
||||
| :--- | :--- |
|
||||
| endpoint | URL to object storage service. |
|
||||
| _minio.Options_ | All the options such as credentials, custom transport etc. |
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
)
|
||||
|
||||
func main() {
|
||||
endpoint := "play.min.io"
|
||||
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
|
||||
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
useSSL := true
|
||||
|
||||
// Initialize minio client object.
|
||||
minioClient, err := minio.New(endpoint, &minio.Options{
|
||||
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
|
||||
Secure: useSSL,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
log.Printf("%#v\n", minioClient) // minioClient is now set up
|
||||
}
|
||||
```
|
||||
|
||||
## Quick Start Example - File Uploader
|
||||
This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
|
||||
|
||||
We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
|
||||
|
||||
### FileUploader.go
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
endpoint := "play.min.io"
|
||||
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
|
||||
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
useSSL := true
|
||||
|
||||
// Initialize minio client object.
|
||||
minioClient, err := minio.New(endpoint, &minio.Options{
|
||||
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
|
||||
Secure: useSSL,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// Make a new bucket called mymusic.
|
||||
bucketName := "mymusic"
|
||||
location := "us-east-1"
|
||||
|
||||
err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
|
||||
if err != nil {
|
||||
// Check to see if we already own this bucket (which happens if you run this twice)
|
||||
exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
|
||||
if errBucketExists == nil && exists {
|
||||
log.Printf("We already own %s\n", bucketName)
|
||||
} else {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
log.Printf("Successfully created %s\n", bucketName)
|
||||
}
|
||||
|
||||
// Upload the zip file
|
||||
objectName := "golden-oldies.zip"
|
||||
filePath := "/tmp/golden-oldies.zip"
|
||||
contentType := "application/zip"
|
||||
|
||||
// Upload the zip file with FPutObject
|
||||
n, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
|
||||
}
|
||||
```
|
||||
|
||||
### Run FileUploader
|
||||
```sh
|
||||
export GO111MODULE=on
|
||||
go run file-uploader.go
|
||||
2016/08/13 17:03:28 Successfully created mymusic
|
||||
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
|
||||
|
||||
mc ls play/mymusic/
|
||||
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
|
||||
```
|
||||
|
||||
## API Reference
|
||||
The full API Reference is available here.
|
||||
|
||||
* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference)
|
||||
|
||||
### API Reference : Bucket Operations
|
||||
* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket)
|
||||
* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets)
|
||||
* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists)
|
||||
* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket)
|
||||
* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects)
|
||||
* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads)
|
||||
|
||||
### API Reference : Bucket policy Operations
|
||||
* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy)
|
||||
* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy)
|
||||
|
||||
### API Reference : Bucket notification Operations
|
||||
* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification)
|
||||
* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification)
|
||||
* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
|
||||
* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension)
|
||||
* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension)
|
||||
|
||||
### API Reference : File Object Operations
|
||||
* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
|
||||
* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject)
|
||||
|
||||
### API Reference : Object Operations
|
||||
* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject)
|
||||
* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject)
|
||||
* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
|
||||
* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
|
||||
* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
|
||||
* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
|
||||
* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
|
||||
* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
|
||||
* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent)
|
||||
|
||||
|
||||
### API Reference : Presigned Operations
|
||||
* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
|
||||
* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
|
||||
* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
|
||||
* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
|
||||
|
||||
### API Reference : Client custom settings
|
||||
* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
|
||||
* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
|
||||
* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
|
||||
|
||||
## Full Examples
|
||||
|
||||
### Full Examples : Bucket Operations
|
||||
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
|
||||
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
|
||||
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
|
||||
* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
|
||||
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
|
||||
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
|
||||
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
|
||||
|
||||
### Full Examples : Bucket policy Operations
|
||||
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
|
||||
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
|
||||
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
|
||||
|
||||
### Full Examples : Bucket lifecycle Operations
|
||||
* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
|
||||
* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
|
||||
|
||||
### Full Examples : Bucket encryption Operations
|
||||
* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
|
||||
* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
|
||||
* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
|
||||
|
||||
### Full Examples : Bucket replication Operations
|
||||
* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
|
||||
* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
|
||||
* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
|
||||
|
||||
### Full Examples : Bucket notification Operations
|
||||
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
|
||||
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
|
||||
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
|
||||
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
|
||||
* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
|
||||
|
||||
### Full Examples : File Object Operations
|
||||
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
|
||||
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
|
||||
* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
|
||||
* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
|
||||
|
||||
### Full Examples : Object Operations
|
||||
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
|
||||
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
|
||||
* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
|
||||
* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
|
||||
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
|
||||
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
|
||||
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
|
||||
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
|
||||
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
|
||||
|
||||
### Full Examples : Encrypted Object Operations
|
||||
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
|
||||
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
|
||||
* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
|
||||
|
||||
### Full Examples : Presigned Operations
|
||||
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
|
||||
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
|
||||
* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
|
||||
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
|
||||
|
||||
## Explore Further
|
||||
* [Complete Documentation](https://docs.min.io)
|
||||
* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
|
||||
|
||||
## Contribute
|
||||
[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
|
||||
|
||||
## License
|
||||
This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
|
260
vendor/github.com/minio/minio-go/v7/README_zh_CN.md
generated
vendored
Normal file
260
vendor/github.com/minio/minio-go/v7/README_zh_CN.md
generated
vendored
Normal file
@ -0,0 +1,260 @@
|
||||
# 适用于与Amazon S3兼容云存储的MinIO Go SDK [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
|
||||
|
||||
MinIO Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对象存储服务。
|
||||
|
||||
**支持的云存储:**
|
||||
|
||||
- AWS Signature Version 4
|
||||
- Amazon S3
|
||||
- MinIO
|
||||
|
||||
- AWS Signature Version 2
|
||||
- Google Cloud Storage (兼容模式)
|
||||
- Openstack Swift + Swift3 middleware
|
||||
- Ceph Object Gateway
|
||||
- Riak CS
|
||||
|
||||
本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference)。
|
||||
|
||||
本文假设你已经有 [Go开发环境](https://golang.org/doc/install)。
|
||||
|
||||
## 从Github下载
|
||||
```sh
|
||||
go get -u github.com/minio/minio-go
|
||||
```
|
||||
|
||||
## 初始化MinIO Client
|
||||
MinIO client需要以下4个参数来连接与Amazon S3兼容的对象存储。
|
||||
|
||||
| 参数 | 描述|
|
||||
| :--- | :--- |
|
||||
| endpoint | 对象存储服务的URL |
|
||||
| accessKeyID | Access key是唯一标识你的账户的用户ID。 |
|
||||
| secretAccessKey | Secret key是你账户的密码。 |
|
||||
| secure | true代表使用HTTPS |
|
||||
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
)
|
||||
|
||||
func main() {
|
||||
endpoint := "play.min.io"
|
||||
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
|
||||
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
useSSL := true
|
||||
|
||||
// 初使化 minio client对象。
|
||||
minioClient, err := minio.New(endpoint, &minio.Options{
|
||||
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
|
||||
Secure: useSSL,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
log.Printf("%#v\n", minioClient) // minioClient初使化成功
|
||||
}
|
||||
```
|
||||
|
||||
## 示例-文件上传
|
||||
本示例连接到一个对象存储服务,创建一个存储桶并上传一个文件到存储桶中。
|
||||
|
||||
我们在本示例中使用运行在 [https://play.min.io](https://play.min.io) 上的MinIO服务,你可以用这个服务来开发和测试。示例中的访问凭据是公开的。
|
||||
|
||||
### FileUploader.go
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
endpoint := "play.min.io"
|
||||
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
|
||||
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
|
||||
useSSL := true
|
||||
|
||||
// 初使化 minio client对象。
|
||||
minioClient, err := minio.New(endpoint, &minio.Options{
|
||||
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
|
||||
Secure: useSSL,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// 创建一个叫mymusic的存储桶。
|
||||
bucketName := "mymusic"
|
||||
location := "us-east-1"
|
||||
|
||||
err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
|
||||
if err != nil {
|
||||
// 检查存储桶是否已经存在。
|
||||
exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
|
||||
if errBucketExists == nil && exists {
|
||||
log.Printf("We already own %s\n", bucketName)
|
||||
} else {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
log.Printf("Successfully created %s\n", bucketName)
|
||||
}
|
||||
|
||||
// 上传一个zip文件。
|
||||
objectName := "golden-oldies.zip"
|
||||
filePath := "/tmp/golden-oldies.zip"
|
||||
contentType := "application/zip"
|
||||
|
||||
// 使用FPutObject上传一个zip文件。
|
||||
n, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
|
||||
}
|
||||
```
|
||||
|
||||
### 运行FileUploader
|
||||
```sh
|
||||
go run file-uploader.go
|
||||
2016/08/13 17:03:28 Successfully created mymusic
|
||||
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
|
||||
|
||||
mc ls play/mymusic/
|
||||
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
|
||||
```
|
||||
|
||||
## API文档
|
||||
完整的API文档在这里。
|
||||
* [完整API文档](https://docs.min.io/docs/golang-client-api-reference)
|
||||
|
||||
### API文档 : 操作存储桶
|
||||
* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket)
|
||||
* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets)
|
||||
* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists)
|
||||
* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket)
|
||||
* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects)
|
||||
* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads)
|
||||
|
||||
### API文档 : 存储桶策略
|
||||
* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy)
|
||||
* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy)
|
||||
|
||||
### API文档 : 存储桶通知
|
||||
* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification)
|
||||
* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification)
|
||||
* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
|
||||
* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO 扩展)
|
||||
* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO 扩展)
|
||||
|
||||
### API文档 : 操作文件对象
|
||||
* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
|
||||
* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
|
||||
|
||||
### API文档 : 操作对象
|
||||
* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject)
|
||||
* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject)
|
||||
* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
|
||||
* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
|
||||
* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
|
||||
* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
|
||||
* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
|
||||
* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
|
||||
* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent)
|
||||
|
||||
### API文档 : Presigned操作
|
||||
* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
|
||||
* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
|
||||
* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
|
||||
* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
|
||||
|
||||
### API文档 : 客户端自定义设置
|
||||
* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
|
||||
* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
|
||||
* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
|
||||
|
||||
## 完整示例
|
||||
|
||||
### 完整示例 : 操作存储桶
|
||||
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
|
||||
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
|
||||
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
|
||||
* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
|
||||
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
|
||||
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
|
||||
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
|
||||
|
||||
### 完整示例 : 存储桶策略
|
||||
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
|
||||
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
|
||||
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
|
||||
|
||||
### 完整示例 : 存储桶生命周期
|
||||
* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
|
||||
* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
|
||||
|
||||
### 完整示例 : 存储桶加密
|
||||
* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
|
||||
* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
|
||||
* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
|
||||
|
||||
### 完整示例 : 存储桶复制
|
||||
* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
|
||||
* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
|
||||
* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
|
||||
|
||||
### 完整示例 : 存储桶通知
|
||||
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
|
||||
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
|
||||
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
|
||||
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO扩展)
|
||||
* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO 扩展)
|
||||
|
||||
### 完整示例 : 操作文件对象
|
||||
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
|
||||
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
|
||||
* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
|
||||
* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
|
||||
|
||||
### 完整示例 : 操作对象
|
||||
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
|
||||
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
|
||||
* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
|
||||
* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
|
||||
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
|
||||
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
|
||||
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
|
||||
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
|
||||
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
|
||||
|
||||
### 完整示例 : 操作加密对象
|
||||
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
|
||||
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
|
||||
* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
|
||||
|
||||
### 完整示例 : Presigned操作
|
||||
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
|
||||
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
|
||||
* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
|
||||
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
|
||||
|
||||
## 了解更多
|
||||
* [完整文档](https://docs.min.io)
|
||||
* [MinIO Go Client SDK API文档](https://docs.min.io/docs/golang-client-api-reference)
|
||||
|
||||
## 贡献
|
||||
[贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
|
134
vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
generated
vendored
Normal file
134
vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/sse"
|
||||
)
|
||||
|
||||
// SetBucketEncryption sets the default encryption configuration on an existing bucket.
|
||||
func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if config == nil {
|
||||
return errInvalidArgument("configuration cannot be empty")
|
||||
}
|
||||
|
||||
buf, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("encryption", "")
|
||||
|
||||
// Content-length is mandatory to set a default encryption configuration
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(buf),
|
||||
contentLength: int64(len(buf)),
|
||||
contentMD5Base64: sumMD5Base64(buf),
|
||||
}
|
||||
|
||||
// Execute PUT to upload a new bucket default encryption configuration.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts.
|
||||
func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("encryption", "")
|
||||
|
||||
// DELETE default encryption configuration on a bucket.
|
||||
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBucketEncryption gets the default encryption configuration
|
||||
// on an existing bucket with a context to control cancellations and timeouts.
|
||||
func (c Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("encryption", "")
|
||||
|
||||
// Execute GET on bucket to get the default encryption configuration.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
|
||||
encryptionConfig := &sse.Configuration{}
|
||||
if err = xmlDecoder(resp.Body, encryptionConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return encryptionConfig, nil
|
||||
}
|
147
vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
generated
vendored
Normal file
147
vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/lifecycle"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// SetBucketLifecycle set the lifecycle on an existing bucket.
|
||||
func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If lifecycle is empty then delete it.
|
||||
if config.Empty() {
|
||||
return c.removeBucketLifecycle(ctx, bucketName)
|
||||
}
|
||||
|
||||
buf, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save the updated lifecycle.
|
||||
return c.putBucketLifecycle(ctx, bucketName, buf)
|
||||
}
|
||||
|
||||
// Saves a new bucket lifecycle.
|
||||
func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("lifecycle", "")
|
||||
|
||||
// Content-length is mandatory for put lifecycle request
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(buf),
|
||||
contentLength: int64(len(buf)),
|
||||
contentMD5Base64: sumMD5Base64(buf),
|
||||
}
|
||||
|
||||
// Execute PUT to upload a new bucket lifecycle.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove lifecycle from a bucket.
|
||||
func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) error {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("lifecycle", "")
|
||||
|
||||
// Execute DELETE on objectName.
|
||||
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBucketLifecycle fetch bucket lifecycle configuration
|
||||
func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bucketLifecycle, err := c.getBucketLifecycle(ctx, bucketName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config := lifecycle.NewConfiguration()
|
||||
if err = xml.Unmarshal(bucketLifecycle, config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// Request server for current bucket lifecycle.
|
||||
func (c Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("lifecycle", "")
|
||||
|
||||
// Execute GET on bucket to get lifecycle.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
|
255
vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
generated
vendored
Normal file
255
vendor/github.com/minio/minio-go/v7/api-bucket-notification.go
generated
vendored
Normal file
@ -0,0 +1,255 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/minio/minio-go/v7/pkg/notification"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts.
|
||||
func (c Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("notification", "")
|
||||
|
||||
notifBytes, err := xml.Marshal(&config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notifBuffer := bytes.NewReader(notifBytes)
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: notifBuffer,
|
||||
contentLength: int64(len(notifBytes)),
|
||||
contentMD5Base64: sumMD5Base64(notifBytes),
|
||||
contentSHA256Hex: sum256Hex(notifBytes),
|
||||
}
|
||||
|
||||
// Execute PUT to upload a new bucket notification.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config
|
||||
func (c Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error {
|
||||
return c.SetBucketNotification(ctx, bucketName, notification.Configuration{})
|
||||
}
|
||||
|
||||
// GetBucketNotification returns current bucket notification configuration
|
||||
func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return notification.Configuration{}, err
|
||||
}
|
||||
return c.getBucketNotification(ctx, bucketName)
|
||||
}
|
||||
|
||||
// Request server for notification rules.
|
||||
func (c Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) {
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("notification", "")
|
||||
|
||||
// Execute GET on bucket to list objects.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return notification.Configuration{}, err
|
||||
}
|
||||
return processBucketNotificationResponse(bucketName, resp)
|
||||
|
||||
}
|
||||
|
||||
// processes the GetNotification http response from the server.
|
||||
func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
errResponse := httpRespToErrorResponse(resp, bucketName, "")
|
||||
return notification.Configuration{}, errResponse
|
||||
}
|
||||
var bucketNotification notification.Configuration
|
||||
err := xmlDecoder(resp.Body, &bucketNotification)
|
||||
if err != nil {
|
||||
return notification.Configuration{}, err
|
||||
}
|
||||
return bucketNotification, nil
|
||||
}
|
||||
|
||||
// ListenNotification listen for all events, this is a MinIO specific API
|
||||
func (c Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info {
|
||||
return c.ListenBucketNotification(ctx, "", prefix, suffix, events)
|
||||
}
|
||||
|
||||
// ListenBucketNotification listen for bucket events, this is a MinIO specific API
|
||||
func (c Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info {
|
||||
notificationInfoCh := make(chan notification.Info, 1)
|
||||
const notificationCapacity = 4 * 1024 * 1024
|
||||
notificationEventBuffer := make([]byte, notificationCapacity)
|
||||
// Only success, start a routine to start reading line by line.
|
||||
go func(notificationInfoCh chan<- notification.Info) {
|
||||
defer close(notificationInfoCh)
|
||||
|
||||
// Validate the bucket name.
|
||||
if bucketName != "" {
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
select {
|
||||
case notificationInfoCh <- notification.Info{
|
||||
Err: err,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check ARN partition to verify if listening bucket is supported
|
||||
if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
|
||||
select {
|
||||
case notificationInfoCh <- notification.Info{
|
||||
Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Continuously run and listen on bucket notification.
|
||||
// Create a done channel to control 'ListObjects' go routine.
|
||||
retryDoneCh := make(chan struct{}, 1)
|
||||
|
||||
// Indicate to our routine to exit cleanly upon return.
|
||||
defer close(retryDoneCh)
|
||||
|
||||
// Prepare urlValues to pass into the request on every loop
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("prefix", prefix)
|
||||
urlValues.Set("suffix", suffix)
|
||||
urlValues["events"] = events
|
||||
|
||||
// Wait on the jitter retry loop.
|
||||
for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
|
||||
// Execute GET on bucket to list objects.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
if err != nil {
|
||||
select {
|
||||
case notificationInfoCh <- notification.Info{
|
||||
Err: err,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Validate http response, upon error return quickly.
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
errResponse := httpRespToErrorResponse(resp, bucketName, "")
|
||||
select {
|
||||
case notificationInfoCh <- notification.Info{
|
||||
Err: errResponse,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Initialize a new bufio scanner, to read line by line.
|
||||
bio := bufio.NewScanner(resp.Body)
|
||||
|
||||
// Use a higher buffer to support unexpected
|
||||
// caching done by proxies
|
||||
bio.Buffer(notificationEventBuffer, notificationCapacity)
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
||||
// Unmarshal each line, returns marshaled values.
|
||||
for bio.Scan() {
|
||||
var notificationInfo notification.Info
|
||||
if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil {
|
||||
// Unexpected error during json unmarshal, send
|
||||
// the error to caller for actionable as needed.
|
||||
select {
|
||||
case notificationInfoCh <- notification.Info{
|
||||
Err: err,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
closeResponse(resp)
|
||||
continue
|
||||
}
|
||||
// Send notificationInfo
|
||||
select {
|
||||
case notificationInfoCh <- notificationInfo:
|
||||
case <-ctx.Done():
|
||||
closeResponse(resp)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err = bio.Err(); err != nil {
|
||||
select {
|
||||
case notificationInfoCh <- notification.Info{
|
||||
Err: err,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Close current connection before looping further.
|
||||
closeResponse(resp)
|
||||
|
||||
}
|
||||
}(notificationInfoCh)
|
||||
|
||||
// Returns the notification info channel, for caller to start reading from.
|
||||
return notificationInfoCh
|
||||
}
|
142
vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
generated
vendored
Normal file
142
vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// SetBucketPolicy sets the access permissions on an existing bucket.
|
||||
func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If policy is empty then delete the bucket policy.
|
||||
if policy == "" {
|
||||
return c.removeBucketPolicy(ctx, bucketName)
|
||||
}
|
||||
|
||||
// Save the updated policies.
|
||||
return c.putBucketPolicy(ctx, bucketName, policy)
|
||||
}
|
||||
|
||||
// Saves a new bucket policy.
|
||||
func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("policy", "")
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: strings.NewReader(policy),
|
||||
contentLength: int64(len(policy)),
|
||||
}
|
||||
|
||||
// Execute PUT to upload a new bucket policy.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Removes all policies on a bucket.
|
||||
func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("policy", "")
|
||||
|
||||
// Execute DELETE on objectName.
|
||||
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBucketPolicy returns the current policy
|
||||
func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return "", err
|
||||
}
|
||||
bucketPolicy, err := c.getBucketPolicy(ctx, bucketName)
|
||||
if err != nil {
|
||||
errResponse := ToErrorResponse(err)
|
||||
if errResponse.Code == "NoSuchBucketPolicy" {
|
||||
return "", nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
return bucketPolicy, nil
|
||||
}
|
||||
|
||||
// Request server for current bucket policy.
|
||||
func (c Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("policy", "")
|
||||
|
||||
// Execute GET on bucket to list objects.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
|
||||
bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
policy := string(bucketPolicyBuf)
|
||||
return policy, err
|
||||
}
|
149
vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
generated
vendored
Normal file
149
vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
generated
vendored
Normal file
@ -0,0 +1,149 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/replication"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// RemoveBucketReplication removes a replication config on an existing bucket.
|
||||
func (c Client) RemoveBucketReplication(ctx context.Context, bucketName string) error {
|
||||
return c.removeBucketReplication(ctx, bucketName)
|
||||
}
|
||||
|
||||
// SetBucketReplication sets a replication config on an existing bucket.
|
||||
func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If replication is empty then delete it.
|
||||
if cfg.Empty() {
|
||||
return c.removeBucketReplication(ctx, bucketName)
|
||||
}
|
||||
// Save the updated replication.
|
||||
return c.putBucketReplication(ctx, bucketName, cfg)
|
||||
}
|
||||
|
||||
// Saves a new bucket replication.
|
||||
func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("replication", "")
|
||||
replication, err := xml.Marshal(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(replication),
|
||||
contentLength: int64(len(replication)),
|
||||
contentMD5Base64: sumMD5Base64(replication),
|
||||
}
|
||||
|
||||
// Execute PUT to upload a new bucket replication config.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove replication from a bucket.
|
||||
func (c Client) removeBucketReplication(ctx context.Context, bucketName string) error {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("replication", "")
|
||||
|
||||
// Execute DELETE on objectName.
|
||||
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBucketReplication fetches bucket replication configuration.If config is not
|
||||
// found, returns empty config with nil error.
|
||||
func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName)
|
||||
if err != nil {
|
||||
errResponse := ToErrorResponse(err)
|
||||
if errResponse.Code == "ReplicationConfigurationNotFoundError" {
|
||||
return cfg, nil
|
||||
}
|
||||
return cfg, err
|
||||
}
|
||||
return bucketReplicationCfg, nil
|
||||
}
|
||||
|
||||
// Request server for current bucket replication config.
|
||||
func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("replication", "")
|
||||
|
||||
// Execute GET on bucket to get replication config.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return cfg, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
|
||||
if err = xmlDecoder(resp.Body, &cfg); err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
135
vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
generated
vendored
Normal file
135
vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
)
|
||||
|
||||
// GetBucketTagging fetch tagging configuration for a bucket with a
|
||||
// context to control cancellations and timeouts.
|
||||
func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("tagging", "")
|
||||
|
||||
// Execute GET on bucket to get tagging configuration.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
|
||||
defer io.Copy(ioutil.Discard, resp.Body)
|
||||
return tags.ParseBucketXML(resp.Body)
|
||||
}
|
||||
|
||||
// SetBucketTagging sets tagging configuration for a bucket
|
||||
// with a context to control cancellations and timeouts.
|
||||
func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tags == nil {
|
||||
return errors.New("nil tags passed")
|
||||
}
|
||||
|
||||
buf, err := xml.Marshal(tags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("tagging", "")
|
||||
|
||||
// Content-length is mandatory to set a default encryption configuration
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(buf),
|
||||
contentLength: int64(len(buf)),
|
||||
contentMD5Base64: sumMD5Base64(buf),
|
||||
}
|
||||
|
||||
// Execute PUT on bucket to put tagging configuration.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveBucketTagging removes tagging configuration for a
|
||||
// bucket with a context to control cancellations and timeouts.
|
||||
func (c Client) RemoveBucketTagging(ctx context.Context, bucketName string) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("tagging", "")
|
||||
|
||||
// Execute DELETE on bucket to remove tagging configuration.
|
||||
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
return nil
|
||||
}
|
137
vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
generated
vendored
Normal file
137
vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// SetBucketVersioning sets a bucket versioning configuration
|
||||
func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("versioning", "")
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(buf),
|
||||
contentLength: int64(len(buf)),
|
||||
contentMD5Base64: sumMD5Base64(buf),
|
||||
contentSHA256Hex: sum256Hex(buf),
|
||||
}
|
||||
|
||||
// Execute PUT to set a bucket versioning.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnableVersioning - enable object versioning in given bucket.
|
||||
func (c Client) EnableVersioning(ctx context.Context, bucketName string) error {
|
||||
return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"})
|
||||
}
|
||||
|
||||
// SuspendVersioning - suspend object versioning in given bucket.
|
||||
func (c Client) SuspendVersioning(ctx context.Context, bucketName string) error {
|
||||
return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"})
|
||||
}
|
||||
|
||||
// BucketVersioningConfiguration is the versioning configuration structure,
// marshalled to/from the XML body of the bucket ?versioning sub-resource.
type BucketVersioningConfiguration struct {
	XMLName xml.Name `xml:"VersioningConfiguration"`
	// Status holds the versioning state; compare against the state
	// constants below ("Enabled" / "Suspended").
	Status string `xml:"Status"`
	// MFADelete is omitted from the XML when empty.
	MFADelete string `xml:"MfaDelete,omitempty"`
}

// Various supported states
const (
	Enabled = "Enabled"
	// Disabled State = "Disabled" only used by MFA Delete not supported yet.
	Suspended = "Suspended"
)

// Enabled returns true if bucket versioning is enabled
func (b BucketVersioningConfiguration) Enabled() bool {
	return b.Status == Enabled
}

// Suspended returns true if bucket versioning is suspended
func (b BucketVersioningConfiguration) Suspended() bool {
	return b.Status == Suspended
}
|
||||
|
||||
// GetBucketVersioning gets the versioning configuration on
|
||||
// an existing bucket with a context to control cancellations and timeouts.
|
||||
func (c Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return BucketVersioningConfiguration{}, err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("versioning", "")
|
||||
|
||||
// Execute GET on bucket to get the versioning configuration.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return BucketVersioningConfiguration{}, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
|
||||
versioningConfig := BucketVersioningConfiguration{}
|
||||
if err = xmlDecoder(resp.Body, &versioningConfig); err != nil {
|
||||
return versioningConfig, err
|
||||
}
|
||||
|
||||
return versioningConfig, nil
|
||||
}
|
575
vendor/github.com/minio/minio-go/v7/api-compose-object.go
generated
vendored
Normal file
575
vendor/github.com/minio/minio-go/v7/api-compose-object.go
generated
vendored
Normal file
@ -0,0 +1,575 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017, 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
type CopyDestOptions struct {
	Bucket string // points to destination bucket
	Object string // points to destination object

	// `Encryption` is the key info for server-side-encryption with customer
	// provided key. If it is nil, no encryption is performed.
	Encryption encrypt.ServerSide

	// `UserMetadata` is the user-metadata key-value pairs to be set on the
	// destination. The keys are automatically prefixed with `x-amz-meta-`
	// if needed. If nil is passed, and if only a single source (of any
	// size) is provided in the ComposeObject call, then metadata from the
	// source is copied to the destination.
	UserMetadata map[string]string
	// ReplaceMetadata gates UserMetadata: it is only written to the
	// destination when this is true; otherwise UserMetadata is ignored and
	// the source object's metadata is preserved.
	// NOTE: setting this to true with an empty UserMetadata leaves the
	// destination object with no metadata at all.
	ReplaceMetadata bool

	// `UserTags` is the user defined object tags to be set on destination.
	// This will be set only if the `ReplaceTags` field is set to true.
	// Otherwise this field is ignored
	UserTags    map[string]string
	ReplaceTags bool

	// Specifies whether you want to apply a Legal Hold to the copied object.
	LegalHold LegalHoldStatus

	// Object Retention related fields
	Mode            RetentionMode
	RetainUntilDate time.Time

	Size int64 // Needs to be specified if progress bar is specified.
	// Progress of the entire copy operation will be sent here.
	Progress io.Reader
}
|
||||
|
||||
// filterCustomMeta strips a leading `x-amz-meta-` prefix (matched case
// insensitively) from user metadata keys and drops duplicate keys that
// collide after the prefix removal (the first occurrence seen wins).
func filterCustomMeta(userMeta map[string]string) map[string]string {
	const metaPrefix = "x-amz-meta-"
	filtered := make(map[string]string, len(userMeta))
	for key, value := range userMeta {
		if strings.HasPrefix(strings.ToLower(key), metaPrefix) {
			key = key[len(metaPrefix):]
		}
		if _, seen := filtered[key]; seen {
			continue
		}
		filtered[key] = value
	}
	return filtered
}
|
||||
|
||||
// Marshal converts all the CopyDestOptions into their
|
||||
// equivalent HTTP header representation
|
||||
func (opts CopyDestOptions) Marshal(header http.Header) {
|
||||
const replaceDirective = "REPLACE"
|
||||
if opts.ReplaceTags {
|
||||
header.Set(amzTaggingHeaderDirective, replaceDirective)
|
||||
if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
|
||||
header.Set(amzTaggingHeader, tags)
|
||||
}
|
||||
}
|
||||
|
||||
if opts.LegalHold != LegalHoldStatus("") {
|
||||
header.Set(amzLegalHoldHeader, opts.LegalHold.String())
|
||||
}
|
||||
|
||||
if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
|
||||
header.Set(amzLockMode, opts.Mode.String())
|
||||
header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
if opts.Encryption != nil {
|
||||
opts.Encryption.Marshal(header)
|
||||
}
|
||||
|
||||
if opts.ReplaceMetadata {
|
||||
header.Set("x-amz-metadata-directive", replaceDirective)
|
||||
for k, v := range filterCustomMeta(opts.UserMetadata) {
|
||||
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
|
||||
header.Set(k, v)
|
||||
} else {
|
||||
header.Set("x-amz-meta-"+k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// toDestinationInfo returns a validated copyOptions object.
|
||||
func (opts CopyDestOptions) validate() (err error) {
|
||||
// Input validation.
|
||||
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.Progress != nil && opts.Size < 0 {
|
||||
return errInvalidArgument("For progress bar effective size needs to be specified")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopySrcOptions represents a source object to be copied, using
// server-side copying APIs.
type CopySrcOptions struct {
	Bucket, Object string // source bucket and object name
	// VersionID, when set, pins the copy to a specific object version.
	VersionID string
	// Conditional-copy preconditions, sent as x-amz-copy-source-if-* headers.
	MatchETag            string
	NoMatchETag          string
	MatchModifiedSince   time.Time
	MatchUnmodifiedSince time.Time
	// MatchRange restricts the copy to the inclusive byte range [Start, End].
	MatchRange bool
	Start, End int64
	// Encryption carries the source object's server-side-encryption parameters.
	Encryption encrypt.ServerSide
}
|
||||
|
||||
// Marshal converts all the CopySrcOptions into their
|
||||
// equivalent HTTP header representation
|
||||
func (opts CopySrcOptions) Marshal(header http.Header) {
|
||||
// Set the source header
|
||||
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
|
||||
if opts.VersionID != "" {
|
||||
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
|
||||
}
|
||||
|
||||
if opts.MatchETag != "" {
|
||||
header.Set("x-amz-copy-source-if-match", opts.MatchETag)
|
||||
}
|
||||
if opts.NoMatchETag != "" {
|
||||
header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
|
||||
}
|
||||
|
||||
if !opts.MatchModifiedSince.IsZero() {
|
||||
header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
|
||||
}
|
||||
if !opts.MatchUnmodifiedSince.IsZero() {
|
||||
header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
|
||||
}
|
||||
|
||||
if opts.Encryption != nil {
|
||||
encrypt.SSECopy(opts.Encryption).Marshal(header)
|
||||
}
|
||||
}
|
||||
|
||||
func (opts CopySrcOptions) validate() (err error) {
|
||||
// Input validation.
|
||||
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.Start > opts.End || opts.Start < 0 {
|
||||
return errInvalidArgument("start must be non-negative, and start must be at most end.")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyObjectDo is the low level implementation of the CopyObject API,
// issuing a single server-side copy request; supports only upto 5GiB
// worth of copy. Returns the destination's key, ETag and last-modified
// time as decoded from the server's copy-object result.
func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
	metadata map[string]string, dstOpts PutObjectOptions) (ObjectInfo, error) {

	// Build headers.
	headers := make(http.Header)

	// Set all the metadata headers.
	for k, v := range metadata {
		headers.Set(k, v)
	}
	// Internal replication bookkeeping headers — only set when present in
	// the destination options.
	if !dstOpts.Internal.ReplicationStatus.Empty() {
		headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
	}
	if !dstOpts.Internal.SourceMTime.IsZero() {
		headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
	}
	if dstOpts.Internal.SourceETag != "" {
		headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
	}
	if len(dstOpts.UserTags) != 0 {
		headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
	}

	reqMetadata := requestMetadata{
		bucketName:   destBucket,
		objectName:   destObject,
		customHeader: headers,
	}
	// A specific source version must be a well-formed UUID; it is passed
	// through as the versionId query parameter.
	if dstOpts.Internal.SourceVersionID != "" {
		if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
			return ObjectInfo{}, errInvalidArgument(err.Error())
		}
		urlValues := make(url.Values)
		urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
		reqMetadata.queryValues = urlValues
	}

	// Set the source header
	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))

	// Send upload-part-copy request
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return ObjectInfo{}, err
	}

	// Check if we got an error response.
	if resp.StatusCode != http.StatusOK {
		return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
	}

	cpObjRes := copyObjectResult{}
	err = xmlDecoder(resp.Body, &cpObjRes)
	if err != nil {
		return ObjectInfo{}, err
	}

	// The server quotes the ETag; strip the quotes before returning it.
	objInfo := ObjectInfo{
		Key:          destObject,
		ETag:         strings.Trim(cpObjRes.ETag, "\""),
		LastModified: cpObjRes.LastModified,
	}
	return objInfo, nil
}
|
||||
|
||||
// copyObjectPartDo performs an upload-part-copy request: it copies bytes
// [startOffset, startOffset+length-1] of the source object into part
// partID of the multipart upload identified by uploadID on the
// destination, returning the resulting CompletePart.
func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
	partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {

	headers := make(http.Header)

	// Set source
	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))

	if startOffset < 0 {
		return p, errInvalidArgument("startOffset must be non-negative")
	}

	// The range header is only sent for an explicit non-negative length.
	if length >= 0 {
		headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
	}

	for k, v := range metadata {
		headers.Set(k, v)
	}

	queryValues := make(url.Values)
	queryValues.Set("partNumber", strconv.Itoa(partID))
	queryValues.Set("uploadId", uploadID)

	resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
		bucketName:   destBucket,
		objectName:   destObject,
		customHeader: headers,
		queryValues:  queryValues,
	})
	defer closeResponse(resp)
	if err != nil {
		return
	}

	// Check if we got an error response.
	if resp.StatusCode != http.StatusOK {
		return p, httpRespToErrorResponse(resp, destBucket, destObject)
	}

	// Decode copy-part response on success.
	cpObjRes := copyObjectResult{}
	err = xmlDecoder(resp.Body, &cpObjRes)
	if err != nil {
		return p, err
	}
	p.PartNumber, p.ETag = partID, cpObjRes.ETag
	return p, nil
}
|
||||
|
||||
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request. The caller supplies the
// x-amz-copy-source* headers; this helper only adds the part-number and
// upload-id query parameters.
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
	headers http.Header) (p CompletePart, err error) {

	// Build query parameters
	urlValues := make(url.Values)
	urlValues.Set("partNumber", strconv.Itoa(partNumber))
	urlValues.Set("uploadId", uploadID)

	// Send upload-part-copy request
	resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
		bucketName:   bucket,
		objectName:   object,
		customHeader: headers,
		queryValues:  urlValues,
	})
	defer closeResponse(resp)
	if err != nil {
		return p, err
	}

	// Check if we got an error response.
	if resp.StatusCode != http.StatusOK {
		return p, httpRespToErrorResponse(resp, bucket, object)
	}

	// Decode copy-part response on success.
	cpObjRes := copyObjectResult{}
	err = xmlDecoder(resp.Body, &cpObjRes)
	if err != nil {
		return p, err
	}
	p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
	return p, nil
}
|
||||
|
||||
// ComposeObject - creates an object using server-side copying
|
||||
// of existing objects. It takes a list of source objects (with optional offsets)
|
||||
// and concatenates them into a new object using only server-side copying
|
||||
// operations. Optionally takes progress reader hook for applications to
|
||||
// look at current progress.
|
||||
func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
|
||||
if len(srcs) < 1 || len(srcs) > maxPartsCount {
|
||||
return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.")
|
||||
}
|
||||
|
||||
for _, src := range srcs {
|
||||
if err := src.validate(); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := dst.validate(); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
srcObjectInfos := make([]ObjectInfo, len(srcs))
|
||||
srcObjectSizes := make([]int64, len(srcs))
|
||||
var totalSize, totalParts int64
|
||||
var err error
|
||||
for i, src := range srcs {
|
||||
opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
|
||||
srcObjectInfos[i], err = c.statObject(context.Background(), src.Bucket, src.Object, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
srcCopySize := srcObjectInfos[i].Size
|
||||
// Check if a segment is specified, and if so, is the
|
||||
// segment within object bounds?
|
||||
if src.MatchRange {
|
||||
// Since range is specified,
|
||||
// 0 <= src.start <= src.end
|
||||
// so only invalid case to check is:
|
||||
if src.End >= srcCopySize || src.Start < 0 {
|
||||
return UploadInfo{}, errInvalidArgument(
|
||||
fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
|
||||
i, src.Start, src.End, srcCopySize))
|
||||
}
|
||||
srcCopySize = src.End - src.Start + 1
|
||||
}
|
||||
|
||||
// Only the last source may be less than `absMinPartSize`
|
||||
if srcCopySize < absMinPartSize && i < len(srcs)-1 {
|
||||
return UploadInfo{}, errInvalidArgument(
|
||||
fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
|
||||
}
|
||||
|
||||
// Is data to copy too large?
|
||||
totalSize += srcCopySize
|
||||
if totalSize > maxMultipartPutObjectSize {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
|
||||
}
|
||||
|
||||
// record source size
|
||||
srcObjectSizes[i] = srcCopySize
|
||||
|
||||
// calculate parts needed for current source
|
||||
totalParts += partsRequired(srcCopySize)
|
||||
// Do we need more parts than we are allowed?
|
||||
if totalParts > maxPartsCount {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
|
||||
"Your proposed compose object requires more than %d parts", maxPartsCount))
|
||||
}
|
||||
}
|
||||
|
||||
// Single source object case (i.e. when only one source is
|
||||
// involved, it is being copied wholly and at most 5GiB in
|
||||
// size, emptyfiles are also supported).
|
||||
if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
|
||||
return c.CopyObject(ctx, dst, srcs[0])
|
||||
}
|
||||
|
||||
// Now, handle multipart-copy cases.
|
||||
|
||||
// 1. Ensure that the object has not been changed while
|
||||
// we are copying data.
|
||||
for i, src := range srcs {
|
||||
src.MatchETag = srcObjectInfos[i].ETag
|
||||
}
|
||||
|
||||
// 2. Initiate a new multipart upload.
|
||||
|
||||
// Set user-metadata on the destination object. If no
|
||||
// user-metadata is specified, and there is only one source,
|
||||
// (only) then metadata from source is copied.
|
||||
var userMeta map[string]string
|
||||
if dst.ReplaceMetadata {
|
||||
userMeta = dst.UserMetadata
|
||||
} else {
|
||||
userMeta = srcObjectInfos[0].UserMetadata
|
||||
}
|
||||
|
||||
var userTags map[string]string
|
||||
if dst.ReplaceTags {
|
||||
userTags = dst.UserTags
|
||||
} else {
|
||||
userTags = srcObjectInfos[0].UserTags
|
||||
}
|
||||
|
||||
uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
|
||||
ServerSideEncryption: dst.Encryption,
|
||||
UserMetadata: userMeta,
|
||||
UserTags: userTags,
|
||||
Mode: dst.Mode,
|
||||
RetainUntilDate: dst.RetainUntilDate,
|
||||
LegalHold: dst.LegalHold,
|
||||
})
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
// 3. Perform copy part uploads
|
||||
objParts := []CompletePart{}
|
||||
partIndex := 1
|
||||
for i, src := range srcs {
|
||||
var h = make(http.Header)
|
||||
src.Marshal(h)
|
||||
if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
|
||||
dst.Encryption.Marshal(h)
|
||||
}
|
||||
|
||||
// calculate start/end indices of parts after
|
||||
// splitting.
|
||||
startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
|
||||
for j, start := range startIdx {
|
||||
end := endIdx[j]
|
||||
|
||||
// Add (or reset) source range header for
|
||||
// upload part copy request.
|
||||
h.Set("x-amz-copy-source-range",
|
||||
fmt.Sprintf("bytes=%d-%d", start, end))
|
||||
|
||||
// make upload-part-copy request
|
||||
complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
|
||||
dst.Object, uploadID, partIndex, h)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
if dst.Progress != nil {
|
||||
io.CopyN(ioutil.Discard, dst.Progress, end-start+1)
|
||||
}
|
||||
objParts = append(objParts, complPart)
|
||||
partIndex++
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Make final complete-multipart request.
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
|
||||
completeMultipartUpload{Parts: objParts})
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
uploadInfo.Size = totalSize
|
||||
return uploadInfo, nil
|
||||
}
|
||||
|
||||
// partsRequired is maximum parts possible with
|
||||
// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1))
|
||||
func partsRequired(size int64) int64 {
|
||||
maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
|
||||
r := size / int64(maxPartSize)
|
||||
if size%int64(maxPartSize) > 0 {
|
||||
r++
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// calculateEvenSplits - computes splits for a source and returns
|
||||
// start and end index slices. Splits happen evenly to be sure that no
|
||||
// part is less than 5MiB, as that could fail the multipart request if
|
||||
// it is not the last part.
|
||||
func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
|
||||
if size == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
reqParts := partsRequired(size)
|
||||
startIndex = make([]int64, reqParts)
|
||||
endIndex = make([]int64, reqParts)
|
||||
// Compute number of required parts `k`, as:
|
||||
//
|
||||
// k = ceiling(size / copyPartSize)
|
||||
//
|
||||
// Now, distribute the `size` bytes in the source into
|
||||
// k parts as evenly as possible:
|
||||
//
|
||||
// r parts sized (q+1) bytes, and
|
||||
// (k - r) parts sized q bytes, where
|
||||
//
|
||||
// size = q * k + r (by simple division of size by k,
|
||||
// so that 0 <= r < k)
|
||||
//
|
||||
start := src.Start
|
||||
if start == -1 {
|
||||
start = 0
|
||||
}
|
||||
quot, rem := size/reqParts, size%reqParts
|
||||
nextStart := start
|
||||
for j := int64(0); j < reqParts; j++ {
|
||||
curPartSize := quot
|
||||
if j < rem {
|
||||
curPartSize++
|
||||
}
|
||||
|
||||
cStart := nextStart
|
||||
cEnd := cStart + curPartSize - 1
|
||||
nextStart = cEnd + 1
|
||||
|
||||
startIndex[j], endIndex[j] = cStart, cEnd
|
||||
}
|
||||
return
|
||||
}
|
173
vendor/github.com/minio/minio-go/v7/api-datatypes.go
generated
vendored
Normal file
173
vendor/github.com/minio/minio-go/v7/api-datatypes.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BucketInfo container for bucket metadata, as returned by the
// bucket-listing APIs.
type BucketInfo struct {
	// The name of the bucket.
	Name string `json:"name"`
	// Date the bucket was created.
	CreationDate time.Time `json:"creationDate"`
}
|
||||
|
||||
// StringMap represents map with custom UnmarshalXML
|
||||
type StringMap map[string]string
|
||||
|
||||
// UnmarshalXML unmarshals the XML into a map of string to strings,
|
||||
// creating a key in the map for each tag and setting it's value to the
|
||||
// tags contents.
|
||||
//
|
||||
// The fact this function is on the pointer of Map is important, so that
|
||||
// if m is nil it can be initialized, which is often the case if m is
|
||||
// nested in another xml structural. This is also why the first thing done
|
||||
// on the first line is initialize it.
|
||||
func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
*m = StringMap{}
|
||||
type xmlMapEntry struct {
|
||||
XMLName xml.Name
|
||||
Value string `xml:",chardata"`
|
||||
}
|
||||
for {
|
||||
var e xmlMapEntry
|
||||
err := d.Decode(&e)
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
(*m)[e.XMLName.Local] = e.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Owner name, identifying the owner of a bucket or object.
type Owner struct {
	DisplayName string `json:"name"` // human-readable owner name
	ID          string `json:"id"`   // owner identifier
}
|
||||
|
||||
// UploadInfo contains information about the
// newly uploaded or copied object.
type UploadInfo struct {
	Bucket       string    // bucket the object was written to
	Key          string    // object name
	ETag         string    // entity tag of the stored object
	Size         int64     // size in bytes
	LastModified time.Time // last-modified time, when reported by the server
	Location     string    // location value, when reported by the server
	VersionID    string    // version ID, when reported by the server

	// Lifecycle expiry-date and ruleID associated with the expiry
	// not to be confused with `Expires` HTTP header.
	Expiration       time.Time
	ExpirationRuleID string
}
|
||||
|
||||
// ObjectInfo container for object metadata.
type ObjectInfo struct {
	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
	// ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
	// each parts concatenated into one string.
	ETag string `json:"etag"`

	Key          string    `json:"name"`         // Name of the object
	LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
	Size         int64     `json:"size"`         // Size in bytes of the object.
	ContentType  string    `json:"contentType"`  // A standard MIME type describing the format of the object data.
	Expires      time.Time `json:"expires"`      // The date and time at which the object is no longer able to be cached.

	// Collection of additional metadata on the object.
	// eg: x-amz-meta-*, content-encoding etc.
	Metadata http.Header `json:"metadata" xml:"-"`

	// x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
	UserMetadata StringMap `json:"userMetadata"`

	// x-amz-tagging values in their k/v values.
	UserTags map[string]string `json:"userTags"`

	// x-amz-tagging-count value
	UserTagCount int

	// Owner name.
	Owner Owner

	// ACL grant.
	Grant []struct {
		Grantee struct {
			ID          string `xml:"ID"`
			DisplayName string `xml:"DisplayName"`
			URI         string `xml:"URI"`
		} `xml:"Grantee"`
		Permission string `xml:"Permission"`
	} `xml:"Grant"`

	// The class of storage used to store the object.
	StorageClass string `json:"storageClass"`

	// Versioning related information
	IsLatest       bool
	IsDeleteMarker bool
	VersionID      string `xml:"VersionId"`

	// x-amz-replication-status value is either in one of the following states
	// - COMPLETE
	// - PENDING
	// - FAILED
	// - REPLICA (on the destination)
	ReplicationStatus string `xml:"ReplicationStatus"`

	// Lifecycle expiry-date and ruleID associated with the expiry
	// not to be confused with `Expires` HTTP header.
	Expiration       time.Time
	ExpirationRuleID string

	// Err records any error associated with producing this entry;
	// excluded from JSON output.
	Err error `json:"-"`
}
|
||||
|
||||
// ObjectMultipartInfo container for multipart object metadata.
type ObjectMultipartInfo struct {
	// Date and time at which the multipart upload was initiated.
	Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`

	// Initiator and Owner of the upload (types declared elsewhere in this package).
	Initiator initiator
	Owner     owner

	// The type of storage to use for the object. Defaults to 'STANDARD'.
	StorageClass string

	// Key of the object for which the multipart upload was initiated.
	Key string

	// Size in bytes of the object.
	Size int64

	// Upload ID that identifies the multipart upload.
	UploadID string `xml:"UploadId"`

	// Error encountered, if any, for this entry.
	Err error
}
|
271
vendor/github.com/minio/minio-go/v7/api-error-response.go
generated
vendored
Normal file
271
vendor/github.com/minio/minio-go/v7/api-error-response.go
generated
vendored
Normal file
@ -0,0 +1,271 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
/* **** SAMPLE ERROR RESPONSE ****
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Error>
|
||||
<Code>AccessDenied</Code>
|
||||
<Message>Access Denied</Message>
|
||||
<BucketName>bucketName</BucketName>
|
||||
<Key>objectName</Key>
|
||||
<RequestId>F19772218238A85A</RequestId>
|
||||
<HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
|
||||
</Error>
|
||||
*/
|
||||
|
||||
// ErrorResponse - Is the typed error returned by all API operations.
// ErrorResponse struct should be comparable since it is compared inside
// golang http API (https://github.com/golang/go/issues/29768)
type ErrorResponse struct {
	XMLName    xml.Name `xml:"Error" json:"-"`
	Code       string // S3-style error code, e.g. "NoSuchKey".
	Message    string // Human-readable error message from the server.
	BucketName string // Bucket the failing request targeted, if any.
	Key        string // Object key the failing request targeted, if any.
	RequestID  string `xml:"RequestId"`
	HostID     string `xml:"HostId"`

	// Region where the bucket is located. This header is returned
	// only in HEAD bucket and ListObjects response.
	Region string

	// Captures the server string returned in response header.
	Server string

	// Underlying HTTP status code for the returned error
	StatusCode int `xml:"-" json:"-"`
}
|
||||
|
||||
// ToErrorResponse - Returns parsed ErrorResponse struct from body and
|
||||
// http headers.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// import s3 "github.com/minio/minio-go/v7"
|
||||
// ...
|
||||
// ...
|
||||
// reader, stat, err := s3.GetObject(...)
|
||||
// if err != nil {
|
||||
// resp := s3.ToErrorResponse(err)
|
||||
// }
|
||||
// ...
|
||||
func ToErrorResponse(err error) ErrorResponse {
|
||||
switch err := err.(type) {
|
||||
case ErrorResponse:
|
||||
return err
|
||||
default:
|
||||
return ErrorResponse{}
|
||||
}
|
||||
}
|
||||
|
||||
// Error - Returns S3 error string.
|
||||
func (e ErrorResponse) Error() string {
|
||||
if e.Message == "" {
|
||||
msg, ok := s3ErrorResponseMap[e.Code]
|
||||
if !ok {
|
||||
msg = fmt.Sprintf("Error response code %s.", e.Code)
|
||||
}
|
||||
return msg
|
||||
}
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// Common string for errors to report issue location in unexpected
// cases.
const (
	// reportIssue is appended to messages for unexpected internal states.
	reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
)
|
||||
|
||||
// httpRespToErrorResponse returns a new encoded ErrorResponse
// structure as error.
//
// The server's XML error body is preferred; when it cannot be decoded,
// a best-effort ErrorResponse is synthesized from the HTTP status code,
// and missing identifiers are backfilled from the x-amz-* headers.
func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
	if resp == nil {
		msg := "Empty http response. " + reportIssue
		return errInvalidArgument(msg)
	}

	errResp := ErrorResponse{
		StatusCode: resp.StatusCode,
		Server:     resp.Header.Get("Server"),
	}

	err := xmlDecoder(resp.Body, &errResp)
	// Xml decoding failed with no body, fall back to HTTP headers.
	if err != nil {
		switch resp.StatusCode {
		case http.StatusNotFound:
			// 404 with no object name means the bucket itself was not found.
			if objectName == "" {
				errResp = ErrorResponse{
					StatusCode: resp.StatusCode,
					Code:       "NoSuchBucket",
					Message:    "The specified bucket does not exist.",
					BucketName: bucketName,
				}
			} else {
				errResp = ErrorResponse{
					StatusCode: resp.StatusCode,
					Code:       "NoSuchKey",
					Message:    "The specified key does not exist.",
					BucketName: bucketName,
					Key:        objectName,
				}
			}
		case http.StatusForbidden:
			errResp = ErrorResponse{
				StatusCode: resp.StatusCode,
				Code:       "AccessDenied",
				Message:    "Access Denied.",
				BucketName: bucketName,
				Key:        objectName,
			}
		case http.StatusConflict:
			errResp = ErrorResponse{
				StatusCode: resp.StatusCode,
				Code:       "Conflict",
				Message:    "Bucket not empty.",
				BucketName: bucketName,
			}
		case http.StatusPreconditionFailed:
			errResp = ErrorResponse{
				StatusCode: resp.StatusCode,
				Code:       "PreconditionFailed",
				Message:    s3ErrorResponseMap["PreconditionFailed"],
				BucketName: bucketName,
				Key:        objectName,
			}
		default:
			// Unknown status: surface the raw HTTP status text.
			errResp = ErrorResponse{
				StatusCode: resp.StatusCode,
				Code:       resp.Status,
				Message:    resp.Status,
				BucketName: bucketName,
			}
		}
	}

	// Save hostID, requestID and region information
	// from headers if not available through error XML.
	if errResp.RequestID == "" {
		errResp.RequestID = resp.Header.Get("x-amz-request-id")
	}
	if errResp.HostID == "" {
		errResp.HostID = resp.Header.Get("x-amz-id-2")
	}
	if errResp.Region == "" {
		errResp.Region = resp.Header.Get("x-amz-bucket-region")
	}
	if errResp.Code == "InvalidRegion" && errResp.Region != "" {
		errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
	}

	return errResp
}
|
||||
|
||||
// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
|
||||
func errTransferAccelerationBucket(bucketName string) error {
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidArgument",
|
||||
Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
|
||||
BucketName: bucketName,
|
||||
}
|
||||
}
|
||||
|
||||
// errEntityTooLarge - Input size is larger than supported maximum.
|
||||
func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
|
||||
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "EntityTooLarge",
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
}
|
||||
|
||||
// errEntityTooSmall - Input size is smaller than supported minimum.
|
||||
func errEntityTooSmall(totalSize int64, bucketName, objectName string) error {
|
||||
msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "EntityTooSmall",
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
}
|
||||
|
||||
// errUnexpectedEOF - Unexpected end of file reached.
|
||||
func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
|
||||
msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "UnexpectedEOF",
|
||||
Message: msg,
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
}
|
||||
}
|
||||
|
||||
// errInvalidBucketName - Invalid bucket name response.
|
||||
func errInvalidBucketName(message string) error {
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidBucketName",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
||||
|
||||
// errInvalidObjectName - Invalid object name response.
|
||||
func errInvalidObjectName(message string) error {
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Code: "NoSuchKey",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
||||
|
||||
// errInvalidArgument - Invalid argument response.
|
||||
func errInvalidArgument(message string) error {
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidArgument",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
||||
|
||||
// errAPINotSupported - API not supported response
|
||||
// The specified API call is not supported
|
||||
func errAPINotSupported(message string) error {
|
||||
return ErrorResponse{
|
||||
StatusCode: http.StatusNotImplemented,
|
||||
Code: "APINotSupported",
|
||||
Message: message,
|
||||
RequestID: "minio",
|
||||
}
|
||||
}
|
140
vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
generated
vendored
Normal file
140
vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// accessControlPolicy mirrors the XML document returned by a GET ?acl
// request: the owner of the resource plus the list of ACL grants.
type accessControlPolicy struct {
	Owner struct {
		ID          string `xml:"ID"`
		DisplayName string `xml:"DisplayName"`
	} `xml:"Owner"`
	AccessControlList struct {
		Grant []struct {
			Grantee struct {
				ID          string `xml:"ID"`
				DisplayName string `xml:"DisplayName"`
				URI         string `xml:"URI"`
			} `xml:"Grantee"`
			Permission string `xml:"Permission"`
		} `xml:"Grant"`
	} `xml:"AccessControlList"`
}
|
||||
|
||||
// GetObjectACL get object ACLs
//
// It issues a GET ?acl request, stats the object for its base metadata,
// and then merges the ACL information in: either as a single canned ACL
// ("X-Amz-Acl" metadata entry) or as individual X-Amz-Grant-* entries.
func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) {
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName: bucketName,
		objectName: objectName,
		queryValues: url.Values{
			"acl": []string{""},
		},
	})
	if err != nil {
		return nil, err
	}
	defer closeResponse(resp)

	if resp.StatusCode != http.StatusOK {
		return nil, httpRespToErrorResponse(resp, bucketName, objectName)
	}

	res := &accessControlPolicy{}

	if err := xmlDecoder(resp.Body, res); err != nil {
		return nil, err
	}

	// Fetch the object's metadata so the ACL can be layered on top of it.
	objInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{})
	if err != nil {
		return nil, err
	}

	objInfo.Owner.DisplayName = res.Owner.DisplayName
	objInfo.Owner.ID = res.Owner.ID

	objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...)

	// If the grants match a canned ACL, report just that.
	cannedACL := getCannedACL(res)
	if cannedACL != "" {
		objInfo.Metadata.Add("X-Amz-Acl", cannedACL)
		return &objInfo, nil
	}

	// Otherwise report the individual X-Amz-Grant-* header values.
	grantACL := getAmzGrantACL(res)
	for k, v := range grantACL {
		objInfo.Metadata[k] = v
	}

	return &objInfo, nil
}
|
||||
|
||||
func getCannedACL(aCPolicy *accessControlPolicy) string {
|
||||
grants := aCPolicy.AccessControlList.Grant
|
||||
|
||||
switch {
|
||||
case len(grants) == 1:
|
||||
if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
|
||||
return "private"
|
||||
}
|
||||
case len(grants) == 2:
|
||||
for _, g := range grants {
|
||||
if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
|
||||
return "authenticated-read"
|
||||
}
|
||||
if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
|
||||
return "public-read"
|
||||
}
|
||||
if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID {
|
||||
return "bucket-owner-read"
|
||||
}
|
||||
}
|
||||
case len(grants) == 3:
|
||||
for _, g := range grants {
|
||||
if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
|
||||
return "public-read-write"
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string {
|
||||
grants := aCPolicy.AccessControlList.Grant
|
||||
res := map[string][]string{}
|
||||
|
||||
for _, g := range grants {
|
||||
switch {
|
||||
case g.Permission == "READ":
|
||||
res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID)
|
||||
case g.Permission == "WRITE":
|
||||
res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID)
|
||||
case g.Permission == "READ_ACP":
|
||||
res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID)
|
||||
case g.Permission == "WRITE_ACP":
|
||||
res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID)
|
||||
case g.Permission == "FULL_CONTROL":
|
||||
res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
127
vendor/github.com/minio/minio-go/v7/api-get-object-file.go
generated
vendored
Normal file
127
vendor/github.com/minio/minio-go/v7/api-get-object-file.go
generated
vendored
Normal file
@ -0,0 +1,127 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// FGetObject - download contents of an object to a local file.
// The options can be used to specify the GET request further.
//
// The download is written to a temporary ".part.minio" file and renamed
// into place only on success. A pre-existing part file for the same ETag
// is resumed: it is opened in append mode and the GET range starts at
// its current size.
func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return err
	}

	// Verify if destination already exists.
	st, err := os.Stat(filePath)
	if err == nil {
		// If the destination exists and is a directory.
		if st.IsDir() {
			return errInvalidArgument("fileName is a directory.")
		}
	}

	// Proceed if file does not exist. return for all other errors.
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
	}

	// Extract top level directory.
	objectDir, _ := filepath.Split(filePath)
	if objectDir != "" {
		// Create any missing top level directories.
		if err := os.MkdirAll(objectDir, 0700); err != nil {
			return err
		}
	}

	// Gather md5sum.
	objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts))
	if err != nil {
		return err
	}

	// Write to a temporary file "fileName.part.minio" before saving.
	// Including the ETag ties the part file to this object version.
	filePartPath := filePath + objectStat.ETag + ".part.minio"

	// If exists, open in append mode. If not create it as a part file.
	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		return err
	}

	// If we return early with an error, be sure to close and delete
	// filePart. If we have an error along the way there is a chance
	// that filePart is somehow damaged, and we should discard it.
	closeAndRemove := true
	defer func() {
		if closeAndRemove {
			_ = filePart.Close()
			_ = os.Remove(filePartPath)
		}
	}()

	// Issue Stat to get the current offset.
	st, err = filePart.Stat()
	if err != nil {
		return err
	}

	// Initialize get object request headers to set the
	// appropriate range offsets to read from.
	if st.Size() > 0 {
		opts.SetRange(st.Size(), 0)
	}

	// Seek to current position for incoming reader.
	objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts)
	if err != nil {
		return err
	}

	// Write to the part file.
	if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
		return err
	}

	// Close the file before rename, this is specifically needed for Windows users.
	closeAndRemove = false
	if err = filePart.Close(); err != nil {
		return err
	}

	// Safely completed. Now commit by renaming to actual filename.
	if err = os.Rename(filePartPath, filePath); err != nil {
		return err
	}

	// Return.
	return nil
}
|
646
vendor/github.com/minio/minio-go/v7/api-get-object.go
generated
vendored
Normal file
646
vendor/github.com/minio/minio-go/v7/api-get-object.go
generated
vendored
Normal file
@ -0,0 +1,646 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// GetObject wrapper function that accepts a request context
//
// It returns a lazily-initialized *Object: no request is sent to the
// server here. A background goroutine services getRequest messages from
// the Object's Read/ReadAt/Seek/Stat calls, issuing getObject/statObject
// requests on demand, and exits when the done channel is closed.
func (c Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return nil, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return nil, err
	}

	// Detect if snowball is server location we are talking to.
	var snowball bool
	if location, ok := c.bucketLocCache.Get(bucketName); ok {
		if location == "snowball" {
			snowball = true
		}
	}

	var httpReader io.ReadCloser
	var objectInfo ObjectInfo
	var err error

	// Create request channel.
	reqCh := make(chan getRequest)
	// Create response channel.
	resCh := make(chan getResponse)
	// Create done channel.
	doneCh := make(chan struct{})

	// This routine feeds partial object data as and when the caller reads.
	go func() {
		defer close(reqCh)
		defer close(resCh)

		// Used to verify if etag of object has changed since last read.
		var etag string

		// Loop through the incoming control messages and read data.
		for {
			select {
			// When the done channel is closed exit our routine.
			case <-doneCh:
				// Close the http response body before returning.
				// This ends the connection with the server.
				if httpReader != nil {
					httpReader.Close()
				}
				return

			// Gather incoming request.
			case req := <-reqCh:
				// If this is the first request we may not need to do a getObject request yet.
				if req.isFirstReq {
					// First request is a Read/ReadAt.
					if req.isReadOp {
						// Differentiate between wanting the whole object and just a range.
						if req.isReadAt {
							// If this is a ReadAt request only get the specified range.
							// Range is set with respect to the offset and length of the buffer requested.
							// Do not set objectInfo from the first readAt request because it will not get
							// the whole object.
							opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
						} else if req.Offset > 0 {
							opts.SetRange(req.Offset, 0)
						}
						httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts)
						if err != nil {
							resCh <- getResponse{Error: err}
							return
						}
						etag = objectInfo.ETag
						// Read at least firstReq.Buffer bytes, if not we have
						// reached our EOF.
						size, err := readFull(httpReader, req.Buffer)
						if size > 0 && err == io.ErrUnexpectedEOF {
							// If an EOF happens after reading some but not
							// all the bytes ReadFull returns ErrUnexpectedEOF
							err = io.EOF
						}
						// Send back the first response.
						resCh <- getResponse{
							objectInfo: objectInfo,
							Size:       int(size),
							Error:      err,
							didRead:    true,
						}
					} else {
						// First request is a Stat or Seek call.
						// Only need to run a StatObject until an actual Read or ReadAt request comes through.

						// Remove range header if already set, for stat Operations to get original file size.
						delete(opts.headers, "Range")
						objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts))
						if err != nil {
							resCh <- getResponse{
								Error: err,
							}
							// Exit the go-routine.
							return
						}
						etag = objectInfo.ETag
						// Send back the first response.
						resCh <- getResponse{
							objectInfo: objectInfo,
						}
					}
				} else if req.settingObjectInfo { // Request is just to get objectInfo.
					// Remove range header if already set, for stat Operations to get original file size.
					delete(opts.headers, "Range")
					// Check whether this is snowball
					// if yes do not use If-Match feature
					// it doesn't work.
					if etag != "" && !snowball {
						opts.SetMatchETag(etag)
					}
					objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts))
					if err != nil {
						resCh <- getResponse{
							Error: err,
						}
						// Exit the goroutine.
						return
					}
					// Send back the objectInfo.
					resCh <- getResponse{
						objectInfo: objectInfo,
					}
				} else {
					// Offset changes fetch the new object at an Offset.
					// Because the httpReader may not be set by the first
					// request if it was a stat or seek it must be checked
					// if the object has been read or not to only initialize
					// new ones when they haven't been already.
					// All readAt requests are new requests.
					if req.DidOffsetChange || !req.beenRead {
						// Check whether this is snowball
						// if yes do not use If-Match feature
						// it doesn't work.
						if etag != "" && !snowball {
							opts.SetMatchETag(etag)
						}
						if httpReader != nil {
							// Close previously opened http reader.
							httpReader.Close()
						}
						// If this request is a readAt only get the specified range.
						if req.isReadAt {
							// Range is set with respect to the offset and length of the buffer requested.
							opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
						} else if req.Offset > 0 { // Range is set with respect to the offset.
							opts.SetRange(req.Offset, 0)
						}
						httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts)
						if err != nil {
							resCh <- getResponse{
								Error: err,
							}
							return
						}
					}

					// Read at least req.Buffer bytes, if not we have
					// reached our EOF.
					size, err := readFull(httpReader, req.Buffer)
					if size > 0 && err == io.ErrUnexpectedEOF {
						// If an EOF happens after reading some but not
						// all the bytes ReadFull returns ErrUnexpectedEOF
						err = io.EOF
					}
					// Reply back how much was read.
					resCh <- getResponse{
						Size:       int(size),
						Error:      err,
						didRead:    true,
						objectInfo: objectInfo,
					}
				}
			}
		}
	}()

	// Create a newObject through the information sent back by reqCh.
	return newObject(reqCh, resCh, doneCh), nil
}
|
||||
|
||||
// get request message container to communicate with internal
// go-routine.
type getRequest struct {
	// Buffer is the caller-supplied destination slice; its length also
	// determines the byte range requested for ReadAt requests.
	Buffer            []byte
	Offset            int64 // readAt offset.
	DidOffsetChange   bool  // Tracks the offset changes for Seek requests.
	beenRead          bool  // Determines if this is the first time an object is being read.
	isReadAt          bool  // Determines if this request is a request to a specific range
	isReadOp          bool  // Determines if this request is a Read or Read/At request.
	isFirstReq        bool  // Determines if this request is the first time an object is being accessed.
	settingObjectInfo bool  // Determines if this request is to set the objectInfo of an object.
}
|
||||
|
||||
// get response message container to reply back for the request.
type getResponse struct {
	Size       int        // Number of bytes placed into the request's Buffer.
	Error      error      // Error, if any, from the underlying get/stat request.
	didRead    bool       // Lets subsequent calls know whether or not httpReader has been initiated.
	objectInfo ObjectInfo // Used for the first request.
}
|
||||
|
||||
// Object represents an open object. It implements
// Reader, ReaderAt, Seeker, Closer for a HTTP stream.
type Object struct {
	// Mutex. Serializes Object method calls (e.g. Read takes it for the
	// duration of each call).
	mutex *sync.Mutex

	// User allocated and defined.
	reqCh      chan<- getRequest  // Requests to the background fetch goroutine.
	resCh      <-chan getResponse // Responses from the background fetch goroutine.
	doneCh     chan<- struct{}    // Closed to stop the background goroutine.
	currOffset int64              // Current read offset within the object.
	objectInfo ObjectInfo         // Cached metadata from the first stat/get.

	// Ask lower level to initiate data fetching based on currOffset
	seekData bool

	// Keeps track of closed call.
	isClosed bool

	// Keeps track of if this is the first call.
	isStarted bool

	// Previous error saved for future calls.
	prevErr error

	// Keeps track of if this object has been read yet.
	beenRead bool

	// Keeps track of if objectInfo has been set yet.
	objectInfoSet bool
}
|
||||
|
||||
// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object.
// Returns back the size of the buffer read, if anything was read, as well
// as any error encountered. For all first requests sent on the object
// it is also responsible for sending back the objectInfo.
func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
	// Hand the request to the background goroutine and wait for its reply.
	o.reqCh <- request
	response := <-o.resCh

	// Return any error to the top level.
	if response.Error != nil {
		return response, response.Error
	}

	// This was the first request.
	if !o.isStarted {
		// The object has been operated on.
		o.isStarted = true
	}
	// Set the objectInfo if the request was not readAt
	// and it hasn't been set before.
	if !o.objectInfoSet && !request.isReadAt {
		o.objectInfo = response.objectInfo
		o.objectInfoSet = true
	}
	// Set beenRead only if it has not been set before.
	if !o.beenRead {
		o.beenRead = response.didRead
	}
	// Data are ready on the wire, no need to reinitiate connection in lower level
	o.seekData = false

	return response, nil
}
|
||||
|
||||
// setOffset - handles the setting of offsets for
|
||||
// Read/ReadAt/Seek requests.
|
||||
func (o *Object) setOffset(bytesRead int64) error {
|
||||
// Update the currentOffset.
|
||||
o.currOffset += bytesRead
|
||||
|
||||
if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
|
||||
return io.EOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads up to len(b) bytes into b. It returns the number of
// bytes read (0 <= n <= len(b)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
	if o == nil {
		return 0, errInvalidArgument("Object is nil")
	}

	// Locking.
	o.mutex.Lock()
	defer o.mutex.Unlock()

	// prevErr is previous error saved from previous operation.
	// Unlike Stat/ReadAt, io.EOF is NOT excluded here: once EOF has been
	// saved, every subsequent Read keeps returning it until a Seek clears it.
	if o.prevErr != nil || o.isClosed {
		return 0, o.prevErr
	}

	// Create a new request.
	readReq := getRequest{
		isReadOp: true,
		beenRead: o.beenRead,
		Buffer:   b,
	}

	// Alert that this is the first request.
	if !o.isStarted {
		readReq.isFirstReq = true
	}

	// Ask to establish a new data fetch routine based on seekData flag
	readReq.DidOffsetChange = o.seekData
	readReq.Offset = o.currOffset

	// Send and receive from the first request.
	response, err := o.doGetRequest(readReq)
	if err != nil && err != io.EOF {
		// Save the error for future calls.
		o.prevErr = err
		return response.Size, err
	}

	// Bytes read.
	bytesRead := int64(response.Size)

	// Set the new offset; setOffset returns io.EOF when the advanced
	// offset reaches the (known) object size.
	oerr := o.setOffset(bytesRead)
	if oerr != nil {
		// Save the error for future calls.
		o.prevErr = oerr
		return response.Size, oerr
	}

	// Return the response. err here is either nil or io.EOF passed
	// through from the lower layer.
	return response.Size, err
}
|
||||
|
||||
// Stat returns the ObjectInfo structure describing Object.
func (o *Object) Stat() (ObjectInfo, error) {
	if o == nil {
		return ObjectInfo{}, errInvalidArgument("Object is nil")
	}
	// Locking.
	o.mutex.Lock()
	defer o.mutex.Unlock()

	// io.EOF is tolerated: a fully-read object may still be stat'ed.
	// Any other saved error, or a closed object, fails immediately.
	if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
		return ObjectInfo{}, o.prevErr
	}

	// This is the first request, or the objectInfo has not been
	// populated yet — ask the fetch goroutine for it.
	if !o.isStarted || !o.objectInfoSet {
		// Send the request and get the response.
		_, err := o.doGetRequest(getRequest{
			isFirstReq:        !o.isStarted,
			settingObjectInfo: !o.objectInfoSet,
		})
		if err != nil {
			o.prevErr = err
			return ObjectInfo{}, err
		}
	}

	// objectInfo was cached by doGetRequest above (or a prior call).
	return o.objectInfo, nil
}
|
||||
|
||||
// ReadAt reads len(b) bytes from the File starting at byte offset
// off. It returns the number of bytes read and the error, if any.
// ReadAt always returns a non-nil error when n < len(b). At end of
// file, that error is io.EOF.
//
// NOTE(review): unlike the io.ReaderAt contract, this implementation
// mutates the object's current offset (o.currOffset), so it affects
// subsequent Read/Seek calls on the same Object.
func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
	if o == nil {
		return 0, errInvalidArgument("Object is nil")
	}

	// Locking.
	o.mutex.Lock()
	defer o.mutex.Unlock()

	// prevErr is error which was saved in previous operation.
	// io.EOF is tolerated so that reads at other offsets still work.
	if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
		return 0, o.prevErr
	}

	// Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method.
	o.currOffset = offset

	// Can only compare offsets to size when size has been set.
	if o.objectInfoSet {
		// If offset is negative than we return io.EOF.
		// If offset is greater than or equal to object size we return io.EOF.
		if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
			return 0, io.EOF
		}
	}

	// Create the new readAt request.
	readAtReq := getRequest{
		isReadOp:        true,
		isReadAt:        true,
		DidOffsetChange: true,       // Offset always changes.
		beenRead:        o.beenRead, // Set if this is the first request to try and read.
		Offset:          offset,     // Set the offset.
		Buffer:          b,
	}

	// Alert that this is the first request.
	if !o.isStarted {
		readAtReq.isFirstReq = true
	}

	// Send and receive from the first request.
	response, err := o.doGetRequest(readAtReq)
	if err != nil && err != io.EOF {
		// Save the error.
		o.prevErr = err
		return response.Size, err
	}
	// Bytes read.
	bytesRead := int64(response.Size)
	// There is no valid objectInfo yet
	// to compare against for EOF.
	if !o.objectInfoSet {
		// Update the currentOffset.
		o.currOffset += bytesRead
	} else {
		// If this was not the first request update
		// the offsets and compare against objectInfo
		// for EOF.
		oerr := o.setOffset(bytesRead)
		if oerr != nil {
			o.prevErr = oerr
			return response.Size, oerr
		}
	}
	// err is either nil or io.EOF passed through from the lower layer.
	return response.Size, err
}
|
||||
|
||||
// Seek sets the offset for the next Read or Write to offset,
// interpreted according to whence: 0 means relative to the
// origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end.
// Seek returns the new offset and an error, if any.
//
// Seeking to a negative offset is an error. Seeking to any positive
// offset is legal, subsequent io operations succeed until the
// underlying object is not closed.
func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
	if o == nil {
		return 0, errInvalidArgument("Object is nil")
	}

	// Locking.
	o.mutex.Lock()
	defer o.mutex.Unlock()

	// At EOF seeking is legal allow only io.EOF, for any other errors we return.
	if o.prevErr != nil && o.prevErr != io.EOF {
		return 0, o.prevErr
	}

	// Negative offset is valid for whence of '2' (relative to end).
	if offset < 0 && whence != 2 {
		return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence))
	}

	// This is the first request. So before anything else
	// get the ObjectInfo (needed below to bounds-check against Size).
	if !o.isStarted || !o.objectInfoSet {
		// Create the new Seek request.
		seekReq := getRequest{
			isReadOp:   false,
			Offset:     offset,
			isFirstReq: true,
		}
		// Send and receive from the seek request.
		_, err := o.doGetRequest(seekReq)
		if err != nil {
			// Save the error.
			o.prevErr = err
			return 0, err
		}
	}

	// Switch through whence. A Size of -1 means the total size is
	// unknown, in which case the bounds checks are skipped (whence 0/1)
	// or rejected outright (whence 2).
	switch whence {
	default:
		return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
	case 0:
		if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
			return 0, io.EOF
		}
		o.currOffset = offset
	case 1:
		if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
			return 0, io.EOF
		}
		o.currOffset += offset
	case 2:
		// If we don't know the object size return an error for io.SeekEnd
		if o.objectInfo.Size < 0 {
			return 0, errInvalidArgument("Whence END is not supported when the object size is unknown")
		}
		// Seeking to positive offset is valid for whence '2', but
		// since we are backing a Reader we have reached 'EOF' if
		// offset is positive.
		if offset > 0 {
			return 0, io.EOF
		}
		// Seeking to negative position not allowed for whence.
		if o.objectInfo.Size+offset < 0 {
			return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
		}
		o.currOffset = o.objectInfo.Size + offset
	}
	// Reset the saved error since we successfully seeked, let the Read
	// and ReadAt decide.
	if o.prevErr == io.EOF {
		o.prevErr = nil
	}

	// Ask lower level to fetch again from source
	o.seekData = true

	// Return the effective offset.
	return o.currOffset, nil
}
|
||||
|
||||
// Close - The behavior of Close after the first call returns error
|
||||
// for subsequent Close() calls.
|
||||
func (o *Object) Close() (err error) {
|
||||
if o == nil {
|
||||
return errInvalidArgument("Object is nil")
|
||||
}
|
||||
// Locking.
|
||||
o.mutex.Lock()
|
||||
defer o.mutex.Unlock()
|
||||
|
||||
// if already closed return an error.
|
||||
if o.isClosed {
|
||||
return o.prevErr
|
||||
}
|
||||
|
||||
// Close successfully.
|
||||
close(o.doneCh)
|
||||
|
||||
// Save for future operations.
|
||||
errMsg := "Object is already closed. Bad file descriptor."
|
||||
o.prevErr = errors.New(errMsg)
|
||||
// Save here that we closed done channel successfully.
|
||||
o.isClosed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// newObject instantiates a new *minio.Object*
|
||||
// ObjectInfo will be set by setObjectInfo
|
||||
func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
|
||||
return &Object{
|
||||
mutex: &sync.Mutex{},
|
||||
reqCh: reqCh,
|
||||
resCh: resCh,
|
||||
doneCh: doneCh,
|
||||
}
|
||||
}
|
||||
|
||||
// getObject - retrieve object from Object Storage.
|
||||
//
|
||||
// Additionally this function also takes range arguments to download the specified
|
||||
// range bytes of an object. Setting offset and length = 0 will download the full object.
|
||||
//
|
||||
// For more information about the HTTP Range header.
|
||||
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
|
||||
func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
|
||||
// Validate input arguments.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, ObjectInfo{}, nil, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, ObjectInfo{}, nil, err
|
||||
}
|
||||
|
||||
urlValues := make(url.Values)
|
||||
if opts.VersionID != "" {
|
||||
urlValues.Set("versionId", opts.VersionID)
|
||||
}
|
||||
|
||||
// Execute GET on objectName.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
customHeader: opts.Header(),
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, ObjectInfo{}, nil, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||
return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
|
||||
objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header)
|
||||
if err != nil {
|
||||
closeResponse(resp)
|
||||
return nil, ObjectInfo{}, nil, err
|
||||
}
|
||||
|
||||
// do not close body here, caller will close
|
||||
return resp.Body, objectStat, resp.Header, nil
|
||||
}
|
140
vendor/github.com/minio/minio-go/v7/api-get-options.go
generated
vendored
Normal file
140
vendor/github.com/minio/minio-go/v7/api-get-options.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
)
|
||||
|
||||
//AdvancedGetOptions for internal use by MinIO server - not intended for client use.
type AdvancedGetOptions struct {
	// ReplicationDeleteMarker - not consulted anywhere in this file;
	// presumably flags a replicated delete-marker request — TODO confirm
	// against the server-side callers.
	ReplicationDeleteMarker bool
	// ReplicationProxyRequest - when true, Header() emits the
	// minIOBucketReplicationProxyRequest header so a GET/HEAD missing on
	// site A is proxied to site B (active-active replication).
	ReplicationProxyRequest bool
}
|
||||
|
||||
// GetObjectOptions are used to specify additional headers or options
// during GET requests.
type GetObjectOptions struct {
	// headers holds extra request headers, keyed by canonical header
	// name; populated via Set() and friends.
	headers map[string]string
	// ServerSideEncryption carries the SSE configuration; only SSE-C
	// material is marshaled onto GET request headers (see Header()).
	ServerSideEncryption encrypt.ServerSide
	// VersionID selects a specific object version via the `versionId`
	// query parameter when non-empty.
	VersionID string
	// To be not used by external applications
	Internal AdvancedGetOptions
}
|
||||
|
||||
// StatObjectOptions are used to specify additional headers or options
// during GET info/stat requests.
//
// This is a type alias, so StatObjectOptions and GetObjectOptions are
// fully interchangeable.
type StatObjectOptions = GetObjectOptions
|
||||
|
||||
// Header returns the http.Header representation of the GET options.
|
||||
func (o GetObjectOptions) Header() http.Header {
|
||||
headers := make(http.Header, len(o.headers))
|
||||
for k, v := range o.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
|
||||
o.ServerSideEncryption.Marshal(headers)
|
||||
}
|
||||
// this header is set for active-active replication scenario where GET/HEAD
|
||||
// to site A is proxy'd to site B if object/version missing on site A.
|
||||
if o.Internal.ReplicationProxyRequest {
|
||||
headers.Set(minIOBucketReplicationProxyRequest, "true")
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// Set adds a key value pair to the options. The
|
||||
// key-value pair will be part of the HTTP GET request
|
||||
// headers.
|
||||
func (o *GetObjectOptions) Set(key, value string) {
|
||||
if o.headers == nil {
|
||||
o.headers = make(map[string]string)
|
||||
}
|
||||
o.headers[http.CanonicalHeaderKey(key)] = value
|
||||
}
|
||||
|
||||
// SetMatchETag - set match etag.
|
||||
func (o *GetObjectOptions) SetMatchETag(etag string) error {
|
||||
if etag == "" {
|
||||
return errInvalidArgument("ETag cannot be empty.")
|
||||
}
|
||||
o.Set("If-Match", "\""+etag+"\"")
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetMatchETagExcept - set match etag except.
|
||||
func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
|
||||
if etag == "" {
|
||||
return errInvalidArgument("ETag cannot be empty.")
|
||||
}
|
||||
o.Set("If-None-Match", "\""+etag+"\"")
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetUnmodified - set unmodified time since.
|
||||
func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
|
||||
if modTime.IsZero() {
|
||||
return errInvalidArgument("Modified since cannot be empty.")
|
||||
}
|
||||
o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetModified - set modified time since.
|
||||
func (o *GetObjectOptions) SetModified(modTime time.Time) error {
|
||||
if modTime.IsZero() {
|
||||
return errInvalidArgument("Modified since cannot be empty.")
|
||||
}
|
||||
o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetRange - set the start and end offset of the object to be read.
|
||||
// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
|
||||
func (o *GetObjectOptions) SetRange(start, end int64) error {
|
||||
switch {
|
||||
case start == 0 && end < 0:
|
||||
// Read last '-end' bytes. `bytes=-N`.
|
||||
o.Set("Range", fmt.Sprintf("bytes=%d", end))
|
||||
case 0 < start && end == 0:
|
||||
// Read everything starting from offset
|
||||
// 'start'. `bytes=N-`.
|
||||
o.Set("Range", fmt.Sprintf("bytes=%d-", start))
|
||||
case 0 <= start && start <= end:
|
||||
// Read everything starting at 'start' till the
|
||||
// 'end'. `bytes=N-M`
|
||||
o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
|
||||
default:
|
||||
// All other cases such as
|
||||
// bytes=-3-
|
||||
// bytes=5-3
|
||||
// bytes=-2-4
|
||||
// bytes=-3-0
|
||||
// bytes=-3--2
|
||||
// are invalid.
|
||||
return errInvalidArgument(
|
||||
fmt.Sprintf(
|
||||
"Invalid range specified: start=%d end=%d",
|
||||
start, end))
|
||||
}
|
||||
return nil
|
||||
}
|
950
vendor/github.com/minio/minio-go/v7/api-list.go
generated
vendored
Normal file
950
vendor/github.com/minio/minio-go/v7/api-list.go
generated
vendored
Normal file
@ -0,0 +1,950 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// ListBuckets list all buckets owned by this authenticated user.
|
||||
//
|
||||
// This call requires explicit authentication, no anonymous requests are
|
||||
// allowed for listing buckets.
|
||||
//
|
||||
// api := client.New(....)
|
||||
// for message := range api.ListBuckets(context.Background()) {
|
||||
// fmt.Println(message)
|
||||
// }
|
||||
//
|
||||
func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
|
||||
// Execute GET on service.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, httpRespToErrorResponse(resp, "", "")
|
||||
}
|
||||
}
|
||||
listAllMyBucketsResult := listAllMyBucketsResult{}
|
||||
err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return listAllMyBucketsResult.Buckets.Bucket, nil
|
||||
}
|
||||
|
||||
/// Bucket Read Operations.

// listObjectsV2 streams ObjectInfo values for objects under objectPrefix
// in bucketName using the ListObjects V2 API, paginating with the
// continuation token. The returned channel is closed when the listing
// completes, an error is sent, or ctx is canceled. Errors are delivered
// in-band via ObjectInfo.Err.
func (c Client) listObjectsV2(ctx context.Context, bucketName, objectPrefix string, recursive, metadata bool, maxKeys int) <-chan ObjectInfo {
	// Allocate new list objects channel.
	objectStatCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}

	// Return object owner information by default
	fetchOwner := true

	// Validate bucket name. The channel has a buffer of 1, so the error
	// send below never blocks even though nothing is receiving yet.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}

	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}

	// Initiate list objects goroutine here.
	go func(objectStatCh chan<- ObjectInfo) {
		defer close(objectStatCh)
		// Save continuationToken for next request.
		var continuationToken string
		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectsV2Query(ctx, bucketName, objectPrefix, continuationToken,
				fetchOwner, metadata, delimiter, maxKeys)
			if err != nil {
				objectStatCh <- ObjectInfo{
					Err: err,
				}
				return
			}

			// If contents are available loop through and send over channel.
			for _, object := range result.Contents {
				object.ETag = trimEtag(object.ETag)
				select {
				// Send object content.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				select {
				// Send object prefixes.
				case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// If continuation token present, save it for next request.
			if result.NextContinuationToken != "" {
				continuationToken = result.NextContinuationToken
			}

			// Listing ends result is not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(objectStatCh)
	return objectStatCh
}
|
||||
|
||||
// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
// request parameters :-
// ---------
// ?continuation-token - Used to continue iterating over a set of objects
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
// ?metadata - Specifies if we want metadata for the objects as part of list operation.
func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
	// Validate bucket name.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ListBucketV2Result{}, err
	}
	// Validate object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		return ListBucketV2Result{}, err
	}
	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)

	// Always set list-type in ListObjects V2
	urlValues.Set("list-type", "2")

	if metadata {
		urlValues.Set("metadata", "true")
	}

	// Always set encoding-type in ListObjects V2
	urlValues.Set("encoding-type", "url")

	// Set object prefix, prefix value to be set to empty is okay.
	urlValues.Set("prefix", objectPrefix)

	// Set delimiter, delimiter value to be set to empty is okay.
	urlValues.Set("delimiter", delimiter)

	// Set continuation token
	if continuationToken != "" {
		urlValues.Set("continuation-token", continuationToken)
	}

	// Fetch owner when listing
	if fetchOwner {
		urlValues.Set("fetch-owner", "true")
	}

	// Set max keys.
	if maxkeys > 0 {
		urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
	}

	// Execute GET on bucket to list objects.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return ListBucketV2Result{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
		}
	}

	// Decode listBuckets XML.
	listBucketResult := ListBucketV2Result{}
	if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
		return listBucketResult, err
	}

	// This is an additional verification check to make
	// sure proper responses are received. It also protects the caller's
	// pagination loop in listObjectsV2 from spinning forever.
	if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
		return listBucketResult, ErrorResponse{
			Code:    "NotImplemented",
			Message: "Truncated response should have continuation token set",
		}
	}

	// Keys come back URL-encoded (encoding-type=url above); decode them
	// in place before returning.
	for i, obj := range listBucketResult.Contents {
		listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
		if err != nil {
			return listBucketResult, err
		}
	}

	for i, obj := range listBucketResult.CommonPrefixes {
		listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
		if err != nil {
			return listBucketResult, err
		}
	}

	// Success.
	return listBucketResult, nil
}
|
||||
|
||||
// listObjects streams ObjectInfo values for objects under objectPrefix
// in bucketName using the legacy (V1) ListObjects API, paginating with
// a key marker. The returned channel is closed when the listing
// completes, an error is sent, or ctx is canceled. Errors are delivered
// in-band via ObjectInfo.Err.
func (c Client) listObjects(ctx context.Context, bucketName, objectPrefix string, recursive bool, maxKeys int) <-chan ObjectInfo {
	// Allocate new list objects channel.
	objectStatCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}
	// Validate bucket name. The buffer of 1 lets the error send below
	// complete before any receiver exists.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}
	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
		defer close(objectStatCh)
		objectStatCh <- ObjectInfo{
			Err: err,
		}
		return objectStatCh
	}

	// Initiate list objects goroutine here.
	go func(objectStatCh chan<- ObjectInfo) {
		defer close(objectStatCh)

		marker := ""
		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectsQuery(ctx, bucketName, objectPrefix, marker, delimiter, maxKeys)
			if err != nil {
				objectStatCh <- ObjectInfo{
					Err: err,
				}
				return
			}

			// If contents are available loop through and send over channel.
			for _, object := range result.Contents {
				// Save the marker. The last key seen becomes the marker
				// for the next page unless NextMarker overrides it below.
				marker = object.Key
				select {
				// Send object content.
				case objectStatCh <- object:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				select {
				// Send object prefixes.
				case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// If next marker present, save it for next request.
			if result.NextMarker != "" {
				marker = result.NextMarker
			}

			// Listing ends result is not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(objectStatCh)
	return objectStatCh
}
|
||||
|
||||
// listObjectVersions streams ObjectInfo values for all versions of the
// objects under prefix in bucketName, paginating with key and version-id
// markers. The returned channel is closed when the listing completes, an
// error is sent, or ctx is canceled. Errors are delivered in-band via
// ObjectInfo.Err.
func (c Client) listObjectVersions(ctx context.Context, bucketName, prefix string, recursive bool, maxKeys int) <-chan ObjectInfo {
	// Allocate new list objects channel.
	resultCh := make(chan ObjectInfo, 1)
	// Default listing is delimited at "/"
	delimiter := "/"
	if recursive {
		// If recursive we do not delimit.
		delimiter = ""
	}

	// Validate bucket name. The buffer of 1 lets the error send below
	// complete before any receiver exists.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		defer close(resultCh)
		resultCh <- ObjectInfo{
			Err: err,
		}
		return resultCh
	}

	// Validate incoming object prefix.
	if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil {
		defer close(resultCh)
		resultCh <- ObjectInfo{
			Err: err,
		}
		return resultCh
	}

	// Initiate list objects goroutine here.
	go func(resultCh chan<- ObjectInfo) {
		defer close(resultCh)

		var (
			keyMarker       = ""
			versionIDMarker = ""
		)

		for {
			// Get list of objects a maximum of 1000 per request.
			result, err := c.listObjectVersionsQuery(ctx, bucketName, prefix, keyMarker, versionIDMarker, delimiter, maxKeys)
			if err != nil {
				resultCh <- ObjectInfo{
					Err: err,
				}
				return
			}

			// If contents are available loop through and send over channel.
			for _, version := range result.Versions {
				// Flatten the version entry into an ObjectInfo.
				info := ObjectInfo{
					ETag:         trimEtag(version.ETag),
					Key:          version.Key,
					LastModified: version.LastModified,
					Size:         version.Size,
					Owner:        version.Owner,
					StorageClass: version.StorageClass,
					IsLatest:     version.IsLatest,
					VersionID:    version.VersionID,

					IsDeleteMarker: version.isDeleteMarker,
				}
				select {
				// Send object version info.
				case resultCh <- info:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// Send all common prefixes if any.
			// NOTE: prefixes are only present if the request is delimited.
			for _, obj := range result.CommonPrefixes {
				select {
				// Send object prefixes.
				case resultCh <- ObjectInfo{Key: obj.Prefix}:
				// If receives done from the caller, return here.
				case <-ctx.Done():
					return
				}
			}

			// If next key marker is present, save it for next request.
			if result.NextKeyMarker != "" {
				keyMarker = result.NextKeyMarker
			}

			// If next version id marker is present, save it for next request.
			if result.NextVersionIDMarker != "" {
				versionIDMarker = result.NextVersionIDMarker
			}

			// Listing ends result is not truncated, return right here.
			if !result.IsTruncated {
				return
			}
		}
	}(resultCh)
	return resultCh
}
|
||||
|
||||
// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects
|
||||
// and their versions in a bucket.
|
||||
//
|
||||
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
|
||||
// request parameters :-
|
||||
// ---------
|
||||
// ?key-marker - Specifies the key to start with when listing objects in a bucket.
|
||||
// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket.
|
||||
// ?delimiter - A delimiter is a character you use to group keys.
|
||||
// ?prefix - Limits the response to keys that begin with the specified prefix.
|
||||
// ?max-keys - Sets the maximum number of keys returned in the response body.
|
||||
func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int) (ListVersionsResult, error) {
|
||||
// Validate bucket name.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return ListVersionsResult{}, err
|
||||
}
|
||||
// Validate object prefix.
|
||||
if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil {
|
||||
return ListVersionsResult{}, err
|
||||
}
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
|
||||
// Set versions to trigger versioning API
|
||||
urlValues.Set("versions", "")
|
||||
|
||||
// Set object prefix, prefix value to be set to empty is okay.
|
||||
urlValues.Set("prefix", prefix)
|
||||
|
||||
// Set delimiter, delimiter value to be set to empty is okay.
|
||||
urlValues.Set("delimiter", delimiter)
|
||||
|
||||
// Set object marker.
|
||||
if keyMarker != "" {
|
||||
urlValues.Set("key-marker", keyMarker)
|
||||
}
|
||||
|
||||
// Set max keys.
|
||||
if maxkeys > 0 {
|
||||
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
|
||||
}
|
||||
|
||||
// Set version ID marker
|
||||
if versionIDMarker != "" {
|
||||
urlValues.Set("version-id-marker", versionIDMarker)
|
||||
}
|
||||
|
||||
// Always set encoding-type
|
||||
urlValues.Set("encoding-type", "url")
|
||||
|
||||
// Execute GET on bucket to list objects.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return ListVersionsResult{}, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
|
||||
// Decode ListVersionsResult XML.
|
||||
listObjectVersionsOutput := ListVersionsResult{}
|
||||
err = xmlDecoder(resp.Body, &listObjectVersionsOutput)
|
||||
if err != nil {
|
||||
return ListVersionsResult{}, err
|
||||
}
|
||||
|
||||
for i, obj := range listObjectVersionsOutput.Versions {
|
||||
listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType)
|
||||
if err != nil {
|
||||
return listObjectVersionsOutput, err
|
||||
}
|
||||
}
|
||||
|
||||
for i, obj := range listObjectVersionsOutput.CommonPrefixes {
|
||||
listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType)
|
||||
if err != nil {
|
||||
return listObjectVersionsOutput, err
|
||||
}
|
||||
}
|
||||
|
||||
if listObjectVersionsOutput.NextKeyMarker != "" {
|
||||
listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType)
|
||||
if err != nil {
|
||||
return listObjectVersionsOutput, err
|
||||
}
|
||||
}
|
||||
|
||||
return listObjectVersionsOutput, nil
|
||||
}
|
||||
|
||||
// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
|
||||
//
|
||||
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
|
||||
// request parameters :-
|
||||
// ---------
|
||||
// ?marker - Specifies the key to start with when listing objects in a bucket.
|
||||
// ?delimiter - A delimiter is a character you use to group keys.
|
||||
// ?prefix - Limits the response to keys that begin with the specified prefix.
|
||||
// ?max-keys - Sets the maximum number of keys returned in the response body.
|
||||
func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
|
||||
// Validate bucket name.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return ListBucketResult{}, err
|
||||
}
|
||||
// Validate object prefix.
|
||||
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
|
||||
return ListBucketResult{}, err
|
||||
}
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
|
||||
// Set object prefix, prefix value to be set to empty is okay.
|
||||
urlValues.Set("prefix", objectPrefix)
|
||||
|
||||
// Set delimiter, delimiter value to be set to empty is okay.
|
||||
urlValues.Set("delimiter", delimiter)
|
||||
|
||||
// Set object marker.
|
||||
if objectMarker != "" {
|
||||
urlValues.Set("marker", objectMarker)
|
||||
}
|
||||
|
||||
// Set max keys.
|
||||
if maxkeys > 0 {
|
||||
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
|
||||
}
|
||||
|
||||
// Always set encoding-type
|
||||
urlValues.Set("encoding-type", "url")
|
||||
|
||||
// Execute GET on bucket to list objects.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return ListBucketResult{}, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
// Decode listBuckets XML.
|
||||
listBucketResult := ListBucketResult{}
|
||||
err = xmlDecoder(resp.Body, &listBucketResult)
|
||||
if err != nil {
|
||||
return listBucketResult, err
|
||||
}
|
||||
|
||||
for i, obj := range listBucketResult.Contents {
|
||||
listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
|
||||
if err != nil {
|
||||
return listBucketResult, err
|
||||
}
|
||||
}
|
||||
|
||||
for i, obj := range listBucketResult.CommonPrefixes {
|
||||
listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
|
||||
if err != nil {
|
||||
return listBucketResult, err
|
||||
}
|
||||
}
|
||||
|
||||
if listBucketResult.NextMarker != "" {
|
||||
listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType)
|
||||
if err != nil {
|
||||
return listBucketResult, err
|
||||
}
|
||||
}
|
||||
|
||||
return listBucketResult, nil
|
||||
}
|
||||
|
||||
// ListObjectsOptions holds all options of a list object request
|
||||
type ListObjectsOptions struct {
|
||||
// Include objects versions in the listing
|
||||
WithVersions bool
|
||||
// Include objects metadata in the listing
|
||||
WithMetadata bool
|
||||
// Only list objects with the prefix
|
||||
Prefix string
|
||||
// Ignore '/' delimiter
|
||||
Recursive bool
|
||||
// The maximum number of objects requested per
|
||||
// batch, advanced use-case not useful for most
|
||||
// applications
|
||||
MaxKeys int
|
||||
|
||||
// Use the deprecated list objects V1 API
|
||||
UseV1 bool
|
||||
}
|
||||
|
||||
// ListObjects returns objects list after evaluating the passed options.
|
||||
//
|
||||
// api := client.New(....)
|
||||
// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
|
||||
// fmt.Println(object)
|
||||
// }
|
||||
//
|
||||
func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
|
||||
if opts.WithVersions {
|
||||
return c.listObjectVersions(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
|
||||
}
|
||||
|
||||
// Use legacy list objects v1 API
|
||||
if opts.UseV1 {
|
||||
return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
|
||||
}
|
||||
|
||||
// Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1.
|
||||
if location, ok := c.bucketLocCache.Get(bucketName); ok {
|
||||
if location == "snowball" {
|
||||
return c.listObjects(ctx, bucketName, opts.Prefix, opts.Recursive, opts.MaxKeys)
|
||||
}
|
||||
}
|
||||
|
||||
return c.listObjectsV2(ctx, bucketName, opts.Prefix, opts.Recursive, opts.WithMetadata, opts.MaxKeys)
|
||||
}
|
||||
|
||||
// ListIncompleteUploads - List incompletely uploaded multipart objects.
|
||||
//
|
||||
// ListIncompleteUploads lists all incompleted objects matching the
|
||||
// objectPrefix from the specified bucket. If recursion is enabled
|
||||
// it would list all subdirectories and all its contents.
|
||||
//
|
||||
// Your input parameters are just bucketName, objectPrefix, recursive.
|
||||
// If you enable recursive as 'true' this function will return back all
|
||||
// the multipart objects in a given bucket name.
|
||||
//
|
||||
// api := client.New(....)
|
||||
// // Recurively list all objects in 'mytestbucket'
|
||||
// recursive := true
|
||||
// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
|
||||
// fmt.Println(message)
|
||||
// }
|
||||
func (c Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
|
||||
return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
|
||||
}
|
||||
|
||||
// listIncompleteUploads lists all incomplete uploads.
|
||||
func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
|
||||
// Allocate channel for multipart uploads.
|
||||
objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
|
||||
// Delimiter is set to "/" by default.
|
||||
delimiter := "/"
|
||||
if recursive {
|
||||
// If recursive do not delimit.
|
||||
delimiter = ""
|
||||
}
|
||||
// Validate bucket name.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
defer close(objectMultipartStatCh)
|
||||
objectMultipartStatCh <- ObjectMultipartInfo{
|
||||
Err: err,
|
||||
}
|
||||
return objectMultipartStatCh
|
||||
}
|
||||
// Validate incoming object prefix.
|
||||
if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
|
||||
defer close(objectMultipartStatCh)
|
||||
objectMultipartStatCh <- ObjectMultipartInfo{
|
||||
Err: err,
|
||||
}
|
||||
return objectMultipartStatCh
|
||||
}
|
||||
go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
|
||||
defer close(objectMultipartStatCh)
|
||||
// object and upload ID marker for future requests.
|
||||
var objectMarker string
|
||||
var uploadIDMarker string
|
||||
for {
|
||||
// list all multipart uploads.
|
||||
result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
|
||||
if err != nil {
|
||||
objectMultipartStatCh <- ObjectMultipartInfo{
|
||||
Err: err,
|
||||
}
|
||||
return
|
||||
}
|
||||
objectMarker = result.NextKeyMarker
|
||||
uploadIDMarker = result.NextUploadIDMarker
|
||||
|
||||
// Send all multipart uploads.
|
||||
for _, obj := range result.Uploads {
|
||||
// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
|
||||
select {
|
||||
// Send individual uploads here.
|
||||
case objectMultipartStatCh <- obj:
|
||||
// If the context is canceled
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
// Send all common prefixes if any.
|
||||
// NOTE: prefixes are only present if the request is delimited.
|
||||
for _, obj := range result.CommonPrefixes {
|
||||
select {
|
||||
// Send delimited prefixes here.
|
||||
case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}:
|
||||
// If context is canceled.
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
// Listing ends if result not truncated, return right here.
|
||||
if !result.IsTruncated {
|
||||
return
|
||||
}
|
||||
}
|
||||
}(objectMultipartStatCh)
|
||||
// return.
|
||||
return objectMultipartStatCh
|
||||
|
||||
}
|
||||
|
||||
// listMultipartUploadsQuery - (List Multipart Uploads).
|
||||
// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
|
||||
//
|
||||
// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
|
||||
// request parameters. :-
|
||||
// ---------
|
||||
// ?key-marker - Specifies the multipart upload after which listing should begin.
|
||||
// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
|
||||
// ?delimiter - A delimiter is a character you use to group keys.
|
||||
// ?prefix - Limits the response to keys that begin with the specified prefix.
|
||||
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
|
||||
func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
|
||||
// Get resources properly escaped and lined up before using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
// Set uploads.
|
||||
urlValues.Set("uploads", "")
|
||||
// Set object key marker.
|
||||
if keyMarker != "" {
|
||||
urlValues.Set("key-marker", keyMarker)
|
||||
}
|
||||
// Set upload id marker.
|
||||
if uploadIDMarker != "" {
|
||||
urlValues.Set("upload-id-marker", uploadIDMarker)
|
||||
}
|
||||
|
||||
// Set object prefix, prefix value to be set to empty is okay.
|
||||
urlValues.Set("prefix", prefix)
|
||||
|
||||
// Set delimiter, delimiter value to be set to empty is okay.
|
||||
urlValues.Set("delimiter", delimiter)
|
||||
|
||||
// Always set encoding-type
|
||||
urlValues.Set("encoding-type", "url")
|
||||
|
||||
// maxUploads should be 1000 or less.
|
||||
if maxUploads > 0 {
|
||||
// Set max-uploads.
|
||||
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
|
||||
}
|
||||
|
||||
// Execute GET on bucketName to list multipart uploads.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return ListMultipartUploadsResult{}, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
// Decode response body.
|
||||
listMultipartUploadsResult := ListMultipartUploadsResult{}
|
||||
err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
|
||||
if err != nil {
|
||||
return listMultipartUploadsResult, err
|
||||
}
|
||||
|
||||
listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType)
|
||||
if err != nil {
|
||||
return listMultipartUploadsResult, err
|
||||
}
|
||||
|
||||
listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType)
|
||||
if err != nil {
|
||||
return listMultipartUploadsResult, err
|
||||
}
|
||||
|
||||
for i, obj := range listMultipartUploadsResult.Uploads {
|
||||
listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType)
|
||||
if err != nil {
|
||||
return listMultipartUploadsResult, err
|
||||
}
|
||||
}
|
||||
|
||||
for i, obj := range listMultipartUploadsResult.CommonPrefixes {
|
||||
listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType)
|
||||
if err != nil {
|
||||
return listMultipartUploadsResult, err
|
||||
}
|
||||
}
|
||||
|
||||
return listMultipartUploadsResult, nil
|
||||
}
|
||||
|
||||
// listObjectParts list all object parts recursively.
|
||||
func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
|
||||
// Part number marker for the next batch of request.
|
||||
var nextPartNumberMarker int
|
||||
partsInfo = make(map[int]ObjectPart)
|
||||
for {
|
||||
// Get list of uploaded parts a maximum of 1000 per request.
|
||||
listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Append to parts info.
|
||||
for _, part := range listObjPartsResult.ObjectParts {
|
||||
// Trim off the odd double quotes from ETag in the beginning and end.
|
||||
part.ETag = trimEtag(part.ETag)
|
||||
partsInfo[part.PartNumber] = part
|
||||
}
|
||||
// Keep part number marker, for the next iteration.
|
||||
nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
|
||||
// Listing ends result is not truncated, return right here.
|
||||
if !listObjPartsResult.IsTruncated {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Return all the parts.
|
||||
return partsInfo, nil
|
||||
}
|
||||
|
||||
// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name.
|
||||
func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) {
|
||||
var uploadIDs []string
|
||||
// Make list incomplete uploads recursive.
|
||||
isRecursive := true
|
||||
// List all incomplete uploads.
|
||||
for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) {
|
||||
if mpUpload.Err != nil {
|
||||
return nil, mpUpload.Err
|
||||
}
|
||||
if objectName == mpUpload.Key {
|
||||
uploadIDs = append(uploadIDs, mpUpload.UploadID)
|
||||
}
|
||||
}
|
||||
// Return the latest upload id.
|
||||
return uploadIDs, nil
|
||||
}
|
||||
|
||||
// listObjectPartsQuery (List Parts query)
|
||||
// - lists some or all (up to 1000) parts that have been uploaded
|
||||
// for a specific multipart upload
|
||||
//
|
||||
// You can use the request parameters as selection criteria to return
|
||||
// a subset of the uploads in a bucket, request parameters :-
|
||||
// ---------
|
||||
// ?part-number-marker - Specifies the part after which listing should
|
||||
// begin.
|
||||
// ?max-parts - Maximum parts to be listed per request.
|
||||
func (c Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
|
||||
// Get resources properly escaped and lined up before using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
// Set part number marker.
|
||||
urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
|
||||
// Set upload id.
|
||||
urlValues.Set("uploadId", uploadID)
|
||||
|
||||
// maxParts should be 1000 or less.
|
||||
if maxParts > 0 {
|
||||
// Set max parts.
|
||||
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
|
||||
}
|
||||
|
||||
// Execute GET on objectName to get list of parts.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return ListObjectPartsResult{}, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
// Decode list object parts XML.
|
||||
listObjectPartsResult := ListObjectPartsResult{}
|
||||
err = xmlDecoder(resp.Body, &listObjectPartsResult)
|
||||
if err != nil {
|
||||
return listObjectPartsResult, err
|
||||
}
|
||||
return listObjectPartsResult, nil
|
||||
}
|
||||
|
||||
// Decode an S3 object name according to the encoding type
|
||||
func decodeS3Name(name, encodingType string) (string, error) {
|
||||
switch encodingType {
|
||||
case "url":
|
||||
return url.QueryUnescape(name)
|
||||
default:
|
||||
return name, nil
|
||||
}
|
||||
}
|
176
vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
generated
vendored
Normal file
176
vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go
generated
vendored
Normal file
@ -0,0 +1,176 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// objectLegalHold - object legal hold specified in
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html
|
||||
type objectLegalHold struct {
|
||||
XMLNS string `xml:"xmlns,attr,omitempty"`
|
||||
XMLName xml.Name `xml:"LegalHold"`
|
||||
Status LegalHoldStatus `xml:"Status,omitempty"`
|
||||
}
|
||||
|
||||
// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call
|
||||
type PutObjectLegalHoldOptions struct {
|
||||
VersionID string
|
||||
Status *LegalHoldStatus
|
||||
}
|
||||
|
||||
// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call
|
||||
type GetObjectLegalHoldOptions struct {
|
||||
VersionID string
|
||||
}
|
||||
|
||||
// LegalHoldStatus - object legal hold status.
|
||||
type LegalHoldStatus string
|
||||
|
||||
const (
|
||||
// LegalHoldEnabled indicates legal hold is enabled
|
||||
LegalHoldEnabled LegalHoldStatus = "ON"
|
||||
|
||||
// LegalHoldDisabled indicates legal hold is disabled
|
||||
LegalHoldDisabled LegalHoldStatus = "OFF"
|
||||
)
|
||||
|
||||
func (r LegalHoldStatus) String() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
// IsValid - check whether this legal hold status is valid or not.
|
||||
func (r LegalHoldStatus) IsValid() bool {
|
||||
return r == LegalHoldEnabled || r == LegalHoldDisabled
|
||||
}
|
||||
|
||||
func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) {
|
||||
if status == nil {
|
||||
return nil, fmt.Errorf("Status not set")
|
||||
}
|
||||
if !status.IsValid() {
|
||||
return nil, fmt.Errorf("invalid legal hold status `%v`", status)
|
||||
}
|
||||
legalHold := &objectLegalHold{
|
||||
Status: *status,
|
||||
}
|
||||
return legalHold, nil
|
||||
}
|
||||
|
||||
// PutObjectLegalHold : sets object legal hold for a given object and versionID.
|
||||
func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("legal-hold", "")
|
||||
|
||||
if opts.VersionID != "" {
|
||||
urlValues.Set("versionId", opts.VersionID)
|
||||
}
|
||||
|
||||
lh, err := newObjectLegalHold(opts.Status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lhData, err := xml.Marshal(lh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(lhData),
|
||||
contentLength: int64(len(lhData)),
|
||||
contentMD5Base64: sumMD5Base64(lhData),
|
||||
contentSHA256Hex: sum256Hex(lhData),
|
||||
}
|
||||
|
||||
// Execute PUT Object Legal Hold.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
|
||||
return httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetObjectLegalHold gets legal-hold status of given object.
|
||||
func (c Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("legal-hold", "")
|
||||
|
||||
if opts.VersionID != "" {
|
||||
urlValues.Set("versionId", opts.VersionID)
|
||||
}
|
||||
|
||||
// Execute GET on bucket to list objects.
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
lh := &objectLegalHold{}
|
||||
if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &lh.Status, nil
|
||||
}
|
241
vendor/github.com/minio/minio-go/v7/api-object-lock.go
generated
vendored
Normal file
241
vendor/github.com/minio/minio-go/v7/api-object-lock.go
generated
vendored
Normal file
@ -0,0 +1,241 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// RetentionMode - object retention mode.
|
||||
type RetentionMode string
|
||||
|
||||
const (
|
||||
// Governance - governance mode.
|
||||
Governance RetentionMode = "GOVERNANCE"
|
||||
|
||||
// Compliance - compliance mode.
|
||||
Compliance RetentionMode = "COMPLIANCE"
|
||||
)
|
||||
|
||||
func (r RetentionMode) String() string {
|
||||
return string(r)
|
||||
}
|
||||
|
||||
// IsValid - check whether this retention mode is valid or not.
|
||||
func (r RetentionMode) IsValid() bool {
|
||||
return r == Governance || r == Compliance
|
||||
}
|
||||
|
||||
// ValidityUnit - retention validity unit.
|
||||
type ValidityUnit string
|
||||
|
||||
const (
|
||||
// Days - denotes no. of days.
|
||||
Days ValidityUnit = "DAYS"
|
||||
|
||||
// Years - denotes no. of years.
|
||||
Years ValidityUnit = "YEARS"
|
||||
)
|
||||
|
||||
func (unit ValidityUnit) String() string {
|
||||
return string(unit)
|
||||
}
|
||||
|
||||
// IsValid - check whether this validity unit is valid or not.
|
||||
func (unit ValidityUnit) isValid() bool {
|
||||
return unit == Days || unit == Years
|
||||
}
|
||||
|
||||
// Retention - bucket level retention configuration.
|
||||
type Retention struct {
|
||||
Mode RetentionMode
|
||||
Validity time.Duration
|
||||
}
|
||||
|
||||
func (r Retention) String() string {
|
||||
return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity)
|
||||
}
|
||||
|
||||
// IsEmpty - returns whether retention is empty or not.
|
||||
func (r Retention) IsEmpty() bool {
|
||||
return r.Mode == "" || r.Validity == 0
|
||||
}
|
||||
|
||||
// objectLockConfig - object lock configuration specified in
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
|
||||
type objectLockConfig struct {
|
||||
XMLNS string `xml:"xmlns,attr,omitempty"`
|
||||
XMLName xml.Name `xml:"ObjectLockConfiguration"`
|
||||
ObjectLockEnabled string `xml:"ObjectLockEnabled"`
|
||||
Rule *struct {
|
||||
DefaultRetention struct {
|
||||
Mode RetentionMode `xml:"Mode"`
|
||||
Days *uint `xml:"Days"`
|
||||
Years *uint `xml:"Years"`
|
||||
} `xml:"DefaultRetention"`
|
||||
} `xml:"Rule,omitempty"`
|
||||
}
|
||||
|
||||
func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) {
|
||||
config := &objectLockConfig{
|
||||
ObjectLockEnabled: "Enabled",
|
||||
}
|
||||
|
||||
if mode != nil && validity != nil && unit != nil {
|
||||
if !mode.IsValid() {
|
||||
return nil, fmt.Errorf("invalid retention mode `%v`", mode)
|
||||
}
|
||||
|
||||
if !unit.isValid() {
|
||||
return nil, fmt.Errorf("invalid validity unit `%v`", unit)
|
||||
}
|
||||
|
||||
config.Rule = &struct {
|
||||
DefaultRetention struct {
|
||||
Mode RetentionMode `xml:"Mode"`
|
||||
Days *uint `xml:"Days"`
|
||||
Years *uint `xml:"Years"`
|
||||
} `xml:"DefaultRetention"`
|
||||
}{}
|
||||
|
||||
config.Rule.DefaultRetention.Mode = *mode
|
||||
if *unit == Days {
|
||||
config.Rule.DefaultRetention.Days = validity
|
||||
} else {
|
||||
config.Rule.DefaultRetention.Years = validity
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
if mode == nil && validity == nil && unit == nil {
|
||||
return config, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed")
|
||||
}
|
||||
|
||||
// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
|
||||
func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("object-lock", "")
|
||||
|
||||
config, err := newObjectLockConfig(mode, validity, unit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configData, err := xml.Marshal(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(configData),
|
||||
contentLength: int64(len(configData)),
|
||||
contentMD5Base64: sumMD5Base64(configData),
|
||||
contentSHA256Hex: sum256Hex(configData),
|
||||
}
|
||||
|
||||
// Execute PUT bucket object lock configuration.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetObjectLockConfig gets object lock configuration of given bucket.
// It returns the bucket's ObjectLockEnabled status and, when a default
// retention rule is configured, that rule's mode, validity and validity
// unit; otherwise mode/validity/unit are nil.
func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return "", nil, nil, nil, err
	}

	urlValues := make(url.Values)
	urlValues.Set("object-lock", "")

	// Execute GET on bucket to fetch the object-lock configuration.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return "", nil, nil, nil, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	config := &objectLockConfig{}
	if err = xml.NewDecoder(resp.Body).Decode(config); err != nil {
		return "", nil, nil, nil, err
	}

	if config.Rule != nil {
		// A default retention rule is present; surface its mode and
		// whichever of Days/Years is populated as the validity.
		mode = &config.Rule.DefaultRetention.Mode
		if config.Rule.DefaultRetention.Days != nil {
			validity = config.Rule.DefaultRetention.Days
			days := Days
			unit = &days
		} else {
			validity = config.Rule.DefaultRetention.Years
			years := Years
			unit = &years
		}
		return config.ObjectLockEnabled, mode, validity, unit, nil
	}
	return config.ObjectLockEnabled, nil, nil, nil, nil
}
|
||||
|
||||
// GetBucketObjectLockConfig gets object lock configuration of given bucket.
// It is a convenience wrapper over GetObjectLockConfig that discards the
// ObjectLockEnabled status string.
func (c Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
	_, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName)
	return mode, validity, unit, err
}
|
||||
|
||||
// SetObjectLockConfig sets object lock configuration in given bucket.
// mode, validity and unit are either all set or all nil.
// It delegates to SetBucketObjectLockConfig.
func (c Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
	return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit)
}
|
165
vendor/github.com/minio/minio-go/v7/api-object-retention.go
generated
vendored
Normal file
165
vendor/github.com/minio/minio-go/v7/api-object-retention.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2019-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// objectRetention - object retention specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
type objectRetention struct {
	// XMLNS carries the optional XML namespace attribute.
	XMLNS   string   `xml:"xmlns,attr,omitempty"`
	XMLName xml.Name `xml:"Retention"`
	// Mode is the retention mode; omitted from the payload when unset.
	Mode RetentionMode `xml:"Mode,omitempty"`
	// RetainUntilDate is serialized in ISO8601 per the AWS timestamp format.
	RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"`
}
|
||||
|
||||
func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) {
|
||||
objectRetention := &objectRetention{}
|
||||
|
||||
if date != nil && !date.IsZero() {
|
||||
objectRetention.RetainUntilDate = date
|
||||
}
|
||||
if mode != nil {
|
||||
if !mode.IsValid() {
|
||||
return nil, fmt.Errorf("invalid retention mode `%v`", mode)
|
||||
}
|
||||
objectRetention.Mode = *mode
|
||||
}
|
||||
|
||||
return objectRetention, nil
|
||||
}
|
||||
|
||||
// PutObjectRetentionOptions represents options specified by user for PutObject call
type PutObjectRetentionOptions struct {
	// GovernanceBypass, when true, sends the bypass-governance header so
	// governance-mode retention can be overridden.
	GovernanceBypass bool
	// Mode is the retention mode to apply; nil leaves it unset.
	Mode *RetentionMode
	// RetainUntilDate is the timestamp until which the object is retained.
	RetainUntilDate *time.Time
	// VersionID targets a specific object version when non-empty.
	VersionID string
}
|
||||
|
||||
// PutObjectRetention sets object retention for a given object and versionID.
func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}

	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return err
	}

	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)
	urlValues.Set("retention", "")

	if opts.VersionID != "" {
		urlValues.Set("versionId", opts.VersionID)
	}

	retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate)
	if err != nil {
		return err
	}

	retentionData, err := xml.Marshal(retention)
	if err != nil {
		return err
	}

	// Build headers.
	headers := make(http.Header)

	if opts.GovernanceBypass {
		// Set the bypass governance retention header
		headers.Set(amzBypassGovernance, "true")
	}

	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      urlValues,
		contentBody:      bytes.NewReader(retentionData),
		contentLength:    int64(len(retentionData)),
		contentMD5Base64: sumMD5Base64(retentionData),
		contentSHA256Hex: sum256Hex(retentionData),
		customHeader:     headers,
	}

	// Execute PUT Object Retention.
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		// Some backends answer 200 OK, others 204 No Content on success.
		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
			return httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}
	return nil
}
|
||||
|
||||
// GetObjectRetention gets retention of given object. An empty versionID
// targets the latest version.
func (c Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return nil, nil, err
	}

	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return nil, nil, err
	}
	urlValues := make(url.Values)
	urlValues.Set("retention", "")
	if versionID != "" {
		urlValues.Set("versionId", versionID)
	}
	// Execute GET on object to fetch its retention configuration.
	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return nil, nil, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}
	retention := &objectRetention{}
	if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil {
		return nil, nil, err
	}

	return &retention.Mode, retention.RetainUntilDate, nil
}
|
157
vendor/github.com/minio/minio-go/v7/api-object-tagging.go
generated
vendored
Normal file
157
vendor/github.com/minio/minio-go/v7/api-object-tagging.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
)
|
||||
|
||||
// PutObjectTaggingOptions holds an object version id
// to update tag(s) of a specific object version
type PutObjectTaggingOptions struct {
	// VersionID targets a specific object version when non-empty.
	VersionID string
}
|
||||
|
||||
// PutObjectTagging replaces or creates object tag(s) and can target
|
||||
// a specific object version in a versioned bucket.
|
||||
func (c Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("tagging", "")
|
||||
|
||||
if opts.VersionID != "" {
|
||||
urlValues.Set("versionId", opts.VersionID)
|
||||
}
|
||||
|
||||
reqBytes, err := xml.Marshal(otags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
contentBody: bytes.NewReader(reqBytes),
|
||||
contentLength: int64(len(reqBytes)),
|
||||
contentMD5Base64: sumMD5Base64(reqBytes),
|
||||
}
|
||||
|
||||
// Execute PUT to set a object tagging.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetObjectTaggingOptions holds the object version ID
// to fetch the tagging key/value pairs
type GetObjectTaggingOptions struct {
	// VersionID targets a specific object version when non-empty.
	VersionID string
}
|
||||
|
||||
// GetObjectTagging fetches object tag(s) with options to target
|
||||
// a specific object version in a versioned bucket.
|
||||
func (c Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("tagging", "")
|
||||
|
||||
if opts.VersionID != "" {
|
||||
urlValues.Set("versionId", opts.VersionID)
|
||||
}
|
||||
|
||||
// Execute GET on object to get object tag(s)
|
||||
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
|
||||
return tags.ParseObjectXML(resp.Body)
|
||||
}
|
||||
|
||||
// RemoveObjectTaggingOptions holds the version id of the object to remove
type RemoveObjectTaggingOptions struct {
	// VersionID targets a specific object version when non-empty.
	VersionID string
}
|
||||
|
||||
// RemoveObjectTagging removes object tag(s) with options to control a specific object
|
||||
// version in a versioned bucket
|
||||
func (c Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error {
|
||||
// Get resources properly escaped and lined up before
|
||||
// using them in http request.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("tagging", "")
|
||||
|
||||
if opts.VersionID != "" {
|
||||
urlValues.Set("versionId", opts.VersionID)
|
||||
}
|
||||
|
||||
// Execute DELETE on object to remove object tag(s)
|
||||
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
})
|
||||
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
// S3 returns "204 No content" after Object tag deletion.
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
return httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
216
vendor/github.com/minio/minio-go/v7/api-presigned.go
generated
vendored
Normal file
216
vendor/github.com/minio/minio-go/v7/api-presigned.go
generated
vendored
Normal file
@ -0,0 +1,216 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/signer"
|
||||
)
|
||||
|
||||
// presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7days - ie. 604800 and minimum is 1.
func (c Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
	// Input validation.
	if method == "" {
		return nil, errInvalidArgument("method cannot be empty.")
	}
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return nil, err
	}
	if err = isValidExpiry(expires); err != nil {
		return nil, err
	}

	// Convert expires into seconds.
	expireSeconds := int64(expires / time.Second)
	reqMetadata := requestMetadata{
		presignURL:  true,
		bucketName:  bucketName,
		objectName:  objectName,
		expires:     expireSeconds,
		queryValues: reqParams,
	}

	// Instantiate a new request.
	// Since expires is set newRequest will presign the request.
	var req *http.Request
	if req, err = c.newRequest(ctx, method, reqMetadata); err != nil {
		return nil, err
	}
	// The presigned URL is carried on the constructed request itself.
	return req.URL, nil
}
|
||||
|
||||
// PresignedGetObject - Returns a presigned URL to access an object
// data without credentials. URL can have a maximum expiry of
// upto 7days or a minimum of 1sec. Additionally you can override
// a set of response headers using the query parameters.
func (c Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
	if err = s3utils.CheckValidObjectName(objectName); err != nil {
		return nil, err
	}
	return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams)
}
|
||||
|
||||
// PresignedHeadObject - Returns a presigned URL to access
// object metadata without credentials. URL can have a maximum expiry
// of upto 7days or a minimum of 1sec. Additionally you can override
// a set of response headers using the query parameters.
func (c Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
	if err = s3utils.CheckValidObjectName(objectName); err != nil {
		return nil, err
	}
	return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams)
}
|
||||
|
||||
// PresignedPutObject - Returns a presigned URL to upload an object
// without credentials. URL can have a maximum expiry of upto 7days
// or a minimum of 1sec.
func (c Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
	if err = s3utils.CheckValidObjectName(objectName); err != nil {
		return nil, err
	}
	// No request parameters are supported for presigned PUT.
	return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil)
}
|
||||
|
||||
// Presign - returns a presigned URL for any http method of your choice
// along with custom request params. URL can have a maximum expiry of
// upto 7days or a minimum of 1sec.
// Note: unlike the method-specific helpers, objectName is not validated here.
func (c Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams)
}
|
||||
|
||||
// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
// The returned URL and form fields can be used in an HTML form or HTTP POST
// to upload directly to the bucket encoded in the policy.
func (c Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
	// Validate input arguments.
	if p.expiration.IsZero() {
		return nil, nil, errors.New("Expiration time must be specified")
	}
	if _, ok := p.formData["key"]; !ok {
		return nil, nil, errors.New("object key must be specified")
	}
	if _, ok := p.formData["bucket"]; !ok {
		return nil, nil, errors.New("bucket name must be specified")
	}

	bucketName := p.formData["bucket"]
	// Fetch the bucket location.
	location, err := c.getBucketLocation(ctx, bucketName)
	if err != nil {
		return nil, nil, err
	}

	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)

	u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
	if err != nil {
		return nil, nil, err
	}

	// Get credentials from the configured credentials provider.
	credValues, err := c.credsProvider.Get()
	if err != nil {
		return nil, nil, err
	}

	var (
		signerType      = credValues.SignerType
		sessionToken    = credValues.SessionToken
		accessKeyID     = credValues.AccessKeyID
		secretAccessKey = credValues.SecretAccessKey
	)

	if signerType.IsAnonymous() {
		return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials")
	}

	// Keep time.
	t := time.Now().UTC()
	// For signature version '2' handle here.
	if signerType.IsV2() {
		policyBase64 := p.base64()
		p.formData["policy"] = policyBase64
		// For Google endpoint set this value to be 'GoogleAccessId'.
		if s3utils.IsGoogleEndpoint(*c.endpointURL) {
			p.formData["GoogleAccessId"] = accessKeyID
		} else {
			// For all other endpoints set this value to be 'AWSAccessKeyId'.
			p.formData["AWSAccessKeyId"] = accessKeyID
		}
		// Sign the policy.
		p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
		return u, p.formData, nil
	}

	// Signature version '4' from here on: the x-amz-* conditions below must
	// be part of the policy before it is base64-encoded and signed.

	// Add date policy.
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-date",
		value:     t.Format(iso8601DateFormat),
	}); err != nil {
		return nil, nil, err
	}

	// Add algorithm policy.
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-algorithm",
		value:     signV4Algorithm,
	}); err != nil {
		return nil, nil, err
	}

	// Add a credential policy.
	credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3)
	if err = p.addNewPolicy(policyCondition{
		matchType: "eq",
		condition: "$x-amz-credential",
		value:     credential,
	}); err != nil {
		return nil, nil, err
	}

	if sessionToken != "" {
		if err = p.addNewPolicy(policyCondition{
			matchType: "eq",
			condition: "$x-amz-security-token",
			value:     sessionToken,
		}); err != nil {
			return nil, nil, err
		}
	}

	// Get base64 encoded policy.
	policyBase64 := p.base64()

	// Fill in the form data.
	p.formData["policy"] = policyBase64
	p.formData["x-amz-algorithm"] = signV4Algorithm
	p.formData["x-amz-credential"] = credential
	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
	if sessionToken != "" {
		p.formData["x-amz-security-token"] = sessionToken
	}
	p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
	return u, p.formData, nil
}
|
123
vendor/github.com/minio/minio-go/v7/api-put-bucket.go
generated
vendored
Normal file
123
vendor/github.com/minio/minio-go/v7/api-put-bucket.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
/// Bucket operations

// makeBucket validates the bucket name and creates the bucket, retrying
// once with the server-reported region when the initial default-region
// attempt is rejected with AuthorizationHeaderMalformed.
func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
	// Validate the input arguments.
	if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
		return err
	}

	err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking)
	if err != nil && (opts.Region == "" || opts.Region == "us-east-1") {
		// The server tells us the correct region when our default guess
		// was wrong; retry once against that region.
		if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" {
			err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking)
		}
	}
	return err
}
|
||||
|
||||
// doMakeBucket issues the PUT Bucket request for the given location,
// optionally enabling object locking, and caches the bucket location on
// success.
func (c Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) {
	defer func() {
		// Save the location into cache on a successful makeBucket response.
		if err == nil {
			c.bucketLocCache.Set(bucketName, location)
		}
	}()

	// If location is empty, treat is a default region 'us-east-1'.
	if location == "" {
		location = "us-east-1"
		// For custom region clients, default
		// to custom region instead not 'us-east-1'.
		if c.region != "" {
			location = c.region
		}
	}
	// PUT bucket request metadata.
	reqMetadata := requestMetadata{
		bucketName:     bucketName,
		bucketLocation: location,
	}

	if objectLockEnabled {
		headers := make(http.Header)
		headers.Add("x-amz-bucket-object-lock-enabled", "true")
		reqMetadata.customHeader = headers
	}

	// If location is not 'us-east-1' create bucket location config.
	// 'us-east-1' is the implicit default and must not be sent explicitly.
	if location != "us-east-1" && location != "" {
		createBucketConfig := createBucketConfiguration{}
		createBucketConfig.Location = location
		var createBucketConfigBytes []byte
		createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
		if err != nil {
			return err
		}
		reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
		reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
		reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
		reqMetadata.contentLength = int64(len(createBucketConfigBytes))
	}

	// Execute PUT to create a new bucket.
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return err
	}

	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return httpRespToErrorResponse(resp, bucketName, "")
		}
	}

	// Success.
	return nil
}
|
||||
|
||||
// MakeBucketOptions holds all options to tweak bucket creation
type MakeBucketOptions struct {
	// Bucket location
	Region string
	// Enable object locking
	ObjectLocking bool
}
|
||||
|
||||
// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts.
//
// Location is an optional argument, by default all buckets are
// created in US Standard Region.
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
	return c.makeBucket(ctx, bucketName, opts)
}
|
148
vendor/github.com/minio/minio-go/v7/api-put-object-common.go
generated
vendored
Normal file
148
vendor/github.com/minio/minio-go/v7/api-put-object-common.go
generated
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// Verify if reader is *minio.Object
|
||||
func isObject(reader io.Reader) (ok bool) {
|
||||
_, ok = reader.(*Object)
|
||||
return
|
||||
}
|
||||
|
||||
// Verify if reader is a generic ReaderAt
|
||||
func isReadAt(reader io.Reader) (ok bool) {
|
||||
var v *os.File
|
||||
v, ok = reader.(*os.File)
|
||||
if ok {
|
||||
// Stdin, Stdout and Stderr all have *os.File type
|
||||
// which happen to also be io.ReaderAt compatible
|
||||
// we need to add special conditions for them to
|
||||
// be ignored by this function.
|
||||
for _, f := range []string{
|
||||
"/dev/stdin",
|
||||
"/dev/stdout",
|
||||
"/dev/stderr",
|
||||
} {
|
||||
if f == v.Name() {
|
||||
ok = false
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
_, ok = reader.(io.ReaderAt)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// optimalPartInfo - calculate the optimal part info for a given
// object size.
//
// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
// object storage it will have the following parameters as constants.
//
//  maxPartsCount - 10000
//  minPartSize - 128MiB
//  maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
	// object size is '-1' set it to 5TiB.
	var unknownSize bool
	if objectSize == -1 {
		unknownSize = true
		objectSize = maxMultipartPutObjectSize
	}

	// object size is larger than supported maximum.
	if objectSize > maxMultipartPutObjectSize {
		err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
		return
	}

	var partSizeFlt float64
	if configuredPartSize > 0 {
		// Caller supplied an explicit part size: validate it against the
		// object size and the absolute min/max part-size limits.
		if int64(configuredPartSize) > objectSize {
			err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "")
			return
		}

		if !unknownSize {
			if objectSize > (int64(configuredPartSize) * maxPartsCount) {
				err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.")
				return
			}
		}

		if configuredPartSize < absMinPartSize {
			err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.")
			return
		}

		if configuredPartSize > maxPartSize {
			err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.")
			return
		}

		partSizeFlt = float64(configuredPartSize)
		if unknownSize {
			// If input has unknown size and part size is configured
			// keep it to maximum allowed as per 10000 parts.
			objectSize = int64(configuredPartSize) * maxPartsCount
		}
	} else {
		configuredPartSize = minPartSize
		// Use floats for part size for all calculations to avoid
		// overflows during float64 to int64 conversions.
		partSizeFlt = float64(objectSize / maxPartsCount)
		// Round the computed part size up to a multiple of the minimum
		// part size so we never exceed maxPartsCount parts.
		partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize)
	}

	// Total parts count.
	totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
	// Part size.
	partSize = int64(partSizeFlt)
	// Last part size.
	lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
	return totalPartsCount, partSize, lastPartSize, nil
}
|
||||
|
||||
// newUploadID - initiate a new multipart upload request for the given
// bucket/object and return the server-assigned upload id.
func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return "", err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return "", err
	}

	// Initiate multipart upload for an object.
	initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
	if err != nil {
		return "", err
	}
	return initMultipartUploadResult.UploadID, nil
}
|
77
vendor/github.com/minio/minio-go/v7/api-put-object-copy.go
generated
vendored
Normal file
77
vendor/github.com/minio/minio-go/v7/api-put-object-copy.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017, 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// CopyObject - copy a source object into a new object
//
// The copy happens server-side via a single PUT on the destination: the
// source is identified through headers marshaled from src, so the object
// data never transits this client. Both option sets are validated first.
func (c Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) {
	if err := src.validate(); err != nil {
		return UploadInfo{}, err
	}

	if err := dst.validate(); err != nil {
		return UploadInfo{}, err
	}

	// Marshal destination options first, then source options, into the
	// request headers.
	header := make(http.Header)
	dst.Marshal(header)
	src.Marshal(header)

	// Issue the PUT against the destination bucket/object.
	resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
		bucketName:   dst.Bucket,
		objectName:   dst.Object,
		customHeader: header,
	})
	if err != nil {
		return UploadInfo{}, err
	}
	defer closeResponse(resp)

	if resp.StatusCode != http.StatusOK {
		return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object)
	}

	// Update the progress properly after successful copy: drain dst.Size
	// bytes from the progress reader so any attached meter completes.
	if dst.Progress != nil {
		io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size))
	}

	// Decode the XML copy result from the response body.
	cpObjRes := copyObjectResult{}
	if err = xmlDecoder(resp.Body, &cpObjRes); err != nil {
		return UploadInfo{}, err
	}

	// extract lifecycle expiry date and rule ID
	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))

	return UploadInfo{
		Bucket:           dst.Bucket,
		Key:              dst.Object,
		LastModified:     cpObjRes.LastModified,
		ETag:             trimEtag(resp.Header.Get("ETag")),
		VersionID:        resp.Header.Get(amzVersionID),
		Expiration:       expTime,
		ExpirationRuleID: ruleID,
	}, nil
}
|
64
vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
generated
vendored
Normal file
64
vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"mime"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
|
||||
func (c Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
// Open the referenced file.
|
||||
fileReader, err := os.Open(filePath)
|
||||
// If any error fail quickly here.
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
defer fileReader.Close()
|
||||
|
||||
// Save the file stat.
|
||||
fileStat, err := fileReader.Stat()
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
// Save the file size.
|
||||
fileSize := fileStat.Size()
|
||||
|
||||
// Set contentType based on filepath extension if not given or default
|
||||
// value of "application/octet-stream" if the extension has no associated type.
|
||||
if opts.ContentType == "" {
|
||||
if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
|
||||
opts.ContentType = "application/octet-stream"
|
||||
}
|
||||
}
|
||||
return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts)
|
||||
}
|
385
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
generated
vendored
Normal file
385
vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
generated
vendored
Normal file
@ -0,0 +1,385 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
|
||||
opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
|
||||
if err != nil {
|
||||
errResp := ToErrorResponse(err)
|
||||
// Verify if multipart functionality is not available, if not
|
||||
// fall back to single PutObject operation.
|
||||
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
|
||||
// Verify if size of reader is greater than '5GiB'.
|
||||
if size > maxSinglePutObjectSize {
|
||||
return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
|
||||
}
|
||||
// Fall back to uploading as single PutObject operation.
|
||||
return c.putObject(ctx, bucketName, objectName, reader, size, opts)
|
||||
}
|
||||
}
|
||||
return info, err
|
||||
}
|
||||
|
||||
// putObjectMultipartNoStream uploads an object of unknown size: it reads the
// source in fixed-size parts (sized by optimalPartInfo with size -1), hashes
// and uploads each part, and completes the multipart upload once the reader
// is exhausted. On any error after initiation the upload is aborted so
// partially uploaded parts do not leak storage.
func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
	// Input validation.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err = s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Total data read and written to server. should be equal to
	// 'size' at the end of the call.
	var totalUploadedSize int64

	// Complete multipart upload.
	var complMultipartUpload completeMultipartUpload

	// Calculate the optimal parts info for a given size.
	// (-1 signals unknown total size; opts.PartSize may pin the part size.)
	totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize)
	if err != nil {
		return UploadInfo{}, err
	}

	// Initiate a new multipart upload.
	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
	if err != nil {
		return UploadInfo{}, err
	}

	// Abort the in-progress upload if we return with any error, to free
	// the storage held by already-uploaded parts.
	defer func() {
		if err != nil {
			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
		}
	}()

	// Part number always starts with '1'.
	partNumber := 1

	// Initialize parts uploaded map.
	partsInfo := make(map[int]ObjectPart)

	// Create a buffer.
	buf := make([]byte, partSize)

	for partNumber <= totalPartsCount {
		// Choose hash algorithms to be calculated by hashCopyN,
		// avoid sha256 with non-v4 signature request or
		// HTTPS connection.
		hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5)

		length, rErr := readFull(reader, buf)
		// EOF on a later part means the previous part was the last one.
		if rErr == io.EOF && partNumber > 1 {
			break
		}

		// A short read (ErrUnexpectedEOF) or plain EOF still yields a
		// final part below; any other read error is fatal.
		if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
			return UploadInfo{}, rErr
		}

		// Calculates hash sums while copying partSize bytes into cw.
		for k, v := range hashAlgos {
			v.Write(buf[:length])
			hashSums[k] = v.Sum(nil)
			v.Close()
		}

		// Update progress reader appropriately to the latest offset
		// as we read from the source.
		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)

		// Checksums..
		var (
			md5Base64 string
			sha256Hex string
		)
		if hashSums["md5"] != nil {
			md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
		}
		if hashSums["sha256"] != nil {
			sha256Hex = hex.EncodeToString(hashSums["sha256"])
		}

		// Proceed to upload the part.
		objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
			md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption)
		if uerr != nil {
			return UploadInfo{}, uerr
		}

		// Save successfully uploaded part metadata.
		partsInfo[partNumber] = objPart

		// Save successfully uploaded size.
		totalUploadedSize += int64(length)

		// Increment part number.
		partNumber++

		// For unknown size, Read EOF we break away.
		// We do not have to upload till totalPartsCount.
		if rErr == io.EOF {
			break
		}
	}

	// Loop over total uploaded parts to save them in
	// Parts array before completing the multipart request.
	for i := 1; i < partNumber; i++ {
		part, ok := partsInfo[i]
		if !ok {
			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
		}
		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
			ETag:       part.ETag,
			PartNumber: part.PartNumber,
		})
	}

	// Sort all completed parts.
	sort.Sort(completedParts(complMultipartUpload.Parts))

	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
	if err != nil {
		return UploadInfo{}, err
	}

	uploadInfo.Size = totalUploadedSize
	return uploadInfo, nil
}
|
||||
|
||||
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
|
||||
func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return initiateMultipartUploadResult{}, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return initiateMultipartUploadResult{}, err
|
||||
}
|
||||
|
||||
// Initialize url queries.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("uploads", "")
|
||||
|
||||
// Set ContentType header.
|
||||
customHeader := opts.Header()
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
customHeader: customHeader,
|
||||
}
|
||||
|
||||
// Execute POST on an objectName to initiate multipart upload.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return initiateMultipartUploadResult{}, err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
// Decode xml for new multipart upload.
|
||||
initiateMultipartUploadResult := initiateMultipartUploadResult{}
|
||||
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
|
||||
if err != nil {
|
||||
return initiateMultipartUploadResult, err
|
||||
}
|
||||
return initiateMultipartUploadResult, nil
|
||||
}
|
||||
|
||||
// uploadPart - Uploads a part in a multipart upload.
//
// The part body is streamed from reader (size bytes) as a PUT carrying the
// partNumber and uploadId query parameters. md5Base64/sha256Hex, when
// non-empty, are sent as content checksums. On success it returns the part's
// metadata (ETag, number, size) needed later to complete the upload.
func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
	partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ObjectPart{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return ObjectPart{}, err
	}
	// Parts larger than the maximum allowed part size are rejected.
	if size > maxPartSize {
		return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName)
	}
	// Negative sizes are invalid (0 is allowed).
	if size <= -1 {
		return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName)
	}
	if partNumber <= 0 {
		return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.")
	}
	if uploadID == "" {
		return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.")
	}

	// Get resources properly escaped and lined up before using them in http request.
	urlValues := make(url.Values)
	// Set part number.
	urlValues.Set("partNumber", strconv.Itoa(partNumber))
	// Set upload id.
	urlValues.Set("uploadId", uploadID)

	// Set encryption headers, if any.
	customHeader := make(http.Header)
	// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
	// Server-side encryption is supported by the S3 Multipart Upload actions.
	// Unless you are using a customer-provided encryption key, you don't need
	// to specify the encryption parameters in each UploadPart request.
	if sse != nil && sse.Type() == encrypt.SSEC {
		sse.Marshal(customHeader)
	}

	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      urlValues,
		customHeader:     customHeader,
		contentBody:      reader,
		contentLength:    size,
		contentMD5Base64: md5Base64,
		contentSHA256Hex: sha256Hex,
	}

	// Execute PUT on each part.
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return ObjectPart{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}
	// Once successfully uploaded, return completed part.
	objPart := ObjectPart{}
	objPart.Size = size
	objPart.PartNumber = partNumber
	// Trim off the odd double quotes from ETag in the beginning and end.
	objPart.ETag = trimEtag(resp.Header.Get("ETag"))
	return objPart, nil
}
|
||||
|
||||
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
//
// NOTE: the response body is read fully and decoded twice because, as the
// code below handles, a 200 response may still carry an error document in
// its body instead of a completeMultipartUploadResult; a zero Bucket field
// after the first decode is taken as that signal.
func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
	complete completeMultipartUpload) (UploadInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Initialize url queries.
	urlValues := make(url.Values)
	urlValues.Set("uploadId", uploadID)
	// Marshal complete multipart body.
	completeMultipartUploadBytes, err := xml.Marshal(complete)
	if err != nil {
		return UploadInfo{}, err
	}

	// Instantiate all the complete multipart buffer.
	completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      urlValues,
		contentBody:      completeMultipartUploadBuffer,
		contentLength:    int64(len(completeMultipartUploadBytes)),
		contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
	}

	// Execute POST to complete multipart upload for an objectName.
	resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return UploadInfo{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// Read resp.Body into a []bytes to parse for Error response inside the body
	var b []byte
	b, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		return UploadInfo{}, err
	}
	// Decode completed multipart upload response on success.
	completeMultipartUploadResult := completeMultipartUploadResult{}
	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
	if err != nil {
		// xml parsing failure due to presence an ill-formed xml fragment
		return UploadInfo{}, err
	} else if completeMultipartUploadResult.Bucket == "" {
		// xml's Decode method ignores well-formed xml that don't apply to the type of value supplied.
		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
		// of the members.

		// Decode completed multipart upload response on failure
		completeMultipartUploadErr := ErrorResponse{}
		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
		if err != nil {
			// xml parsing failure due to presence an ill-formed xml fragment
			return UploadInfo{}, err
		}
		return UploadInfo{}, completeMultipartUploadErr
	}

	// extract lifecycle expiry date and rule ID
	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))

	return UploadInfo{
		Bucket:           completeMultipartUploadResult.Bucket,
		Key:              completeMultipartUploadResult.Key,
		ETag:             trimEtag(completeMultipartUploadResult.ETag),
		VersionID:        resp.Header.Get(amzVersionID),
		Location:         completeMultipartUploadResult.Location,
		Expiration:       expTime,
		ExpirationRuleID: ruleID,
	}, nil

}
|
487
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
Normal file
487
vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
generated
vendored
Normal file
@ -0,0 +1,487 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// putObjectMultipartStream - upload a large object using
|
||||
// multipart upload and streaming signature for signing payload.
|
||||
// Comprehensive put object operation involving multipart uploads.
|
||||
//
|
||||
// Following code handles these types of readers.
|
||||
//
|
||||
// - *minio.Object
|
||||
// - Any reader which has a method 'ReadAt()'
|
||||
//
|
||||
func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
|
||||
reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
|
||||
if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
|
||||
// Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
|
||||
info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
|
||||
} else {
|
||||
info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
|
||||
}
|
||||
if err != nil {
|
||||
errResp := ToErrorResponse(err)
|
||||
// Verify if multipart functionality is not available, if not
|
||||
// fall back to single PutObject operation.
|
||||
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
|
||||
// Verify if size of reader is greater than '5GiB'.
|
||||
if size > maxSinglePutObjectSize {
|
||||
return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
|
||||
}
|
||||
// Fall back to uploading as single PutObject operation.
|
||||
return c.putObject(ctx, bucketName, objectName, reader, size, opts)
|
||||
}
|
||||
}
|
||||
return info, err
|
||||
}
|
||||
|
||||
// uploadedPartRes - the response received from a part upload.
type uploadedPartRes struct {
	Error   error // Any error encountered while uploading the part.
	PartNum int   // Number of the part uploaded.
	Size    int64 // Size of the part uploaded.
	Part    ObjectPart // Metadata of the uploaded part (ETag, number, size).
}

// uploadPartReq - a single part-upload request consumed by upload workers.
type uploadPartReq struct {
	PartNum int        // Number of the part to be uploaded.
	Part    ObjectPart // Filled in with the part's metadata once uploaded.
}
|
||||
|
||||
// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB.
|
||||
// Supports all readers which implements io.ReaderAt interface
|
||||
// (ReadAt method).
|
||||
//
|
||||
// NOTE: This function is meant to be used for all readers which
|
||||
// implement io.ReaderAt which allows us for resuming multipart
|
||||
// uploads but reading at an offset, which would avoid re-read the
|
||||
// data which was already uploaded. Internally this function uses
|
||||
// temporary files for staging all the data, these temporary files are
|
||||
// cleaned automatically when the caller i.e http client closes the
|
||||
// stream after uploading all the contents successfully.
|
||||
func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
|
||||
reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
// Input validation.
|
||||
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
if err = s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
// Calculate the optimal parts info for a given size.
|
||||
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
// Initiate a new multipart upload.
|
||||
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
// Aborts the multipart upload in progress, if the
|
||||
// function returns any error, since we do not resume
|
||||
// we should purge the parts which have been uploaded
|
||||
// to relinquish storage space.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
|
||||
}
|
||||
}()
|
||||
|
||||
// Total data read and written to server. should be equal to 'size' at the end of the call.
|
||||
var totalUploadedSize int64
|
||||
|
||||
// Complete multipart upload.
|
||||
var complMultipartUpload completeMultipartUpload
|
||||
|
||||
// Declare a channel that sends the next part number to be uploaded.
|
||||
// Buffered to 10000 because thats the maximum number of parts allowed
|
||||
// by S3.
|
||||
uploadPartsCh := make(chan uploadPartReq, 10000)
|
||||
|
||||
// Declare a channel that sends back the response of a part upload.
|
||||
// Buffered to 10000 because thats the maximum number of parts allowed
|
||||
// by S3.
|
||||
uploadedPartsCh := make(chan uploadedPartRes, 10000)
|
||||
|
||||
// Used for readability, lastPartNumber is always totalPartsCount.
|
||||
lastPartNumber := totalPartsCount
|
||||
|
||||
// Send each part number to the channel to be processed.
|
||||
for p := 1; p <= totalPartsCount; p++ {
|
||||
uploadPartsCh <- uploadPartReq{PartNum: p}
|
||||
}
|
||||
close(uploadPartsCh)
|
||||
|
||||
var partsBuf = make([][]byte, opts.getNumThreads())
|
||||
for i := range partsBuf {
|
||||
partsBuf[i] = make([]byte, 0, partSize)
|
||||
}
|
||||
|
||||
// Receive each part number from the channel allowing three parallel uploads.
|
||||
for w := 1; w <= opts.getNumThreads(); w++ {
|
||||
go func(w int, partSize int64) {
|
||||
// Each worker will draw from the part channel and upload in parallel.
|
||||
for uploadReq := range uploadPartsCh {
|
||||
|
||||
// If partNumber was not uploaded we calculate the missing
|
||||
// part offset and size. For all other part numbers we
|
||||
// calculate offset based on multiples of partSize.
|
||||
readOffset := int64(uploadReq.PartNum-1) * partSize
|
||||
|
||||
// As a special case if partNumber is lastPartNumber, we
|
||||
// calculate the offset based on the last part size.
|
||||
if uploadReq.PartNum == lastPartNumber {
|
||||
readOffset = (size - lastPartSize)
|
||||
partSize = lastPartSize
|
||||
}
|
||||
|
||||
n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize])
|
||||
if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
|
||||
uploadedPartsCh <- uploadedPartRes{
|
||||
Error: rerr,
|
||||
}
|
||||
// Exit the goroutine.
|
||||
return
|
||||
}
|
||||
|
||||
// Get a section reader on a particular offset.
|
||||
hookReader := newHook(bytes.NewReader(partsBuf[w-1][:n]), opts.Progress)
|
||||
|
||||
// Proceed to upload the part.
|
||||
objPart, err := c.uploadPart(ctx, bucketName, objectName,
|
||||
uploadID, hookReader, uploadReq.PartNum,
|
||||
"", "", partSize, opts.ServerSideEncryption)
|
||||
if err != nil {
|
||||
uploadedPartsCh <- uploadedPartRes{
|
||||
Error: err,
|
||||
}
|
||||
// Exit the goroutine.
|
||||
return
|
||||
}
|
||||
|
||||
// Save successfully uploaded part metadata.
|
||||
uploadReq.Part = objPart
|
||||
|
||||
// Send successful part info through the channel.
|
||||
uploadedPartsCh <- uploadedPartRes{
|
||||
Size: objPart.Size,
|
||||
PartNum: uploadReq.PartNum,
|
||||
Part: uploadReq.Part,
|
||||
}
|
||||
}
|
||||
}(w, partSize)
|
||||
}
|
||||
|
||||
// Gather the responses as they occur and update any
|
||||
// progress bar.
|
||||
for u := 1; u <= totalPartsCount; u++ {
|
||||
uploadRes := <-uploadedPartsCh
|
||||
if uploadRes.Error != nil {
|
||||
return UploadInfo{}, uploadRes.Error
|
||||
}
|
||||
// Update the totalUploadedSize.
|
||||
totalUploadedSize += uploadRes.Size
|
||||
// Store the parts to be completed in order.
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: uploadRes.Part.ETag,
|
||||
PartNumber: uploadRes.Part.PartNumber,
|
||||
})
|
||||
}
|
||||
|
||||
// Verify if we uploaded all the data.
|
||||
if totalUploadedSize != size {
|
||||
return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
|
||||
}
|
||||
|
||||
// Sort all completed parts.
|
||||
sort.Sort(completedParts(complMultipartUpload.Parts))
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
uploadInfo.Size = totalUploadedSize
|
||||
return uploadInfo, nil
|
||||
}
|
||||
|
||||
func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
|
||||
reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
|
||||
// Input validation.
|
||||
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
if err = s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
// Calculate the optimal parts info for a given size.
|
||||
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
// Initiates a new multipart request
|
||||
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
// Aborts the multipart upload if the function returns
|
||||
// any error, since we do not resume we should purge
|
||||
// the parts which have been uploaded to relinquish
|
||||
// storage space.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
|
||||
}
|
||||
}()
|
||||
|
||||
// Total data read and written to server. should be equal to 'size' at the end of the call.
|
||||
var totalUploadedSize int64
|
||||
|
||||
// Initialize parts uploaded map.
|
||||
partsInfo := make(map[int]ObjectPart)
|
||||
|
||||
// Create a buffer.
|
||||
buf := make([]byte, partSize)
|
||||
|
||||
// Avoid declaring variables in the for loop
|
||||
var md5Base64 string
|
||||
var hookReader io.Reader
|
||||
|
||||
// Part number always starts with '1'.
|
||||
var partNumber int
|
||||
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
|
||||
|
||||
// Proceed to upload the part.
|
||||
if partNumber == totalPartsCount {
|
||||
partSize = lastPartSize
|
||||
}
|
||||
|
||||
if opts.SendContentMd5 {
|
||||
length, rerr := readFull(reader, buf)
|
||||
if rerr == io.EOF && partNumber > 1 {
|
||||
break
|
||||
}
|
||||
|
||||
if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
|
||||
return UploadInfo{}, rerr
|
||||
}
|
||||
|
||||
// Calculate md5sum.
|
||||
hash := c.md5Hasher()
|
||||
hash.Write(buf[:length])
|
||||
md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
|
||||
hash.Close()
|
||||
|
||||
// Update progress reader appropriately to the latest offset
|
||||
// as we read from the source.
|
||||
hookReader = newHook(bytes.NewReader(buf[:length]), opts.Progress)
|
||||
} else {
|
||||
// Update progress reader appropriately to the latest offset
|
||||
// as we read from the source.
|
||||
hookReader = newHook(reader, opts.Progress)
|
||||
}
|
||||
|
||||
objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID,
|
||||
io.LimitReader(hookReader, partSize),
|
||||
partNumber, md5Base64, "", partSize, opts.ServerSideEncryption)
|
||||
if uerr != nil {
|
||||
return UploadInfo{}, uerr
|
||||
}
|
||||
|
||||
// Save successfully uploaded part metadata.
|
||||
partsInfo[partNumber] = objPart
|
||||
|
||||
// Save successfully uploaded size.
|
||||
totalUploadedSize += partSize
|
||||
}
|
||||
|
||||
// Verify if we uploaded all the data.
|
||||
if size > 0 {
|
||||
if totalUploadedSize != size {
|
||||
return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
|
||||
}
|
||||
}
|
||||
|
||||
// Complete multipart upload.
|
||||
var complMultipartUpload completeMultipartUpload
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
})
|
||||
}
|
||||
|
||||
// Sort all completed parts.
|
||||
sort.Sort(completedParts(complMultipartUpload.Parts))
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
}
|
||||
|
||||
uploadInfo.Size = totalUploadedSize
|
||||
return uploadInfo, nil
|
||||
}
|
||||
|
||||
// putObject special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Size -1 is only supported on Google Cloud Storage, we error
	// out in all other situations.
	if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
		return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName)
	}

	// MD5 requires reading the whole payload, which is impossible when the
	// size is unknown (-1).
	if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
		return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
	}

	// When the reader supports random access (ReaderAt) and seeking, pin the
	// upload to the fixed window [current offset, current offset+size) so the
	// same bytes are read regardless of the reader's later position.
	if size > 0 {
		if isReadAt(reader) && !isObject(reader) {
			seeker, ok := reader.(io.Seeker)
			if ok {
				offset, err := seeker.Seek(0, io.SeekCurrent)
				if err != nil {
					return UploadInfo{}, errInvalidArgument(err.Error())
				}
				reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
			}
		}
	}

	var md5Base64 string
	if opts.SendContentMd5 {
		// Create a buffer.
		// NOTE(review): this holds the entire object in memory to hash it;
		// SendContentMd5 is only sensible for modestly sized payloads here.
		buf := make([]byte, size)

		length, rErr := readFull(reader, buf)
		if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
			return UploadInfo{}, rErr
		}

		// Calculate md5sum.
		hash := c.md5Hasher()
		hash.Write(buf[:length])
		md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
		// The source reader was consumed while hashing; replace it with the
		// buffered bytes so the PUT below sends exactly what was hashed.
		reader = bytes.NewReader(buf[:length])
		hash.Close()
	}

	// Update progress reader appropriately to the latest offset as we
	// read from the source.
	readSeeker := newHook(reader, opts.Progress)

	// This function does not calculate sha256 and md5sum for payload.
	// Execute put object.
	return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts)
}
|
||||
|
||||
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}
	// Set headers.
	customHeader := opts.Header()

	// Populate request metadata.
	reqMetadata := requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		customHeader:     customHeader,
		contentBody:      reader,
		contentLength:    size,
		contentMD5Base64: md5Base64,
		contentSHA256Hex: sha256Hex,
	}
	// Internal replication use: target the source object's version.
	if opts.Internal.SourceVersionID != "" {
		// Reject malformed version IDs before building the query.
		if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
			return UploadInfo{}, errInvalidArgument(err.Error())
		}
		urlValues := make(url.Values)
		urlValues.Set("versionId", opts.Internal.SourceVersionID)
		reqMetadata.queryValues = urlValues
	}

	// Execute PUT an objectName.
	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
	defer closeResponse(resp)
	if err != nil {
		return UploadInfo{}, err
	}
	if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// extract lifecycle expiry date and rule ID
	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))

	return UploadInfo{
		Bucket:           bucketName,
		Key:              objectName,
		ETag:             trimEtag(resp.Header.Get("ETag")),
		VersionID:        resp.Header.Get(amzVersionID),
		Size:             size,
		Expiration:       expTime,
		ExpirationRuleID: ruleID,
	}, nil
}
|
366
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
Normal file
366
vendor/github.com/minio/minio-go/v7/api-put-object.go
generated
vendored
Normal file
@ -0,0 +1,366 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
)
|
||||
|
||||
// ReplicationStatus describes the replication state the server reports for
// an object.
type ReplicationStatus string

// Replication states returned by the server.
const (
	// ReplicationStatusPending indicates replication is pending
	ReplicationStatusPending ReplicationStatus = "PENDING"
	// ReplicationStatusComplete indicates replication completed ok
	ReplicationStatusComplete ReplicationStatus = "COMPLETE"
	// ReplicationStatusFailed indicates replication failed
	ReplicationStatusFailed ReplicationStatus = "FAILED"
	// ReplicationStatusReplica indicates object is a replica of a source
	ReplicationStatusReplica ReplicationStatus = "REPLICA"
)

// Empty reports whether no replication status has been set.
func (r ReplicationStatus) Empty() bool {
	return len(r) == 0
}
|
||||
|
||||
// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition
// implementation on MinIO server
type AdvancedPutOptions struct {
	SourceVersionID   string            // version ID of the source object being replicated
	SourceETag        string            // ETag of the source object
	ReplicationStatus ReplicationStatus // replication state to stamp on the destination
	SourceMTime       time.Time         // modification time of the source object
}

// PutObjectOptions represents options specified by user for PutObject call
type PutObjectOptions struct {
	UserMetadata            map[string]string // sent as x-amz-meta-* unless already a standard/amz header
	UserTags                map[string]string // object tags, URL-encoded into the tagging header
	Progress                io.Reader         // optional progress-reporting hook
	ContentType             string            // defaults to "application/octet-stream" when empty
	ContentEncoding         string
	ContentDisposition      string
	ContentLanguage         string
	CacheControl            string
	Mode                    RetentionMode   // object-lock retention mode
	RetainUntilDate         time.Time       // object-lock retain-until timestamp
	ServerSideEncryption    encrypt.ServerSide
	NumThreads              uint            // parallel part uploads; 0 means package default
	StorageClass            string
	WebsiteRedirectLocation string
	PartSize                uint64          // multipart part size; 0 means package minimum
	LegalHold               LegalHoldStatus
	SendContentMd5          bool // buffer parts in memory and send Content-MD5
	DisableMultipart        bool // force single-PUT upload
	Internal                AdvancedPutOptions
}
|
||||
|
||||
// getNumThreads - gets the number of threads to be used in the multipart
|
||||
// put object operation
|
||||
func (opts PutObjectOptions) getNumThreads() (numThreads int) {
|
||||
if opts.NumThreads > 0 {
|
||||
numThreads = int(opts.NumThreads)
|
||||
} else {
|
||||
numThreads = totalWorkers
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Header - constructs the headers from metadata entered by user in
// PutObjectOptions struct
func (opts PutObjectOptions) Header() (header http.Header) {
	header = make(http.Header)

	// Content-Type always gets a value; default to the generic binary type.
	contentType := opts.ContentType
	if contentType == "" {
		contentType = "application/octet-stream"
	}
	header.Set("Content-Type", contentType)

	// Standard entity headers, set only when provided.
	if opts.ContentEncoding != "" {
		header.Set("Content-Encoding", opts.ContentEncoding)
	}
	if opts.ContentDisposition != "" {
		header.Set("Content-Disposition", opts.ContentDisposition)
	}
	if opts.ContentLanguage != "" {
		header.Set("Content-Language", opts.ContentLanguage)
	}
	if opts.CacheControl != "" {
		header.Set("Cache-Control", opts.CacheControl)
	}

	// Object-lock retention headers.
	if opts.Mode != "" {
		header.Set(amzLockMode, opts.Mode.String())
	}

	if !opts.RetainUntilDate.IsZero() {
		header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339))
	}

	if opts.LegalHold != "" {
		header.Set(amzLegalHoldHeader, opts.LegalHold.String())
	}

	// Server-side encryption writes its own header set.
	if opts.ServerSideEncryption != nil {
		opts.ServerSideEncryption.Marshal(header)
	}

	if opts.StorageClass != "" {
		header.Set(amzStorageClass, opts.StorageClass)
	}

	if opts.WebsiteRedirectLocation != "" {
		header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation)
	}

	// Internal replication metadata headers.
	if !opts.Internal.ReplicationStatus.Empty() {
		header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
	}
	if !opts.Internal.SourceMTime.IsZero() {
		header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano))
	}
	if opts.Internal.SourceETag != "" {
		header.Set(minIOBucketSourceETag, opts.Internal.SourceETag)
	}
	if len(opts.UserTags) != 0 {
		header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
	}

	// User metadata: standard/amz/storage-class headers pass through as-is,
	// everything else is namespaced under x-amz-meta-.
	for k, v := range opts.UserMetadata {
		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
			header.Set(k, v)
		} else {
			header.Set("x-amz-meta-"+k, v)
		}
	}
	return
}
|
||||
|
||||
// validate() checks if the UserMetadata map has standard headers or and raises an error if so.
func (opts PutObjectOptions) validate() (err error) {
	// User metadata must use valid HTTP header syntax and must not collide
	// with standard, SSE, or storage-class headers.
	for k, v := range opts.UserMetadata {
		if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
			return errInvalidArgument(k + " unsupported user defined metadata name")
		}
		if !httpguts.ValidHeaderFieldValue(v) {
			return errInvalidArgument(v + " unsupported user defined metadata value")
		}
	}
	// Object-lock settings, when present, must be recognized values.
	if opts.Mode != "" && !opts.Mode.IsValid() {
		return errInvalidArgument(opts.Mode.String() + " unsupported retention mode")
	}
	if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
		return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
	}
	return nil
}
|
||||
|
||||
// completedParts is a collection of parts sortable by their part numbers.
// used for sorting the uploaded parts before completing the multipart request.
// Implements sort.Interface ordered by ascending PartNumber.
func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
|
||||
|
||||
// PutObject creates an object in a bucket.
//
// You must have WRITE permissions on a bucket to create an object.
//
//  - For size smaller than 128MiB PutObject automatically does a
//    single atomic Put operation.
//  - For size larger than 128MiB PutObject automatically does a
//    multipart Put operation.
//  - For size input as -1 PutObject does a multipart Put operation
//    until input stream reaches EOF. Maximum object size that can
//    be uploaded through this operation will be 5TiB.
func (c Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
	opts PutObjectOptions) (info UploadInfo, err error) {
	// A single PUT needs a known content length, so unknown size and
	// DisableMultipart are mutually exclusive.
	if objectSize < 0 && opts.DisableMultipart {
		return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
	}

	// Reject invalid metadata / object-lock options up front.
	err = opts.validate()
	if err != nil {
		return UploadInfo{}, err
	}

	return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
}
|
||||
|
||||
// putObjectCommon routes an upload to the appropriate strategy based on
// endpoint type, signature version, object size and user options.
func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
	// Check for largest object size allowed.
	if size > int64(maxMultipartPutObjectSize) {
		return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
	}

	// NOTE: Streaming signature is not supported by GCS.
	if s3utils.IsGoogleEndpoint(*c.endpointURL) {
		return c.putObject(ctx, bucketName, objectName, reader, size, opts)
	}

	// Effective part size: user-specified or the package minimum.
	partSize := opts.PartSize
	if opts.PartSize == 0 {
		partSize = minPartSize
	}

	// V2 signatures cannot use the streaming upload path.
	if c.overrideSignerType.IsV2() {
		if size >= 0 && size < int64(partSize) || opts.DisableMultipart {
			return c.putObject(ctx, bucketName, objectName, reader, size, opts)
		}
		return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
	}

	// Unknown size: stream parts until the reader reaches EOF.
	if size < 0 {
		return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
	}

	// Small object, or multipart explicitly disabled: single PUT.
	if size < int64(partSize) || opts.DisableMultipart {
		return c.putObject(ctx, bucketName, objectName, reader, size, opts)
	}

	return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
}
|
||||
|
||||
// putObjectMultipartStreamNoLength uploads a stream of unknown length as a
// multipart upload, reading fixed-size parts until EOF. On any error the
// multipart upload is aborted so no orphaned parts remain.
func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
	// Input validation.
	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
		return UploadInfo{}, err
	}
	if err = s3utils.CheckValidObjectName(objectName); err != nil {
		return UploadInfo{}, err
	}

	// Total data read and written to server. should be equal to
	// 'size' at the end of the call.
	var totalUploadedSize int64

	// Complete multipart upload.
	var complMultipartUpload completeMultipartUpload

	// Calculate the optimal parts info for a given size.
	// Size -1 yields the maximum part count with the configured part size.
	totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize)
	if err != nil {
		return UploadInfo{}, err
	}
	// Initiate a new multipart upload.
	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
	if err != nil {
		return UploadInfo{}, err
	}

	// Abort the upload on any error; we do not resume, so uploaded parts
	// must be purged to relinquish storage space.
	defer func() {
		if err != nil {
			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
		}
	}()

	// Part number always starts with '1'.
	partNumber := 1

	// Initialize parts uploaded map.
	partsInfo := make(map[int]ObjectPart)

	// Create a buffer.
	buf := make([]byte, partSize)

	for partNumber <= totalPartsCount {
		length, rerr := readFull(reader, buf)
		// Clean EOF after at least one part means the stream is done.
		if rerr == io.EOF && partNumber > 1 {
			break
		}

		if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
			return UploadInfo{}, rerr
		}

		var md5Base64 string
		if opts.SendContentMd5 {
			// Calculate md5sum.
			hash := c.md5Hasher()
			hash.Write(buf[:length])
			md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil))
			hash.Close()
		}

		// Update progress reader appropriately to the latest offset
		// as we read from the source.
		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)

		// Proceed to upload the part.
		objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
			md5Base64, "", int64(length), opts.ServerSideEncryption)
		if uerr != nil {
			return UploadInfo{}, uerr
		}

		// Save successfully uploaded part metadata.
		partsInfo[partNumber] = objPart

		// Save successfully uploaded size.
		totalUploadedSize += int64(length)

		// Increment part number.
		partNumber++

		// For unknown size, Read EOF we break away.
		// We do not have to upload till totalPartsCount.
		if rerr == io.EOF {
			break
		}
	}

	// Loop over total uploaded parts to save them in
	// Parts array before completing the multipart request.
	for i := 1; i < partNumber; i++ {
		part, ok := partsInfo[i]
		if !ok {
			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
		}
		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
			ETag:       part.ETag,
			PartNumber: part.PartNumber,
		})
	}

	// Sort all completed parts.
	sort.Sort(completedParts(complMultipartUpload.Parts))

	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
	if err != nil {
		return UploadInfo{}, err
	}

	uploadInfo.Size = totalUploadedSize
	return uploadInfo, nil
}
|
415
vendor/github.com/minio/minio-go/v7/api-remove.go
generated
vendored
Normal file
415
vendor/github.com/minio/minio-go/v7/api-remove.go
generated
vendored
Normal file
@ -0,0 +1,415 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// RemoveBucket deletes the bucket name.
//
// All objects (including all object versions and delete markers).
// in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(ctx context.Context, bucketName string) error {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return err
	}
	// Execute DELETE on bucket.
	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
		bucketName:       bucketName,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	// S3 answers 204 No Content on a successful bucket delete.
	if resp != nil {
		if resp.StatusCode != http.StatusNoContent {
			return httpRespToErrorResponse(resp, bucketName, "")
		}
	}

	// Remove the location from cache on a successful delete.
	c.bucketLocCache.Delete(bucketName)

	return nil
}
|
||||
|
||||
// AdvancedRemoveOptions intended for internal use by replication
type AdvancedRemoveOptions struct {
	ReplicationDeleteMarker bool              // mark the delete as a replicated delete marker
	ReplicationStatus       ReplicationStatus // replication state to stamp on the delete
	ReplicationMTime        time.Time         // source modification time for the replicated delete
}

// RemoveObjectOptions represents options specified by user for RemoveObject call
type RemoveObjectOptions struct {
	GovernanceBypass bool   // bypass governance-mode object-lock retention
	VersionID        string // delete this specific object version when set
	Internal         AdvancedRemoveOptions
}
|
||||
|
||||
// RemoveObject removes an object from a bucket.
|
||||
func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.removeObject(ctx, bucketName, objectName, opts)
|
||||
}
|
||||
|
||||
// removeObject issues the DELETE request for a single object (optionally a
// specific version), carrying any governance-bypass and internal
// replication headers.
func (c Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {

	// Get resources properly escaped and lined up before
	// using them in http request.
	urlValues := make(url.Values)

	if opts.VersionID != "" {
		urlValues.Set("versionId", opts.VersionID)
	}

	// Build headers.
	headers := make(http.Header)

	if opts.GovernanceBypass {
		// Set the bypass goverenance retention header
		headers.Set(amzBypassGovernance, "true")
	}
	// Internal replication headers.
	if opts.Internal.ReplicationDeleteMarker {
		headers.Set(minIOBucketReplicationDeleteMarker, "true")
	}
	if !opts.Internal.ReplicationMTime.IsZero() {
		headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano))
	}
	if !opts.Internal.ReplicationStatus.Empty() {
		headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus))
	}
	// Execute DELETE on objectName.
	resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		contentSHA256Hex: emptySHA256Hex,
		queryValues:      urlValues,
		customHeader:     headers,
	})
	defer closeResponse(resp)
	if err != nil {
		return err
	}
	if resp != nil {
		// if some unexpected error happened and max retry is reached, we want to let client know
		if resp.StatusCode != http.StatusNoContent {
			return httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// DeleteObject always responds with http '204' even for
	// objects which do not exist. So no need to handle them
	// specifically.
	return nil
}
|
||||
|
||||
// RemoveObjectError - container of Multi Delete S3 API error
type RemoveObjectError struct {
	ObjectName string // key of the object that failed to delete
	VersionID  string // version that failed to delete, when versioned
	Err        error  // underlying error reported by the server or client
}
|
||||
|
||||
// generateRemoveMultiObjects - generate the XML request for remove multi objects request
|
||||
func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
|
||||
delObjects := []deleteObject{}
|
||||
for _, obj := range objects {
|
||||
delObjects = append(delObjects, deleteObject{
|
||||
Key: obj.Key,
|
||||
VersionID: obj.VersionID,
|
||||
})
|
||||
}
|
||||
xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: true})
|
||||
return xmlBytes
|
||||
}
|
||||
|
||||
// processRemoveMultiObjectsResponse - parse the remove multi objects web service
// and return the success/failure result status for each object
func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, errorCh chan<- RemoveObjectError) {
	// Parse multi delete XML response
	rmResult := &deleteMultiObjectsResult{}
	err := xmlDecoder(body, rmResult)
	if err != nil {
		// A decode failure is reported once with an empty object name.
		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
		return
	}

	// Fill deletion that returned an error.
	for _, obj := range rmResult.UnDeletedObjects {
		// Version does not exist is not an error ignore and continue.
		switch obj.Code {
		case "InvalidArgument", "NoSuchVersion":
			continue
		}
		errorCh <- RemoveObjectError{
			ObjectName: obj.Key,
			VersionID:  obj.VersionID,
			Err: ErrorResponse{
				Code:    obj.Code,
				Message: obj.Message,
			},
		}
	}
}
|
||||
|
||||
// RemoveObjectsOptions represents options specified by user for RemoveObjects call
type RemoveObjectsOptions struct {
	GovernanceBypass bool // bypass governance-mode object-lock retention
}

// RemoveObjects removes multiple objects from a bucket while
// it is possible to specify objects versions which are received from
// objectsCh. Remove failures are sent back via error channel.
func (c Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError {
	errorCh := make(chan RemoveObjectError, 1)

	// Validate if bucket name is valid.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		// Close after the buffered send so the caller still receives the error.
		defer close(errorCh)
		errorCh <- RemoveObjectError{
			Err: err,
		}
		return errorCh
	}
	// Validate objects channel to be properly allocated.
	if objectsCh == nil {
		defer close(errorCh)
		errorCh <- RemoveObjectError{
			Err: errInvalidArgument("Objects channel cannot be nil"),
		}
		return errorCh
	}

	// The worker goroutine owns errorCh and closes it when done.
	go c.removeObjects(ctx, bucketName, objectsCh, errorCh, opts)
	return errorCh
}
|
||||
|
||||
// validXMLChar reports whether r belongs to the character set allowed in an
// XML 1.0 document. The allowed ranges are listed at
// https://www.w3.org/TR/xml/#charsets
func validXMLChar(r rune) bool {
	switch {
	case r == 0x09, r == 0x0A, r == 0x0D:
		return true
	case r >= 0x20 && r <= 0xD7FF:
		return true
	case r >= 0xE000 && r <= 0xFFFD:
		return true
	case r >= 0x10000 && r <= 0x10FFFF:
		return true
	default:
		return false
	}
}

// hasInvalidXMLChar reports whether str contains any rune that may not
// appear in an XML 1.0 document.
func hasInvalidXMLChar(str string) bool {
	for _, r := range str {
		if !validXMLChar(r) {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) {
	// S3 multi-delete accepts at most 1000 keys per request.
	maxEntries := 1000
	finish := false
	urlValues := make(url.Values)
	urlValues.Set("delete", "")

	// Close error channel when Multi delete finishes.
	defer close(errorCh)

	// Loop over entries by 1000 and call MultiDelete requests
	for {
		if finish {
			break
		}
		count := 0
		var batch []ObjectInfo

		// Try to gather 1000 entries
		for object := range objectsCh {
			if hasInvalidXMLChar(object.Key) {
				// Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document.
				err := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
					VersionID:        object.VersionID,
					GovernanceBypass: opts.GovernanceBypass,
				})
				if err != nil {
					// Version does not exist is not an error ignore and continue.
					switch ToErrorResponse(err).Code {
					case "InvalidArgument", "NoSuchVersion":
						continue
					}
					errorCh <- RemoveObjectError{
						ObjectName: object.Key,
						VersionID:  object.VersionID,
						Err:        err,
					}
				}
				// Handled individually; never added to the XML batch.
				continue
			}

			batch = append(batch, object)
			if count++; count >= maxEntries {
				break
			}
		}
		if count == 0 {
			// Multi Objects Delete API doesn't accept empty object list, quit immediately
			break
		}
		if count < maxEntries {
			// We didn't have 1000 entries, so this is the last batch
			finish = true
		}

		// Build headers.
		headers := make(http.Header)
		if opts.GovernanceBypass {
			// Set the bypass goverenance retention header
			headers.Set(amzBypassGovernance, "true")
		}

		// Generate remove multi objects XML request
		removeBytes := generateRemoveMultiObjectsRequest(batch)
		// Execute POST on bucket to delete the batch of objects.
		resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
			bucketName:       bucketName,
			queryValues:      urlValues,
			contentBody:      bytes.NewReader(removeBytes),
			contentLength:    int64(len(removeBytes)),
			contentMD5Base64: sumMD5Base64(removeBytes),
			contentSHA256Hex: sum256Hex(removeBytes),
			customHeader:     headers,
		})
		// A non-200 response is reported once for the whole batch.
		if resp != nil {
			if resp.StatusCode != http.StatusOK {
				e := httpRespToErrorResponse(resp, bucketName, "")
				errorCh <- RemoveObjectError{ObjectName: "", Err: e}
			}
		}
		// A transport-level error is fanned out to every object in the batch.
		if err != nil {
			for _, b := range batch {
				errorCh <- RemoveObjectError{
					ObjectName: b.Key,
					VersionID:  b.VersionID,
					Err:        err,
				}
			}
			continue
		}

		// Process multiobjects remove xml response
		processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)

		closeResponse(resp)
	}
}
|
||||
|
||||
// RemoveIncompleteUpload aborts an partially uploaded object.
|
||||
func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return err
|
||||
}
|
||||
// Find multipart upload ids of the object to be aborted.
|
||||
uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, uploadID := range uploadIDs {
|
||||
// abort incomplete multipart upload, based on the upload id passed.
|
||||
err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// abortMultipartUpload aborts a multipart upload for the given
|
||||
// uploadID, all previously uploaded parts are deleted.
|
||||
func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Initialize url queries.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("uploadId", uploadID)
|
||||
|
||||
// Execute DELETE on multipart upload.
|
||||
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
contentSHA256Hex: emptySHA256Hex,
|
||||
})
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
// Abort has no response body, handle it for any errors.
|
||||
var errorResponse ErrorResponse
|
||||
switch resp.StatusCode {
|
||||
case http.StatusNotFound:
|
||||
// This is needed specifically for abort and it cannot
|
||||
// be converged into default case.
|
||||
errorResponse = ErrorResponse{
|
||||
Code: "NoSuchUpload",
|
||||
Message: "The specified multipart upload does not exist.",
|
||||
BucketName: bucketName,
|
||||
Key: objectName,
|
||||
RequestID: resp.Header.Get("x-amz-request-id"),
|
||||
HostID: resp.Header.Get("x-amz-id-2"),
|
||||
Region: resp.Header.Get("x-amz-bucket-region"),
|
||||
}
|
||||
default:
|
||||
return httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
return errorResponse
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
361
vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
generated
vendored
Normal file
361
vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
generated
vendored
Normal file
@ -0,0 +1,361 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// listAllMyBucketsResult container for listBuckets response.
|
||||
type listAllMyBucketsResult struct {
|
||||
// Container for one or more buckets.
|
||||
Buckets struct {
|
||||
Bucket []BucketInfo
|
||||
}
|
||||
Owner owner
|
||||
}
|
||||
|
||||
// owner container for bucket owner information.
|
||||
type owner struct {
|
||||
DisplayName string
|
||||
ID string
|
||||
}
|
||||
|
||||
// CommonPrefix container for prefix response.
|
||||
type CommonPrefix struct {
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// ListBucketV2Result container for listObjects response version 2.
|
||||
type ListBucketV2Result struct {
|
||||
// A response can contain CommonPrefixes only if you have
|
||||
// specified a delimiter.
|
||||
CommonPrefixes []CommonPrefix
|
||||
// Metadata about each object returned.
|
||||
Contents []ObjectInfo
|
||||
Delimiter string
|
||||
|
||||
// Encoding type used to encode object keys in the response.
|
||||
EncodingType string
|
||||
|
||||
// A flag that indicates whether or not ListObjects returned all of the results
|
||||
// that satisfied the search criteria.
|
||||
IsTruncated bool
|
||||
MaxKeys int64
|
||||
Name string
|
||||
|
||||
// Hold the token that will be sent in the next request to fetch the next group of keys
|
||||
NextContinuationToken string
|
||||
|
||||
ContinuationToken string
|
||||
Prefix string
|
||||
|
||||
// FetchOwner and StartAfter are currently not used
|
||||
FetchOwner string
|
||||
StartAfter string
|
||||
}
|
||||
|
||||
// Version is an element in the list object versions response
|
||||
type Version struct {
|
||||
ETag string
|
||||
IsLatest bool
|
||||
Key string
|
||||
LastModified time.Time
|
||||
Owner Owner
|
||||
Size int64
|
||||
StorageClass string
|
||||
VersionID string `xml:"VersionId"`
|
||||
|
||||
isDeleteMarker bool
|
||||
}
|
||||
|
||||
// ListVersionsResult is an element in the list object versions response
|
||||
// and has a special Unmarshaler because we need to preserver the order
|
||||
// of <Version> and <DeleteMarker> in ListVersionsResult.Versions slice
|
||||
type ListVersionsResult struct {
|
||||
Versions []Version
|
||||
|
||||
CommonPrefixes []CommonPrefix
|
||||
Name string
|
||||
Prefix string
|
||||
Delimiter string
|
||||
MaxKeys int64
|
||||
EncodingType string
|
||||
IsTruncated bool
|
||||
KeyMarker string
|
||||
VersionIDMarker string
|
||||
NextKeyMarker string
|
||||
NextVersionIDMarker string
|
||||
}
|
||||
|
||||
// UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom
|
||||
// code will unmarshal <Version> and <DeleteMarker> tags and save them in Versions field to
|
||||
// preserve the lexical order of the listing.
|
||||
func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
|
||||
for {
|
||||
// Read tokens from the XML document in a stream.
|
||||
t, err := d.Token()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
switch se := t.(type) {
|
||||
case xml.StartElement:
|
||||
tagName := se.Name.Local
|
||||
switch tagName {
|
||||
case "Name", "Prefix",
|
||||
"Delimiter", "EncodingType",
|
||||
"KeyMarker", "NextKeyMarker":
|
||||
var s string
|
||||
if err = d.DecodeElement(&s, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
v := reflect.ValueOf(l).Elem().FieldByName(tagName)
|
||||
if v.IsValid() {
|
||||
v.SetString(s)
|
||||
}
|
||||
case "VersionIdMarker":
|
||||
// VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
|
||||
var s string
|
||||
if err = d.DecodeElement(&s, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
l.VersionIDMarker = s
|
||||
case "NextVersionIdMarker":
|
||||
// NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
|
||||
var s string
|
||||
if err = d.DecodeElement(&s, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
l.NextVersionIDMarker = s
|
||||
case "IsTruncated": // bool
|
||||
var b bool
|
||||
if err = d.DecodeElement(&b, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
l.IsTruncated = b
|
||||
case "MaxKeys": // int64
|
||||
var i int64
|
||||
if err = d.DecodeElement(&i, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
l.MaxKeys = i
|
||||
case "CommonPrefixes":
|
||||
var cp CommonPrefix
|
||||
if err = d.DecodeElement(&cp, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
l.CommonPrefixes = append(l.CommonPrefixes, cp)
|
||||
case "DeleteMarker", "Version":
|
||||
var v Version
|
||||
if err = d.DecodeElement(&v, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
if tagName == "DeleteMarker" {
|
||||
v.isDeleteMarker = true
|
||||
}
|
||||
l.Versions = append(l.Versions, v)
|
||||
default:
|
||||
return errors.New("unrecognized option:" + tagName)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListBucketResult container for listObjects response.
|
||||
type ListBucketResult struct {
|
||||
// A response can contain CommonPrefixes only if you have
|
||||
// specified a delimiter.
|
||||
CommonPrefixes []CommonPrefix
|
||||
// Metadata about each object returned.
|
||||
Contents []ObjectInfo
|
||||
Delimiter string
|
||||
|
||||
// Encoding type used to encode object keys in the response.
|
||||
EncodingType string
|
||||
|
||||
// A flag that indicates whether or not ListObjects returned all of the results
|
||||
// that satisfied the search criteria.
|
||||
IsTruncated bool
|
||||
Marker string
|
||||
MaxKeys int64
|
||||
Name string
|
||||
|
||||
// When response is truncated (the IsTruncated element value in
|
||||
// the response is true), you can use the key name in this field
|
||||
// as marker in the subsequent request to get next set of objects.
|
||||
// Object storage lists objects in alphabetical order Note: This
|
||||
// element is returned only if you have delimiter request
|
||||
// parameter specified. If response does not include the NextMaker
|
||||
// and it is truncated, you can use the value of the last Key in
|
||||
// the response as the marker in the subsequent request to get the
|
||||
// next set of object keys.
|
||||
NextMarker string
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// ListMultipartUploadsResult container for ListMultipartUploads response
|
||||
type ListMultipartUploadsResult struct {
|
||||
Bucket string
|
||||
KeyMarker string
|
||||
UploadIDMarker string `xml:"UploadIdMarker"`
|
||||
NextKeyMarker string
|
||||
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
|
||||
EncodingType string
|
||||
MaxUploads int64
|
||||
IsTruncated bool
|
||||
Uploads []ObjectMultipartInfo `xml:"Upload"`
|
||||
Prefix string
|
||||
Delimiter string
|
||||
// A response can contain CommonPrefixes only if you specify a delimiter.
|
||||
CommonPrefixes []CommonPrefix
|
||||
}
|
||||
|
||||
// initiator container for who initiated multipart upload.
|
||||
type initiator struct {
|
||||
ID string
|
||||
DisplayName string
|
||||
}
|
||||
|
||||
// copyObjectResult container for copy object response.
|
||||
type copyObjectResult struct {
|
||||
ETag string
|
||||
LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
|
||||
}
|
||||
|
||||
// ObjectPart container for particular part of an object.
|
||||
type ObjectPart struct {
|
||||
// Part number identifies the part.
|
||||
PartNumber int
|
||||
|
||||
// Date and time the part was uploaded.
|
||||
LastModified time.Time
|
||||
|
||||
// Entity tag returned when the part was uploaded, usually md5sum
|
||||
// of the part.
|
||||
ETag string
|
||||
|
||||
// Size of the uploaded part data.
|
||||
Size int64
|
||||
}
|
||||
|
||||
// ListObjectPartsResult container for ListObjectParts response.
|
||||
type ListObjectPartsResult struct {
|
||||
Bucket string
|
||||
Key string
|
||||
UploadID string `xml:"UploadId"`
|
||||
|
||||
Initiator initiator
|
||||
Owner owner
|
||||
|
||||
StorageClass string
|
||||
PartNumberMarker int
|
||||
NextPartNumberMarker int
|
||||
MaxParts int
|
||||
|
||||
// Indicates whether the returned list of parts is truncated.
|
||||
IsTruncated bool
|
||||
ObjectParts []ObjectPart `xml:"Part"`
|
||||
|
||||
EncodingType string
|
||||
}
|
||||
|
||||
// initiateMultipartUploadResult container for InitiateMultiPartUpload
|
||||
// response.
|
||||
type initiateMultipartUploadResult struct {
|
||||
Bucket string
|
||||
Key string
|
||||
UploadID string `xml:"UploadId"`
|
||||
}
|
||||
|
||||
// completeMultipartUploadResult container for completed multipart
|
||||
// upload response.
|
||||
type completeMultipartUploadResult struct {
|
||||
Location string
|
||||
Bucket string
|
||||
Key string
|
||||
ETag string
|
||||
}
|
||||
|
||||
// CompletePart sub container lists individual part numbers and their
|
||||
// md5sum, part of completeMultipartUpload.
|
||||
type CompletePart struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
|
||||
|
||||
// Part number identifies the part.
|
||||
PartNumber int
|
||||
ETag string
|
||||
}
|
||||
|
||||
// completeMultipartUpload container for completing multipart upload.
|
||||
type completeMultipartUpload struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
|
||||
Parts []CompletePart `xml:"Part"`
|
||||
}
|
||||
|
||||
// createBucketConfiguration container for bucket configuration.
|
||||
type createBucketConfiguration struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
|
||||
Location string `xml:"LocationConstraint"`
|
||||
}
|
||||
|
||||
// deleteObject container for Delete element in MultiObjects Delete XML request
|
||||
type deleteObject struct {
|
||||
Key string
|
||||
VersionID string `xml:"VersionId,omitempty"`
|
||||
}
|
||||
|
||||
// deletedObject container for Deleted element in MultiObjects Delete XML response
|
||||
type deletedObject struct {
|
||||
Key string
|
||||
VersionID string `xml:"VersionId,omitempty"`
|
||||
// These fields are ignored.
|
||||
DeleteMarker bool
|
||||
DeleteMarkerVersionID string
|
||||
}
|
||||
|
||||
// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
|
||||
type nonDeletedObject struct {
|
||||
Key string
|
||||
Code string
|
||||
Message string
|
||||
VersionID string `xml:"VersionId"`
|
||||
}
|
||||
|
||||
// deletedMultiObjects container for MultiObjects Delete XML request
|
||||
type deleteMultiObjects struct {
|
||||
XMLName xml.Name `xml:"Delete"`
|
||||
Quiet bool
|
||||
Objects []deleteObject `xml:"Object"`
|
||||
}
|
||||
|
||||
// deletedMultiObjectsResult container for MultiObjects Delete XML response
|
||||
type deleteMultiObjectsResult struct {
|
||||
XMLName xml.Name `xml:"DeleteResult"`
|
||||
DeletedObjects []deletedObject `xml:"Deleted"`
|
||||
UnDeletedObjects []nonDeletedObject `xml:"Error"`
|
||||
}
|
751
vendor/github.com/minio/minio-go/v7/api-select.go
generated
vendored
Normal file
751
vendor/github.com/minio/minio-go/v7/api-select.go
generated
vendored
Normal file
@ -0,0 +1,751 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* (C) 2018-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// CSVFileHeaderInfo - is the parameter for whether to utilize headers.
|
||||
type CSVFileHeaderInfo string
|
||||
|
||||
// Constants for file header info.
|
||||
const (
|
||||
CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
|
||||
CSVFileHeaderInfoIgnore = "IGNORE"
|
||||
CSVFileHeaderInfoUse = "USE"
|
||||
)
|
||||
|
||||
// SelectCompressionType - is the parameter for what type of compression is
|
||||
// present
|
||||
type SelectCompressionType string
|
||||
|
||||
// Constants for compression types under select API.
|
||||
const (
|
||||
SelectCompressionNONE SelectCompressionType = "NONE"
|
||||
SelectCompressionGZIP = "GZIP"
|
||||
SelectCompressionBZIP = "BZIP2"
|
||||
)
|
||||
|
||||
// CSVQuoteFields - is the parameter for how CSV fields are quoted.
|
||||
type CSVQuoteFields string
|
||||
|
||||
// Constants for csv quote styles.
|
||||
const (
|
||||
CSVQuoteFieldsAlways CSVQuoteFields = "Always"
|
||||
CSVQuoteFieldsAsNeeded = "AsNeeded"
|
||||
)
|
||||
|
||||
// QueryExpressionType - is of what syntax the expression is, this should only
|
||||
// be SQL
|
||||
type QueryExpressionType string
|
||||
|
||||
// Constants for expression type.
|
||||
const (
|
||||
QueryExpressionTypeSQL QueryExpressionType = "SQL"
|
||||
)
|
||||
|
||||
// JSONType determines json input serialization type.
|
||||
type JSONType string
|
||||
|
||||
// Constants for JSONTypes.
|
||||
const (
|
||||
JSONDocumentType JSONType = "DOCUMENT"
|
||||
JSONLinesType = "LINES"
|
||||
)
|
||||
|
||||
// ParquetInputOptions parquet input specific options
|
||||
type ParquetInputOptions struct{}
|
||||
|
||||
// CSVInputOptions csv input specific options
|
||||
type CSVInputOptions struct {
|
||||
FileHeaderInfo CSVFileHeaderInfo
|
||||
fileHeaderInfoSet bool
|
||||
|
||||
RecordDelimiter string
|
||||
recordDelimiterSet bool
|
||||
|
||||
FieldDelimiter string
|
||||
fieldDelimiterSet bool
|
||||
|
||||
QuoteCharacter string
|
||||
quoteCharacterSet bool
|
||||
|
||||
QuoteEscapeCharacter string
|
||||
quoteEscapeCharacterSet bool
|
||||
|
||||
Comments string
|
||||
commentsSet bool
|
||||
}
|
||||
|
||||
// SetFileHeaderInfo sets the file header info in the CSV input options
|
||||
func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) {
|
||||
c.FileHeaderInfo = val
|
||||
c.fileHeaderInfoSet = true
|
||||
}
|
||||
|
||||
// SetRecordDelimiter sets the record delimiter in the CSV input options
|
||||
func (c *CSVInputOptions) SetRecordDelimiter(val string) {
|
||||
c.RecordDelimiter = val
|
||||
c.recordDelimiterSet = true
|
||||
}
|
||||
|
||||
// SetFieldDelimiter sets the field delimiter in the CSV input options
|
||||
func (c *CSVInputOptions) SetFieldDelimiter(val string) {
|
||||
c.FieldDelimiter = val
|
||||
c.fieldDelimiterSet = true
|
||||
}
|
||||
|
||||
// SetQuoteCharacter sets the quote character in the CSV input options
|
||||
func (c *CSVInputOptions) SetQuoteCharacter(val string) {
|
||||
c.QuoteCharacter = val
|
||||
c.quoteCharacterSet = true
|
||||
}
|
||||
|
||||
// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options
|
||||
func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) {
|
||||
c.QuoteEscapeCharacter = val
|
||||
c.quoteEscapeCharacterSet = true
|
||||
}
|
||||
|
||||
// SetComments sets the comments character in the CSV input options
|
||||
func (c *CSVInputOptions) SetComments(val string) {
|
||||
c.Comments = val
|
||||
c.commentsSet = true
|
||||
}
|
||||
|
||||
// MarshalXML - produces the xml representation of the CSV input options struct
|
||||
func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
if err := e.EncodeToken(start); err != nil {
|
||||
return err
|
||||
}
|
||||
if c.FileHeaderInfo != "" || c.fileHeaderInfoSet {
|
||||
if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.RecordDelimiter != "" || c.recordDelimiterSet {
|
||||
if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.FieldDelimiter != "" || c.fieldDelimiterSet {
|
||||
if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.QuoteCharacter != "" || c.quoteCharacterSet {
|
||||
if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet {
|
||||
if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.Comments != "" || c.commentsSet {
|
||||
if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return e.EncodeToken(xml.EndElement{Name: start.Name})
|
||||
}
|
||||
|
||||
// CSVOutputOptions csv output specific options
|
||||
type CSVOutputOptions struct {
|
||||
QuoteFields CSVQuoteFields
|
||||
quoteFieldsSet bool
|
||||
|
||||
RecordDelimiter string
|
||||
recordDelimiterSet bool
|
||||
|
||||
FieldDelimiter string
|
||||
fieldDelimiterSet bool
|
||||
|
||||
QuoteCharacter string
|
||||
quoteCharacterSet bool
|
||||
|
||||
QuoteEscapeCharacter string
|
||||
quoteEscapeCharacterSet bool
|
||||
}
|
||||
|
||||
// SetQuoteFields sets the quote field parameter in the CSV output options
|
||||
func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) {
|
||||
c.QuoteFields = val
|
||||
c.quoteFieldsSet = true
|
||||
}
|
||||
|
||||
// SetRecordDelimiter sets the record delimiter character in the CSV output options
|
||||
func (c *CSVOutputOptions) SetRecordDelimiter(val string) {
|
||||
c.RecordDelimiter = val
|
||||
c.recordDelimiterSet = true
|
||||
}
|
||||
|
||||
// SetFieldDelimiter sets the field delimiter character in the CSV output options
|
||||
func (c *CSVOutputOptions) SetFieldDelimiter(val string) {
|
||||
c.FieldDelimiter = val
|
||||
c.fieldDelimiterSet = true
|
||||
}
|
||||
|
||||
// SetQuoteCharacter sets the quote character in the CSV output options
|
||||
func (c *CSVOutputOptions) SetQuoteCharacter(val string) {
|
||||
c.QuoteCharacter = val
|
||||
c.quoteCharacterSet = true
|
||||
}
|
||||
|
||||
// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options
|
||||
func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) {
|
||||
c.QuoteEscapeCharacter = val
|
||||
c.quoteEscapeCharacterSet = true
|
||||
}
|
||||
|
||||
// MarshalXML - produces the xml representation of the CSVOutputOptions struct
|
||||
func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
if err := e.EncodeToken(start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.QuoteFields != "" || c.quoteFieldsSet {
|
||||
if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.RecordDelimiter != "" || c.recordDelimiterSet {
|
||||
if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.FieldDelimiter != "" || c.fieldDelimiterSet {
|
||||
if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.QuoteCharacter != "" || c.quoteCharacterSet {
|
||||
if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet {
|
||||
if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return e.EncodeToken(xml.EndElement{Name: start.Name})
|
||||
}
|
||||
|
||||
// JSONInputOptions json input specific options
|
||||
type JSONInputOptions struct {
|
||||
Type JSONType
|
||||
typeSet bool
|
||||
}
|
||||
|
||||
// SetType sets the JSON type in the JSON input options
|
||||
func (j *JSONInputOptions) SetType(typ JSONType) {
|
||||
j.Type = typ
|
||||
j.typeSet = true
|
||||
}
|
||||
|
||||
// MarshalXML - produces the xml representation of the JSONInputOptions struct
|
||||
func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
if err := e.EncodeToken(start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if j.Type != "" || j.typeSet {
|
||||
if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return e.EncodeToken(xml.EndElement{Name: start.Name})
|
||||
}
|
||||
|
||||
// JSONOutputOptions - json output specific options
|
||||
type JSONOutputOptions struct {
|
||||
RecordDelimiter string
|
||||
recordDelimiterSet bool
|
||||
}
|
||||
|
||||
// SetRecordDelimiter sets the record delimiter in the JSON output options
|
||||
func (j *JSONOutputOptions) SetRecordDelimiter(val string) {
|
||||
j.RecordDelimiter = val
|
||||
j.recordDelimiterSet = true
|
||||
}
|
||||
|
||||
// MarshalXML - produces the xml representation of the JSONOutputOptions struct
|
||||
func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
if err := e.EncodeToken(start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if j.RecordDelimiter != "" || j.recordDelimiterSet {
|
||||
if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return e.EncodeToken(xml.EndElement{Name: start.Name})
|
||||
}
|
||||
|
||||
// SelectObjectInputSerialization - input serialization parameters
|
||||
type SelectObjectInputSerialization struct {
|
||||
CompressionType SelectCompressionType
|
||||
Parquet *ParquetInputOptions `xml:"Parquet,omitempty"`
|
||||
CSV *CSVInputOptions `xml:"CSV,omitempty"`
|
||||
JSON *JSONInputOptions `xml:"JSON,omitempty"`
|
||||
}
|
||||
|
||||
// SelectObjectOutputSerialization - output serialization parameters.
|
||||
type SelectObjectOutputSerialization struct {
|
||||
CSV *CSVOutputOptions `xml:"CSV,omitempty"`
|
||||
JSON *JSONOutputOptions `xml:"JSON,omitempty"`
|
||||
}
|
||||
|
||||
// SelectObjectOptions - represents the input select body
|
||||
type SelectObjectOptions struct {
|
||||
XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"`
|
||||
ServerSideEncryption encrypt.ServerSide `xml:"-"`
|
||||
Expression string
|
||||
ExpressionType QueryExpressionType
|
||||
InputSerialization SelectObjectInputSerialization
|
||||
OutputSerialization SelectObjectOutputSerialization
|
||||
RequestProgress struct {
|
||||
Enabled bool
|
||||
}
|
||||
}
|
||||
|
||||
// Header returns the http.Header representation of the SelectObject options.
|
||||
func (o SelectObjectOptions) Header() http.Header {
|
||||
headers := make(http.Header)
|
||||
if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
|
||||
o.ServerSideEncryption.Marshal(headers)
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// SelectObjectType - is the parameter which defines what type of object the
|
||||
// operation is being performed on.
|
||||
type SelectObjectType string
|
||||
|
||||
// Constants for input data types.
|
||||
const (
|
||||
SelectObjectTypeCSV SelectObjectType = "CSV"
|
||||
SelectObjectTypeJSON = "JSON"
|
||||
SelectObjectTypeParquet = "Parquet"
|
||||
)
|
||||
|
||||
// preludeInfo is used for keeping track of necessary information from the
|
||||
// prelude.
|
||||
type preludeInfo struct {
|
||||
totalLen uint32
|
||||
headerLen uint32
|
||||
}
|
||||
|
||||
// SelectResults is used for the streaming responses from the server.
|
||||
type SelectResults struct {
|
||||
pipeReader *io.PipeReader
|
||||
resp *http.Response
|
||||
stats *StatsMessage
|
||||
progress *ProgressMessage
|
||||
}
|
||||
|
||||
// ProgressMessage is a struct for progress xml message.
|
||||
type ProgressMessage struct {
|
||||
XMLName xml.Name `xml:"Progress" json:"-"`
|
||||
StatsMessage
|
||||
}
|
||||
|
||||
// StatsMessage is a struct for stat xml message.
|
||||
type StatsMessage struct {
|
||||
XMLName xml.Name `xml:"Stats" json:"-"`
|
||||
BytesScanned int64
|
||||
BytesProcessed int64
|
||||
BytesReturned int64
|
||||
}
|
||||
|
||||
// messageType represents the type of message.
|
||||
type messageType string
|
||||
|
||||
const (
|
||||
errorMsg messageType = "error"
|
||||
commonMsg = "event"
|
||||
)
|
||||
|
||||
// eventType represents the type of event.
|
||||
type eventType string
|
||||
|
||||
// list of event-types returned by Select API.
|
||||
const (
|
||||
endEvent eventType = "End"
|
||||
recordsEvent = "Records"
|
||||
progressEvent = "Progress"
|
||||
statsEvent = "Stats"
|
||||
)
|
||||
|
||||
// contentType represents content type of event.
|
||||
type contentType string
|
||||
|
||||
const (
|
||||
xmlContent contentType = "text/xml"
|
||||
)
|
||||
|
||||
// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API.
|
||||
func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
selectReqBytes, err := xml.Marshal(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("select", "")
|
||||
urlValues.Set("select-type", "2")
|
||||
|
||||
// Execute POST on bucket/object.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: urlValues,
|
||||
customHeader: opts.Header(),
|
||||
contentMD5Base64: sumMD5Base64(selectReqBytes),
|
||||
contentSHA256Hex: sum256Hex(selectReqBytes),
|
||||
contentBody: bytes.NewReader(selectReqBytes),
|
||||
contentLength: int64(len(selectReqBytes)),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewSelectResults(resp, bucketName)
|
||||
}
|
||||
|
||||
// NewSelectResults creates a Select Result parser that parses the response
|
||||
// and returns a Reader that will return parsed and assembled select output.
|
||||
func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, httpRespToErrorResponse(resp, bucketName, "")
|
||||
}
|
||||
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
streamer := &SelectResults{
|
||||
resp: resp,
|
||||
stats: &StatsMessage{},
|
||||
progress: &ProgressMessage{},
|
||||
pipeReader: pipeReader,
|
||||
}
|
||||
streamer.start(pipeWriter)
|
||||
return streamer, nil
|
||||
}
|
||||
|
||||
// Close - closes the underlying response body and the stream reader.
|
||||
func (s *SelectResults) Close() error {
|
||||
defer closeResponse(s.resp)
|
||||
return s.pipeReader.Close()
|
||||
}
|
||||
|
||||
// Read - is a reader compatible implementation for SelectObjectContent records.
|
||||
func (s *SelectResults) Read(b []byte) (n int, err error) {
|
||||
return s.pipeReader.Read(b)
|
||||
}
|
||||
|
||||
// Stats - information about a request's stats when processing is complete.
|
||||
func (s *SelectResults) Stats() *StatsMessage {
|
||||
return s.stats
|
||||
}
|
||||
|
||||
// Progress - information about the progress of a request.
|
||||
func (s *SelectResults) Progress() *ProgressMessage {
|
||||
return s.progress
|
||||
}
|
||||
|
||||
// start is the main function that decodes the large byte array into
// several events that are sent through the eventstream.
// It launches a goroutine that repeatedly reads one framed message
// (prelude, headers, payload, trailing CRC) from the response body, routes
// records into pipeWriter and stats/progress into the corresponding structs,
// and terminates on the "End" event or any error.
func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
	go func() {
		for {
			var prelude preludeInfo
			var headers = make(http.Header)
			var err error

			// Create CRC code. A fresh CRC is computed per message; the
			// TeeReader feeds every byte read from the body into it.
			crc := crc32.New(crc32.IEEETable)
			crcReader := io.TeeReader(s.resp.Body, crc)

			// Extract the prelude(12 bytes) into a struct to extract relevant information.
			prelude, err = processPrelude(crcReader, crc)
			if err != nil {
				pipeWriter.CloseWithError(err)
				closeResponse(s.resp)
				return
			}

			// Extract the headers(variable bytes) into a struct to extract relevant information
			if prelude.headerLen > 0 {
				if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil {
					pipeWriter.CloseWithError(err)
					closeResponse(s.resp)
					return
				}
			}

			// Get the actual payload length so that the appropriate amount of
			// bytes can be read or parsed.
			payloadLen := prelude.PayloadLen()

			m := messageType(headers.Get("message-type"))

			switch m {
			case errorMsg:
				// Server-side error: propagate code and message to the reader.
				pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\""))
				closeResponse(s.resp)
				return
			case commonMsg:
				// Get content-type of the payload.
				c := contentType(headers.Get("content-type"))

				// Get event type of the payload.
				e := eventType(headers.Get("event-type"))

				// Handle all supported events.
				switch e {
				case endEvent:
					// End of stream: close the pipe cleanly (EOF for readers).
					pipeWriter.Close()
					closeResponse(s.resp)
					return
				case recordsEvent:
					// Record payloads are streamed verbatim into the pipe.
					if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil {
						pipeWriter.CloseWithError(err)
						closeResponse(s.resp)
						return
					}
				case progressEvent:
					switch c {
					case xmlContent:
						if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil {
							pipeWriter.CloseWithError(err)
							closeResponse(s.resp)
							return
						}
					default:
						pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent))
						closeResponse(s.resp)
						return
					}
				case statsEvent:
					switch c {
					case xmlContent:
						if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil {
							pipeWriter.CloseWithError(err)
							closeResponse(s.resp)
							return
						}
					default:
						pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent))
						closeResponse(s.resp)
						return
					}
				}
			}

			// Ensures that the full message's CRC is correct and
			// that the message is not corrupted
			if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil {
				pipeWriter.CloseWithError(err)
				closeResponse(s.resp)
				return
			}

		}
	}()
}
|
||||
|
||||
// PayloadLen is a function that calculates the length of the payload.
|
||||
func (p preludeInfo) PayloadLen() int64 {
|
||||
return int64(p.totalLen - p.headerLen - 16)
|
||||
}
|
||||
|
||||
// processPrelude is the function that reads the 12 bytes of the prelude and
|
||||
// ensures the CRC is correct while also extracting relevant information into
|
||||
// the struct,
|
||||
func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
|
||||
var err error
|
||||
var pInfo = preludeInfo{}
|
||||
|
||||
// reads total length of the message (first 4 bytes)
|
||||
pInfo.totalLen, err = extractUint32(prelude)
|
||||
if err != nil {
|
||||
return pInfo, err
|
||||
}
|
||||
|
||||
// reads total header length of the message (2nd 4 bytes)
|
||||
pInfo.headerLen, err = extractUint32(prelude)
|
||||
if err != nil {
|
||||
return pInfo, err
|
||||
}
|
||||
|
||||
// checks that the CRC is correct (3rd 4 bytes)
|
||||
preCRC := crc.Sum32()
|
||||
if err := checkCRC(prelude, preCRC); err != nil {
|
||||
return pInfo, err
|
||||
}
|
||||
|
||||
return pInfo, nil
|
||||
}
|
||||
|
||||
// extracts the relevant information from the Headers.
|
||||
func extractHeader(body io.Reader, myHeaders http.Header) error {
|
||||
for {
|
||||
// extracts the first part of the header,
|
||||
headerTypeName, err := extractHeaderType(body)
|
||||
if err != nil {
|
||||
// Since end of file, we have read all of our headers
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// reads the 7 present in the header and ignores it.
|
||||
extractUint8(body)
|
||||
|
||||
headerValueName, err := extractHeaderValue(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
myHeaders.Set(headerTypeName, headerValueName)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractHeaderType extracts the first half of the header message, the header type.
|
||||
func extractHeaderType(body io.Reader) (string, error) {
|
||||
// extracts 2 bit integer
|
||||
headerNameLen, err := extractUint8(body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// extracts the string with the appropriate number of bytes
|
||||
headerName, err := extractString(body, int(headerNameLen))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimPrefix(headerName, ":"), nil
|
||||
}
|
||||
|
||||
// extractsHeaderValue extracts the second half of the header message, the
|
||||
// header value
|
||||
func extractHeaderValue(body io.Reader) (string, error) {
|
||||
bodyLen, err := extractUint16(body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
bodyName, err := extractString(body, int(bodyLen))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return bodyName, nil
|
||||
}
|
||||
|
||||
// extracts a string from byte array of a particular number of bytes.
|
||||
func extractString(source io.Reader, lenBytes int) (string, error) {
|
||||
myVal := make([]byte, lenBytes)
|
||||
_, err := source.Read(myVal)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(myVal), nil
|
||||
}
|
||||
|
||||
// extractUint32 extracts a 4 byte integer from the byte array.
|
||||
func extractUint32(r io.Reader) (uint32, error) {
|
||||
buf := make([]byte, 4)
|
||||
_, err := readFull(r, buf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return binary.BigEndian.Uint32(buf), nil
|
||||
}
|
||||
|
||||
// extractUint16 extracts a 2 byte integer from the byte array.
|
||||
func extractUint16(r io.Reader) (uint16, error) {
|
||||
buf := make([]byte, 2)
|
||||
_, err := readFull(r, buf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return binary.BigEndian.Uint16(buf), nil
|
||||
}
|
||||
|
||||
// extractUint8 extracts a 1 byte integer from the byte array.
|
||||
func extractUint8(r io.Reader) (uint8, error) {
|
||||
buf := make([]byte, 1)
|
||||
_, err := readFull(r, buf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return buf[0], nil
|
||||
}
|
||||
|
||||
// checkCRC ensures that the CRC matches with the one from the reader.
|
||||
func checkCRC(r io.Reader, expect uint32) error {
|
||||
msgCRC, err := extractUint32(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if msgCRC != expect {
|
||||
return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
127
vendor/github.com/minio/minio-go/v7/api-stat.go
generated
vendored
Normal file
127
vendor/github.com/minio/minio-go/v7/api-stat.go
generated
vendored
Normal file
@ -0,0 +1,127 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to
// control cancellations and timeouts.
// A "NoSuchBucket" error — whether carried by err or by the HTTP response —
// is reported as (false, nil) rather than as an error.
func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return false, err
	}

	// Execute HEAD on bucketName.
	resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
		bucketName:       bucketName,
		contentSHA256Hex: emptySHA256Hex,
	})
	defer closeResponse(resp)
	if err != nil {
		// Missing bucket is not an error for this API, only "false".
		if ToErrorResponse(err).Code == "NoSuchBucket" {
			return false, nil
		}
		return false, err
	}
	if resp != nil {
		// Re-examine the raw response: a NoSuchBucket body still means the
		// bucket does not exist; any other non-200 status is a real error.
		resperr := httpRespToErrorResponse(resp, bucketName, "")
		if ToErrorResponse(resperr).Code == "NoSuchBucket" {
			return false, nil
		}
		if resp.StatusCode != http.StatusOK {
			return false, httpRespToErrorResponse(resp, bucketName, "")
		}
	}
	return true, nil
}
|
||||
|
||||
// StatObject verifies if object exists and you have permission to access.
|
||||
func (c Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
return c.statObject(ctx, bucketName, objectName, opts)
|
||||
}
|
||||
|
||||
// Lower level API for statObject supporting pre-conditions and range headers.
// Issues a HEAD request and translates the response into ObjectInfo; for
// versioned delete markers a synthetic MethodNotAllowed error is produced
// while still surfacing the version ID and delete-marker flag.
func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
	// Input validation.
	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
		return ObjectInfo{}, err
	}
	if err := s3utils.CheckValidObjectName(objectName); err != nil {
		return ObjectInfo{}, err
	}
	headers := opts.Header()
	if opts.Internal.ReplicationDeleteMarker {
		// Internal replication flow: ask the server about delete markers.
		headers.Set(minIOBucketReplicationDeleteMarker, "true")
	}

	urlValues := make(url.Values)
	if opts.VersionID != "" {
		urlValues.Set("versionId", opts.VersionID)
	}
	// Execute HEAD on objectName.
	resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
		bucketName:       bucketName,
		objectName:       objectName,
		queryValues:      urlValues,
		contentSHA256Hex: emptySHA256Hex,
		customHeader:     headers,
	})
	defer closeResponse(resp)
	if err != nil {
		return ObjectInfo{}, err
	}
	// Whether the addressed version is a delete marker.
	deleteMarker := resp.Header.Get(amzDeleteMarker) == "true"

	if resp != nil {
		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
			// HEAD on a delete marker of a specific version yields 400;
			// report it as MethodNotAllowed with the marker metadata intact.
			if resp.StatusCode == http.StatusBadRequest && opts.VersionID != "" && deleteMarker {
				errResp := ErrorResponse{
					StatusCode: resp.StatusCode,
					Code:       "MethodNotAllowed",
					Message:    "The specified method is not allowed against this resource.",
					BucketName: bucketName,
					Key:        objectName,
				}
				return ObjectInfo{
					VersionID:      resp.Header.Get(amzVersionID),
					IsDeleteMarker: deleteMarker,
				}, errResp
			}
			// Any other non-success status becomes an error response, still
			// carrying version/delete-marker details for the caller.
			return ObjectInfo{
				VersionID:      resp.Header.Get(amzVersionID),
				IsDeleteMarker: deleteMarker,
			}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// Success: build ObjectInfo from the response headers.
	return ToObjectInfo(bucketName, objectName, resp.Header)
}
|
899
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
Normal file
899
vendor/github.com/minio/minio-go/v7/api.go
generated
vendored
Normal file
@ -0,0 +1,899 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
md5simd "github.com/minio/md5-simd"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/signer"
|
||||
"golang.org/x/net/publicsuffix"
|
||||
)
|
||||
|
||||
// Client implements Amazon S3 compatible methods.
// The zero value is not usable; construct instances via New.
type Client struct {
	/// Standard options.

	// Parsed endpoint url provided by the user.
	endpointURL *url.URL

	// Holds various credential providers.
	credsProvider *credentials.Credentials

	// Custom signerType value overrides all credentials.
	overrideSignerType credentials.SignatureType

	// User supplied application name/version, folded into the User-Agent.
	appInfo struct {
		appName    string
		appVersion string
	}

	// Indicate whether we are using https or not
	secure bool

	// Needs allocation.
	httpClient     *http.Client
	bucketLocCache *bucketLocationCache

	// Advanced functionality: HTTP request/response tracing controls.
	isTraceEnabled  bool
	traceErrorsOnly bool
	traceOutput     io.Writer

	// S3 specific accelerated endpoint.
	s3AccelerateEndpoint string

	// Region endpoint
	region string

	// Random seed.
	random *rand.Rand

	// lookup indicates type of url lookup supported by server. If not specified,
	// default to Auto.
	lookup BucketLookupType

	// Factory for MD5 hash functions.
	md5Hasher    func() md5simd.Hasher
	sha256Hasher func() md5simd.Hasher
}
|
||||
|
||||
// Options for New method.
// Creds and Secure configure authentication and transport security;
// Transport, Region and BucketLookup are optional overrides.
type Options struct {
	Creds        *credentials.Credentials
	Secure       bool
	Transport    http.RoundTripper
	Region       string
	BucketLookup BucketLookupType

	// Custom hash routines. Leave nil to use standard.
	CustomMD5    func() md5simd.Hasher
	CustomSHA256 func() md5simd.Hasher
}
|
||||
|
||||
// Global constants identifying this library and its version.
const (
	libraryName    = "minio-go"
	libraryVersion = "v7.0.7"
)

// User Agent should always following the below style.
// Please open an issue to discuss any new changes here.
//
//	MinIO (OS; ARCH) LIB/VER APP/VER
const (
	libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)

// BucketLookupType is type of url lookup supported by server.
type BucketLookupType int

// Different types of url lookup supported by the server.Initialized to BucketLookupAuto
const (
	BucketLookupAuto BucketLookupType = iota
	BucketLookupDNS
	BucketLookupPath
)
|
||||
|
||||
// New - instantiate minio client with options
|
||||
func New(endpoint string, opts *Options) (*Client, error) {
|
||||
if opts == nil {
|
||||
return nil, errors.New("no options provided")
|
||||
}
|
||||
clnt, err := privateNew(endpoint, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Google cloud storage should be set to signature V2, force it if not.
|
||||
if s3utils.IsGoogleEndpoint(*clnt.endpointURL) {
|
||||
clnt.overrideSignerType = credentials.SignatureV2
|
||||
}
|
||||
// If Amazon S3 set to signature v4.
|
||||
if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
|
||||
clnt.overrideSignerType = credentials.SignatureV4
|
||||
}
|
||||
|
||||
return clnt, nil
|
||||
}
|
||||
|
||||
// EndpointURL returns the URL of the S3 endpoint.
|
||||
func (c *Client) EndpointURL() *url.URL {
|
||||
endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
|
||||
return &endpoint
|
||||
}
|
||||
|
||||
// lockedRandSource provides protected rand source, implements rand.Source interface.
// The mutex serializes all access to the wrapped source, making it safe for
// concurrent use.
type lockedRandSource struct {
	lk  sync.Mutex
	src rand.Source
}
|
||||
|
||||
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
|
||||
func (r *lockedRandSource) Int63() (n int64) {
|
||||
r.lk.Lock()
|
||||
n = r.src.Int63()
|
||||
r.lk.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Seed uses the provided seed value to initialize the generator to a
|
||||
// deterministic state.
|
||||
func (r *lockedRandSource) Seed(seed int64) {
|
||||
r.lk.Lock()
|
||||
r.src.Seed(seed)
|
||||
r.lk.Unlock()
|
||||
}
|
||||
|
||||
// redirectHeaders is the http.Client CheckRedirect hook: it copies headers
// from the previous request onto the redirected one and, when the host
// changes, re-signs the request with the client's credentials. Capped at 5
// redirects.
func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
	if len(via) >= 5 {
		return errors.New("stopped after 5 redirects")
	}
	if len(via) == 0 {
		return nil
	}
	lastRequest := via[len(via)-1]
	var reAuth bool
	for attr, val := range lastRequest.Header {
		// if hosts do not match do not copy Authorization header
		if attr == "Authorization" && req.Host != lastRequest.Host {
			reAuth = true
			continue
		}
		if _, ok := req.Header[attr]; !ok {
			req.Header[attr] = val
		}
	}

	// Follow the redirect: the client's endpoint is updated in place.
	*c.endpointURL = *req.URL

	value, err := c.credsProvider.Get()
	if err != nil {
		return err
	}
	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
		region          = c.region
	)

	// Custom signer set then override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If signerType returned by credentials helper is anonymous,
	// then do not sign regardless of signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	if reAuth {
		// Check if there is no region override, if not get it from the URL if possible.
		if region == "" {
			region = s3utils.GetRegionFromURL(*c.endpointURL)
		}
		switch {
		case signerType.IsV2():
			// V2 signatures embed the host; they cannot be re-signed here.
			return errors.New("signature V2 cannot support redirection")
		case signerType.IsV4():
			signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
		}
	}
	return nil
}
|
||||
|
||||
// privateNew performs the actual client construction for New: it parses the
// endpoint, sets up the HTTP client (cookie jar, transport, redirect hook),
// resolves the region, and installs hashers and defaults.
func privateNew(endpoint string, opts *Options) (*Client, error) {
	// construct endpoint.
	endpointURL, err := getEndpointURL(endpoint, opts.Secure)
	if err != nil {
		return nil, err
	}

	// Initialize cookies to preserve server sent cookies if any and replay
	// them upon each request.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		return nil, err
	}

	// instantiate new Client.
	clnt := new(Client)

	// Save the credentials.
	clnt.credsProvider = opts.Creds

	// Remember whether we are using https or not
	clnt.secure = opts.Secure

	// Save endpoint URL, user agent for future uses.
	clnt.endpointURL = endpointURL

	// Use the caller-supplied transport when given, otherwise build the
	// library default for the chosen security mode.
	transport := opts.Transport
	if transport == nil {
		transport, err = DefaultTransport(opts.Secure)
		if err != nil {
			return nil, err
		}
	}

	// Instantiate http client and bucket location cache.
	clnt.httpClient = &http.Client{
		Jar:           jar,
		Transport:     transport,
		CheckRedirect: clnt.redirectHeaders,
	}

	// Sets custom region, if region is empty bucket location cache is used automatically.
	if opts.Region == "" {
		opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
	}
	clnt.region = opts.Region

	// Instantiate bucket location cache.
	clnt.bucketLocCache = newBucketLocationCache()

	// Introduce a new locked random seed.
	clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})

	// Add default md5 hasher. Custom hashers take precedence when provided.
	clnt.md5Hasher = opts.CustomMD5
	clnt.sha256Hasher = opts.CustomSHA256
	if clnt.md5Hasher == nil {
		clnt.md5Hasher = newMd5Hasher
	}
	if clnt.sha256Hasher == nil {
		clnt.sha256Hasher = newSHA256Hasher
	}
	// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
	// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
	clnt.lookup = opts.BucketLookup
	// Return.
	return clnt, nil
}
|
||||
|
||||
// SetAppInfo - add application details to user agent.
|
||||
func (c *Client) SetAppInfo(appName string, appVersion string) {
|
||||
// if app name and version not set, we do not set a new user agent.
|
||||
if appName != "" && appVersion != "" {
|
||||
c.appInfo.appName = appName
|
||||
c.appInfo.appVersion = appVersion
|
||||
}
|
||||
}
|
||||
|
||||
// TraceOn - enable HTTP tracing.
|
||||
func (c *Client) TraceOn(outputStream io.Writer) {
|
||||
// if outputStream is nil then default to os.Stdout.
|
||||
if outputStream == nil {
|
||||
outputStream = os.Stdout
|
||||
}
|
||||
// Sets a new output stream.
|
||||
c.traceOutput = outputStream
|
||||
|
||||
// Enable tracing.
|
||||
c.isTraceEnabled = true
|
||||
}
|
||||
|
||||
// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced.
|
||||
func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) {
|
||||
c.TraceOn(outputStream)
|
||||
c.traceErrorsOnly = true
|
||||
}
|
||||
|
||||
// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call.
|
||||
// If all tracing needs to be turned off, call TraceOff().
|
||||
func (c *Client) TraceErrorsOnlyOff() {
|
||||
c.traceErrorsOnly = false
|
||||
}
|
||||
|
||||
// TraceOff - disable HTTP tracing.
|
||||
func (c *Client) TraceOff() {
|
||||
// Disable tracing.
|
||||
c.isTraceEnabled = false
|
||||
c.traceErrorsOnly = false
|
||||
}
|
||||
|
||||
// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
|
||||
// requests. This feature is only specific to S3 for all other endpoints this
|
||||
// function does nothing. To read further details on s3 transfer acceleration
|
||||
// please vist -
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
|
||||
func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
|
||||
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
|
||||
c.s3AccelerateEndpoint = accelerateEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
// Hash materials provides relevant initialized hash algo writers
|
||||
// based on the expected signature type.
|
||||
//
|
||||
// - For signature v4 request if the connection is insecure compute only sha256.
|
||||
// - For signature v4 request if the connection is secure compute only md5.
|
||||
// - For anonymous request compute md5.
|
||||
func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
|
||||
hashSums = make(map[string][]byte)
|
||||
hashAlgos = make(map[string]md5simd.Hasher)
|
||||
if c.overrideSignerType.IsV4() {
|
||||
if c.secure {
|
||||
hashAlgos["md5"] = c.md5Hasher()
|
||||
} else {
|
||||
hashAlgos["sha256"] = c.sha256Hasher()
|
||||
}
|
||||
} else {
|
||||
if c.overrideSignerType.IsAnonymous() {
|
||||
hashAlgos["md5"] = c.md5Hasher()
|
||||
}
|
||||
}
|
||||
if isMd5Requested {
|
||||
hashAlgos["md5"] = c.md5Hasher()
|
||||
}
|
||||
return hashAlgos, hashSums
|
||||
}
|
||||
|
||||
// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
	// If set newRequest presigns the URL.
	presignURL bool

	// User supplied request parameters.
	bucketName   string
	objectName   string
	queryValues  url.Values
	customHeader http.Header
	expires      int64

	// Generated by our internal code.
	bucketLocation   string
	contentBody      io.Reader
	contentLength    int64
	contentMD5Base64 string // carries base64 encoded md5sum
	contentSHA256Hex string // carries hex encoded sha256sum
}
|
||||
|
||||
// dumpHTTP - dump HTTP request and response to the configured trace output.
// Request bodies are never dumped; response bodies are dumped only for
// non-success statuses. The Authorization signature is redacted first.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
	// Starts http dump.
	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
	if err != nil {
		return err
	}

	// Filter out Signature field from Authorization header.
	origAuth := req.Header.Get("Authorization")
	if origAuth != "" {
		req.Header.Set("Authorization", redactSignature(origAuth))
	}

	// Only display request header.
	reqTrace, err := httputil.DumpRequestOut(req, false)
	if err != nil {
		return err
	}

	// Write request to trace output.
	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
	if err != nil {
		return err
	}

	// Only display response header.
	var respTrace []byte

	// For errors we make sure to dump response body as well.
	if resp.StatusCode != http.StatusOK &&
		resp.StatusCode != http.StatusPartialContent &&
		resp.StatusCode != http.StatusNoContent {
		respTrace, err = httputil.DumpResponse(resp, true)
		if err != nil {
			return err
		}
	} else {
		respTrace, err = httputil.DumpResponse(resp, false)
		if err != nil {
			return err
		}
	}

	// Write response to trace output.
	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
	if err != nil {
		return err
	}

	// Ends the http dump.
	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
	if err != nil {
		return err
	}

	// Returns success.
	return nil
}
|
||||
|
||||
// do - execute http request through the client's http.Client, normalizing a
// known EOF failure mode into a retry-friendly error and optionally tracing
// the exchange.
func (c Client) do(req *http.Request) (*http.Response, error) {
	resp, err := c.httpClient.Do(req)
	if err != nil {
		// Handle this specifically for now until future Golang versions fix this issue properly.
		if urlErr, ok := err.(*url.Error); ok {
			if strings.Contains(urlErr.Err.Error(), "EOF") {
				return nil, &url.Error{
					Op:  urlErr.Op,
					URL: urlErr.URL,
					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
				}
			}
		}
		return nil, err
	}

	// Response cannot be non-nil, report error if thats the case.
	if resp == nil {
		msg := "Response is empty. " + reportIssue
		return nil, errInvalidArgument(msg)
	}

	// If trace is enabled, dump http request and response,
	// except when the traceErrorsOnly enabled and the response's status code is ok
	if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
		err = c.dumpHTTP(req, resp)
		if err != nil {
			return nil, err
		}
	}

	return resp, nil
}
|
||||
|
||||
// List of success status codes: a response carrying any of these is returned
// to the caller immediately, with no retry.
var successStatus = []int{
	http.StatusOK,
	http.StatusNoContent,
	http.StatusPartialContent,
}
|
||||
|
||||
// executeMethod - instantiates a given method, and retries the
|
||||
// request upon any error up to maxRetries attempts in a binomially
|
||||
// delayed manner using a standard back off algorithm.
|
||||
func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
|
||||
var retryable bool // Indicates if request can be retried.
|
||||
var bodySeeker io.Seeker // Extracted seeker from io.Reader.
|
||||
var reqRetry = MaxRetry // Indicates how many times we can retry the request
|
||||
|
||||
if metadata.contentBody != nil {
|
||||
// Check if body is seekable then it is retryable.
|
||||
bodySeeker, retryable = metadata.contentBody.(io.Seeker)
|
||||
switch bodySeeker {
|
||||
case os.Stdin, os.Stdout, os.Stderr:
|
||||
retryable = false
|
||||
}
|
||||
// Retry only when reader is seekable
|
||||
if !retryable {
|
||||
reqRetry = 1
|
||||
}
|
||||
|
||||
// Figure out if the body can be closed - if yes
|
||||
// we will definitely close it upon the function
|
||||
// return.
|
||||
bodyCloser, ok := metadata.contentBody.(io.Closer)
|
||||
if ok {
|
||||
defer bodyCloser.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// Create cancel context to control 'newRetryTimer' go routine.
|
||||
retryCtx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Indicate to our routine to exit cleanly upon return.
|
||||
defer cancel()
|
||||
|
||||
// Blank indentifier is kept here on purpose since 'range' without
|
||||
// blank identifiers is only supported since go1.4
|
||||
// https://golang.org/doc/go1.4#forrange.
|
||||
for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) {
|
||||
// Retry executes the following function body if request has an
|
||||
// error until maxRetries have been exhausted, retry attempts are
|
||||
// performed after waiting for a given period of time in a
|
||||
// binomial fashion.
|
||||
if retryable {
|
||||
// Seek back to beginning for each attempt.
|
||||
if _, err = bodySeeker.Seek(0, 0); err != nil {
|
||||
// If seek failed, no need to retry.
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Instantiate a new request.
|
||||
var req *http.Request
|
||||
req, err = c.newRequest(ctx, method, metadata)
|
||||
if err != nil {
|
||||
errResponse := ToErrorResponse(err)
|
||||
if isS3CodeRetryable(errResponse.Code) {
|
||||
continue // Retry.
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initiate the request.
|
||||
res, err = c.do(req)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Retry the request
|
||||
continue
|
||||
}
|
||||
|
||||
// For any known successful http status, return quickly.
|
||||
for _, httpStatus := range successStatus {
|
||||
if httpStatus == res.StatusCode {
|
||||
return res, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Read the body to be saved later.
|
||||
errBodyBytes, err := ioutil.ReadAll(res.Body)
|
||||
// res.Body should be closed
|
||||
closeResponse(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Save the body.
|
||||
errBodySeeker := bytes.NewReader(errBodyBytes)
|
||||
res.Body = ioutil.NopCloser(errBodySeeker)
|
||||
|
||||
// For errors verify if its retryable otherwise fail quickly.
|
||||
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
|
||||
|
||||
// Save the body back again.
|
||||
errBodySeeker.Seek(0, 0) // Seek back to starting point.
|
||||
res.Body = ioutil.NopCloser(errBodySeeker)
|
||||
|
||||
// Bucket region if set in error response and the error
|
||||
// code dictates invalid region, we can retry the request
|
||||
// with the new region.
|
||||
//
|
||||
// Additionally we should only retry if bucketLocation and custom
|
||||
// region is empty.
|
||||
if c.region == "" {
|
||||
switch errResponse.Code {
|
||||
case "AuthorizationHeaderMalformed":
|
||||
fallthrough
|
||||
case "InvalidRegion":
|
||||
fallthrough
|
||||
case "AccessDenied":
|
||||
if errResponse.Region == "" {
|
||||
// Region is empty we simply return the error.
|
||||
return res, err
|
||||
}
|
||||
// Region is not empty figure out a way to
|
||||
// handle this appropriately.
|
||||
if metadata.bucketName != "" {
|
||||
// Gather Cached location only if bucketName is present.
|
||||
if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region {
|
||||
c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
|
||||
continue // Retry.
|
||||
}
|
||||
} else {
|
||||
// This is for ListBuckets() fallback.
|
||||
if errResponse.Region != metadata.bucketLocation {
|
||||
// Retry if the error response has a different region
|
||||
// than the request we just made.
|
||||
metadata.bucketLocation = errResponse.Region
|
||||
continue // Retry
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify if error response code is retryable.
|
||||
if isS3CodeRetryable(errResponse.Code) {
|
||||
continue // Retry.
|
||||
}
|
||||
|
||||
// Verify if http status code is retryable.
|
||||
if isHTTPStatusRetryable(res.StatusCode) {
|
||||
continue // Retry.
|
||||
}
|
||||
|
||||
// For all other cases break out of the retry loop.
|
||||
break
|
||||
}
|
||||
|
||||
// Return an error when retry is canceled or deadlined
|
||||
if e := retryCtx.Err(); e != nil {
|
||||
return nil, e
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
// newRequest - instantiate a new HTTP request for a given method.
|
||||
func (c Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) {
|
||||
// If no method is supplied default to 'POST'.
|
||||
if method == "" {
|
||||
method = http.MethodPost
|
||||
}
|
||||
|
||||
location := metadata.bucketLocation
|
||||
if location == "" {
|
||||
if metadata.bucketName != "" {
|
||||
// Gather location only if bucketName is present.
|
||||
location, err = c.getBucketLocation(ctx, metadata.bucketName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if location == "" {
|
||||
location = getDefaultLocation(*c.endpointURL, c.region)
|
||||
}
|
||||
}
|
||||
|
||||
// Look if target url supports virtual host.
|
||||
// We explicitly disallow MakeBucket calls to not use virtual DNS style,
|
||||
// since the resolution may fail.
|
||||
isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0)
|
||||
isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket
|
||||
|
||||
// Construct a new target URL.
|
||||
targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location,
|
||||
isVirtualHost, metadata.queryValues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize a new HTTP request for the method.
|
||||
req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get credentials from the configured credentials provider.
|
||||
value, err := c.credsProvider.Get()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
signerType = value.SignerType
|
||||
accessKeyID = value.AccessKeyID
|
||||
secretAccessKey = value.SecretAccessKey
|
||||
sessionToken = value.SessionToken
|
||||
)
|
||||
|
||||
// Custom signer set then override the behavior.
|
||||
if c.overrideSignerType != credentials.SignatureDefault {
|
||||
signerType = c.overrideSignerType
|
||||
}
|
||||
|
||||
// If signerType returned by credentials helper is anonymous,
|
||||
// then do not sign regardless of signerType override.
|
||||
if value.SignerType == credentials.SignatureAnonymous {
|
||||
signerType = credentials.SignatureAnonymous
|
||||
}
|
||||
|
||||
// Generate presign url if needed, return right here.
|
||||
if metadata.expires != 0 && metadata.presignURL {
|
||||
if signerType.IsAnonymous() {
|
||||
return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
|
||||
}
|
||||
if signerType.IsV2() {
|
||||
// Presign URL with signature v2.
|
||||
req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
|
||||
} else if signerType.IsV4() {
|
||||
// Presign URL with signature v4.
|
||||
req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Set 'User-Agent' header for the request.
|
||||
c.setUserAgent(req)
|
||||
|
||||
// Set all headers.
|
||||
for k, v := range metadata.customHeader {
|
||||
req.Header.Set(k, v[0])
|
||||
}
|
||||
|
||||
// Go net/http notoriously closes the request body.
|
||||
// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
|
||||
// This can cause underlying *os.File seekers to fail, avoid that
|
||||
// by making sure to wrap the closer as a nop.
|
||||
if metadata.contentLength == 0 {
|
||||
req.Body = nil
|
||||
} else {
|
||||
req.Body = ioutil.NopCloser(metadata.contentBody)
|
||||
}
|
||||
|
||||
// Set incoming content-length.
|
||||
req.ContentLength = metadata.contentLength
|
||||
if req.ContentLength <= -1 {
|
||||
// For unknown content length, we upload using transfer-encoding: chunked.
|
||||
req.TransferEncoding = []string{"chunked"}
|
||||
}
|
||||
|
||||
// set md5Sum for content protection.
|
||||
if len(metadata.contentMD5Base64) > 0 {
|
||||
req.Header.Set("Content-Md5", metadata.contentMD5Base64)
|
||||
}
|
||||
|
||||
// For anonymous requests just return.
|
||||
if signerType.IsAnonymous() {
|
||||
return req, nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case signerType.IsV2():
|
||||
// Add signature version '2' authorization header.
|
||||
req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
|
||||
case metadata.objectName != "" && metadata.queryValues == nil && method == http.MethodPut && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
|
||||
// Streaming signature is used by default for a PUT object request. Additionally we also
|
||||
// look if the initialized client is secure, if yes then we don't need to perform
|
||||
// streaming signature.
|
||||
req = signer.StreamingSignV4(req, accessKeyID,
|
||||
secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
|
||||
default:
|
||||
// Set sha256 sum for signature calculation only with signature version '4'.
|
||||
shaHeader := unsignedPayload
|
||||
if metadata.contentSHA256Hex != "" {
|
||||
shaHeader = metadata.contentSHA256Hex
|
||||
}
|
||||
req.Header.Set("X-Amz-Content-Sha256", shaHeader)
|
||||
|
||||
// Add signature version '4' authorization header.
|
||||
req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
|
||||
}
|
||||
|
||||
// Return request.
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// set User agent.
|
||||
func (c Client) setUserAgent(req *http.Request) {
|
||||
req.Header.Set("User-Agent", libraryUserAgent)
|
||||
if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
|
||||
req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// makeTargetURL make a new target url.
|
||||
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
|
||||
host := c.endpointURL.Host
|
||||
// For Amazon S3 endpoint, try to fetch location based endpoint.
|
||||
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
|
||||
if c.s3AccelerateEndpoint != "" && bucketName != "" {
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
|
||||
// Disable transfer acceleration for non-compliant bucket names.
|
||||
if strings.Contains(bucketName, ".") {
|
||||
return nil, errTransferAccelerationBucket(bucketName)
|
||||
}
|
||||
// If transfer acceleration is requested set new host.
|
||||
// For more details about enabling transfer acceleration read here.
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
|
||||
host = c.s3AccelerateEndpoint
|
||||
} else {
|
||||
// Do not change the host if the endpoint URL is a FIPS S3 endpoint.
|
||||
if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) {
|
||||
// Fetch new host based on the bucket location.
|
||||
host = getS3Endpoint(bucketLocation)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save scheme.
|
||||
scheme := c.endpointURL.Scheme
|
||||
|
||||
// Strip port 80 and 443 so we won't send these ports in Host header.
|
||||
// The reason is that browsers and curl automatically remove :80 and :443
|
||||
// with the generated presigned urls, then a signature mismatch error.
|
||||
if h, p, err := net.SplitHostPort(host); err == nil {
|
||||
if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
|
||||
host = h
|
||||
}
|
||||
}
|
||||
|
||||
urlStr := scheme + "://" + host + "/"
|
||||
// Make URL only if bucketName is available, otherwise use the
|
||||
// endpoint URL.
|
||||
if bucketName != "" {
|
||||
// If endpoint supports virtual host style use that always.
|
||||
// Currently only S3 and Google Cloud Storage would support
|
||||
// virtual host style.
|
||||
if isVirtualHostStyle {
|
||||
urlStr = scheme + "://" + bucketName + "." + host + "/"
|
||||
if objectName != "" {
|
||||
urlStr = urlStr + s3utils.EncodePath(objectName)
|
||||
}
|
||||
} else {
|
||||
// If not fall back to using path style.
|
||||
urlStr = urlStr + bucketName + "/"
|
||||
if objectName != "" {
|
||||
urlStr = urlStr + s3utils.EncodePath(objectName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If there are any query values, add them to the end.
|
||||
if len(queryValues) > 0 {
|
||||
urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
|
||||
}
|
||||
|
||||
return url.Parse(urlStr)
|
||||
}
|
||||
|
||||
// returns true if virtual hosted style requests are to be used.
|
||||
func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
|
||||
if bucketName == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.lookup == BucketLookupDNS {
|
||||
return true
|
||||
}
|
||||
if c.lookup == BucketLookupPath {
|
||||
return false
|
||||
}
|
||||
|
||||
// default to virtual only for Amazon/Google storage. In all other cases use
|
||||
// path style requests
|
||||
return s3utils.IsVirtualHostSupported(url, bucketName)
|
||||
}
|
253
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
Normal file
253
vendor/github.com/minio/minio-go/v7/bucket-cache.go
generated
vendored
Normal file
@ -0,0 +1,253 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/signer"
|
||||
)
|
||||
|
||||
// bucketLocationCache - Provides simple mechanism to hold bucket
|
||||
// locations in memory.
|
||||
type bucketLocationCache struct {
|
||||
// mutex is used for handling the concurrent
|
||||
// read/write requests for cache.
|
||||
sync.RWMutex
|
||||
|
||||
// items holds the cached bucket locations.
|
||||
items map[string]string
|
||||
}
|
||||
|
||||
// newBucketLocationCache - Provides a new bucket location cache to be
|
||||
// used internally with the client object.
|
||||
func newBucketLocationCache() *bucketLocationCache {
|
||||
return &bucketLocationCache{
|
||||
items: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// Get - Returns a value of a given key if it exists.
|
||||
func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
|
||||
r.RLock()
|
||||
defer r.RUnlock()
|
||||
location, ok = r.items[bucketName]
|
||||
return
|
||||
}
|
||||
|
||||
// Set - Will persist a value into cache.
|
||||
func (r *bucketLocationCache) Set(bucketName string, location string) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.items[bucketName] = location
|
||||
}
|
||||
|
||||
// Delete - Deletes a bucket name from cache.
|
||||
func (r *bucketLocationCache) Delete(bucketName string) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
delete(r.items, bucketName)
|
||||
}
|
||||
|
||||
// GetBucketLocation - get location for the bucket name from location cache, if not
|
||||
// fetch freshly by making a new request.
|
||||
func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) {
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return c.getBucketLocation(ctx, bucketName)
|
||||
}
|
||||
|
||||
// getBucketLocation - Get location for the bucketName from location map cache, if not
|
||||
// fetch freshly by making a new request.
|
||||
func (c Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) {
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Region set then no need to fetch bucket location.
|
||||
if c.region != "" {
|
||||
return c.region, nil
|
||||
}
|
||||
|
||||
if location, ok := c.bucketLocCache.Get(bucketName); ok {
|
||||
return location, nil
|
||||
}
|
||||
|
||||
// Initialize a new request.
|
||||
req, err := c.getBucketLocationRequest(ctx, bucketName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Initiate the request.
|
||||
resp, err := c.do(req)
|
||||
defer closeResponse(resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
location, err := processBucketLocationResponse(resp, bucketName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
c.bucketLocCache.Set(bucketName, location)
|
||||
return location, nil
|
||||
}
|
||||
|
||||
// processes the getBucketLocation http response from the server.
|
||||
func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = httpRespToErrorResponse(resp, bucketName, "")
|
||||
errResp := ToErrorResponse(err)
|
||||
// For access denied error, it could be an anonymous
|
||||
// request. Move forward and let the top level callers
|
||||
// succeed if possible based on their policy.
|
||||
switch errResp.Code {
|
||||
case "NotImplemented":
|
||||
if errResp.Server == "AmazonSnowball" {
|
||||
return "snowball", nil
|
||||
}
|
||||
case "AuthorizationHeaderMalformed":
|
||||
fallthrough
|
||||
case "InvalidRegion":
|
||||
fallthrough
|
||||
case "AccessDenied":
|
||||
if errResp.Region == "" {
|
||||
return "us-east-1", nil
|
||||
}
|
||||
return errResp.Region, nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Extract location.
|
||||
var locationConstraint string
|
||||
err = xmlDecoder(resp.Body, &locationConstraint)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
location := locationConstraint
|
||||
// Location is empty will be 'us-east-1'.
|
||||
if location == "" {
|
||||
location = "us-east-1"
|
||||
}
|
||||
|
||||
// Location can be 'EU' convert it to meaningful 'eu-west-1'.
|
||||
if location == "EU" {
|
||||
location = "eu-west-1"
|
||||
}
|
||||
|
||||
// Save the location into cache.
|
||||
|
||||
// Return.
|
||||
return location, nil
|
||||
}
|
||||
|
||||
// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
|
||||
func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) {
|
||||
// Set location query.
|
||||
urlValues := make(url.Values)
|
||||
urlValues.Set("location", "")
|
||||
|
||||
// Set get bucket location always as path style.
|
||||
targetURL := *c.endpointURL
|
||||
|
||||
// as it works in makeTargetURL method from api.go file
|
||||
if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
|
||||
if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
|
||||
targetURL.Host = h
|
||||
}
|
||||
}
|
||||
|
||||
isVirtualHost := s3utils.IsVirtualHostSupported(targetURL, bucketName)
|
||||
|
||||
var urlStr string
|
||||
|
||||
//only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint
|
||||
if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) {
|
||||
urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
|
||||
} else {
|
||||
targetURL.Path = path.Join(bucketName, "") + "/"
|
||||
targetURL.RawQuery = urlValues.Encode()
|
||||
urlStr = targetURL.String()
|
||||
}
|
||||
|
||||
// Get a new HTTP request for the method.
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set UserAgent for the request.
|
||||
c.setUserAgent(req)
|
||||
|
||||
// Get credentials from the configured credentials provider.
|
||||
value, err := c.credsProvider.Get()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
signerType = value.SignerType
|
||||
accessKeyID = value.AccessKeyID
|
||||
secretAccessKey = value.SecretAccessKey
|
||||
sessionToken = value.SessionToken
|
||||
)
|
||||
|
||||
// Custom signer set then override the behavior.
|
||||
if c.overrideSignerType != credentials.SignatureDefault {
|
||||
signerType = c.overrideSignerType
|
||||
}
|
||||
|
||||
// If signerType returned by credentials helper is anonymous,
|
||||
// then do not sign regardless of signerType override.
|
||||
if value.SignerType == credentials.SignatureAnonymous {
|
||||
signerType = credentials.SignatureAnonymous
|
||||
}
|
||||
|
||||
if signerType.IsAnonymous() {
|
||||
return req, nil
|
||||
}
|
||||
|
||||
if signerType.IsV2() {
|
||||
// Get Bucket Location calls should be always path style
|
||||
isVirtualHost := false
|
||||
req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Set sha256 sum for signature calculation only with signature version '4'.
|
||||
contentSha256 := emptySHA256Hex
|
||||
if c.secure {
|
||||
contentSha256 = unsignedPayload
|
||||
}
|
||||
|
||||
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
|
||||
req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
|
||||
return req, nil
|
||||
}
|
80
vendor/github.com/minio/minio-go/v7/code_of_conduct.md
generated
vendored
Normal file
80
vendor/github.com/minio/minio-go/v7/code_of_conduct.md
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior, in compliance with the
|
||||
licensing terms applying to the Project developments.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful. However, these actions shall respect the
|
||||
licensing terms of the Project Developments that will always supersede such
|
||||
Code of Conduct.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at dev@min.io. The project team
|
||||
will review and investigate all complaints, and will respond in a way that it deems
|
||||
appropriate to the circumstances. The project team is obligated to maintain
|
||||
confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
This version includes a clarification to ensure that the code of conduct is in
|
||||
compliance with the free software licensing terms of the project.
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
91
vendor/github.com/minio/minio-go/v7/constants.go
generated
vendored
Normal file
91
vendor/github.com/minio/minio-go/v7/constants.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
/// Multipart upload defaults.
|
||||
|
||||
// absMinPartSize - absolute minimum part size (5 MiB) below which
|
||||
// a part in a multipart upload may not be uploaded.
|
||||
const absMinPartSize = 1024 * 1024 * 5
|
||||
|
||||
// minPartSize - minimum part size 128MiB per object after which
|
||||
// putObject behaves internally as multipart.
|
||||
const minPartSize = 1024 * 1024 * 128
|
||||
|
||||
// maxPartsCount - maximum number of parts for a single multipart session.
|
||||
const maxPartsCount = 10000
|
||||
|
||||
// maxPartSize - maximum part size 5GiB for a single multipart upload
|
||||
// operation.
|
||||
const maxPartSize = 1024 * 1024 * 1024 * 5
|
||||
|
||||
// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
|
||||
// operation.
|
||||
const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
|
||||
|
||||
// maxMultipartPutObjectSize - maximum size 5TiB of object for
|
||||
// Multipart operation.
|
||||
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
|
||||
|
||||
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
|
||||
// we don't want to sign the request payload
|
||||
const unsignedPayload = "UNSIGNED-PAYLOAD"
|
||||
|
||||
// Total number of parallel workers used for multipart operation.
|
||||
const totalWorkers = 4
|
||||
|
||||
// Signature related constants.
|
||||
const (
|
||||
signV4Algorithm = "AWS4-HMAC-SHA256"
|
||||
iso8601DateFormat = "20060102T150405Z"
|
||||
)
|
||||
|
||||
const (
|
||||
// Storage class header.
|
||||
amzStorageClass = "X-Amz-Storage-Class"
|
||||
|
||||
// Website redirect location header
|
||||
amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
|
||||
|
||||
// Object Tagging headers
|
||||
amzTaggingHeader = "X-Amz-Tagging"
|
||||
amzTaggingHeaderDirective = "X-Amz-Tagging-Directive"
|
||||
|
||||
amzVersionID = "X-Amz-Version-Id"
|
||||
amzTaggingCount = "X-Amz-Tagging-Count"
|
||||
amzExpiration = "X-Amz-Expiration"
|
||||
amzReplicationStatus = "X-Amz-Replication-Status"
|
||||
amzDeleteMarker = "X-Amz-Delete-Marker"
|
||||
|
||||
// Object legal hold header
|
||||
amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold"
|
||||
|
||||
// Object retention header
|
||||
amzLockMode = "X-Amz-Object-Lock-Mode"
|
||||
amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date"
|
||||
amzBypassGovernance = "X-Amz-Bypass-Governance-Retention"
|
||||
|
||||
// Replication status
|
||||
amzBucketReplicationStatus = "X-Amz-Replication-Status"
|
||||
// Minio specific Replication/lifecycle transition extension
|
||||
minIOBucketSourceMTime = "X-Minio-Source-Mtime"
|
||||
|
||||
minIOBucketSourceETag = "X-Minio-Source-Etag"
|
||||
minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
|
||||
minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
|
||||
)
|
133
vendor/github.com/minio/minio-go/v7/core.go
generated
vendored
Normal file
133
vendor/github.com/minio/minio-go/v7/core.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
)
|
||||
|
||||
// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
|
||||
type Core struct {
|
||||
*Client
|
||||
}
|
||||
|
||||
// NewCore - Returns new initialized a Core client, this CoreClient should be
|
||||
// only used under special conditions such as need to access lower primitives
|
||||
// and being able to use them to write your own wrappers.
|
||||
func NewCore(endpoint string, opts *Options) (*Core, error) {
|
||||
var s3Client Core
|
||||
client, err := New(endpoint, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s3Client.Client = client
|
||||
return &s3Client, nil
|
||||
}
|
||||
|
||||
// ListObjects - List all the objects at a prefix, optionally with marker and delimiter
|
||||
// you can further filter the results.
|
||||
func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
|
||||
return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys)
|
||||
}
|
||||
|
||||
// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
|
||||
// continuationToken instead of marker to support iteration over the results.
|
||||
func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
|
||||
return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys)
|
||||
}
|
||||
|
||||
// CopyObject - copies an object from source object to destination object on server side.
|
||||
func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, dstOpts PutObjectOptions) (ObjectInfo, error) {
|
||||
return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, dstOpts)
|
||||
}
|
||||
|
||||
// CopyObjectPart - creates a part in a multipart upload by copying (a
|
||||
// part of) an existing object.
|
||||
func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
|
||||
partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
|
||||
|
||||
return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
|
||||
partID, startOffset, length, metadata)
|
||||
}
|
||||
|
||||
// PutObject - Upload object. Uploads using single PUT call.
|
||||
func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) {
|
||||
hookReader := newHook(data, opts.Progress)
|
||||
return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts)
|
||||
}
|
||||
|
||||
// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
|
||||
func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
|
||||
result, err := c.initiateMultipartUpload(ctx, bucket, object, opts)
|
||||
return result.UploadID, err
|
||||
}
|
||||
|
||||
// ListMultipartUploads - List incomplete uploads.
|
||||
func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
|
||||
return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
|
||||
}
|
||||
|
||||
// PutObjectPart - Upload an object part.
|
||||
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
|
||||
return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
|
||||
}
|
||||
|
||||
// ListObjectParts - List uploaded parts of an incomplete upload.
func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
	return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
}
|
||||
|
||||
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
|
||||
func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) {
|
||||
res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
|
||||
Parts: parts,
|
||||
})
|
||||
return res.ETag, err
|
||||
}
|
||||
|
||||
// AbortMultipartUpload - Abort an incomplete upload.
|
||||
func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
|
||||
return c.abortMultipartUpload(ctx, bucket, object, uploadID)
|
||||
}
|
||||
|
||||
// GetBucketPolicy - fetches bucket access policy for a given bucket.
|
||||
func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) {
|
||||
return c.getBucketPolicy(ctx, bucket)
|
||||
}
|
||||
|
||||
// PutBucketPolicy - applies a new bucket access policy for a given bucket.
|
||||
func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error {
|
||||
return c.putBucketPolicy(ctx, bucket, bucketPolicy)
|
||||
}
|
||||
|
||||
// GetObject is a lower level API implemented to support reading
|
||||
// partial objects and also downloading objects with special conditions
|
||||
// matching etag, modtime etc.
|
||||
func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
|
||||
return c.getObject(ctx, bucketName, objectName, opts)
|
||||
}
|
||||
|
||||
// StatObject is a lower level API implemented to support special
|
||||
// conditions matching etag, modtime on a request.
|
||||
func (c Core) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
|
||||
return c.statObject(ctx, bucketName, objectName, opts)
|
||||
}
|
29
vendor/github.com/minio/minio-go/v7/go.mod
generated
vendored
Normal file
29
vendor/github.com/minio/minio-go/v7/go.mod
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
module github.com/minio/minio-go/v7
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/cheggaaa/pb v1.0.29 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/json-iterator/go v1.1.10
|
||||
github.com/klauspost/cpuid v1.3.1 // indirect
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/minio/md5-simd v1.1.0
|
||||
github.com/minio/sha256-simd v0.1.1
|
||||
github.com/minio/sio v0.2.1 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/rs/xid v1.2.1
|
||||
github.com/sirupsen/logrus v1.7.0 // indirect
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect
|
||||
github.com/stretchr/testify v1.4.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect
|
||||
golang.org/x/text v0.3.3 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
gopkg.in/ini.v1 v1.57.0
|
||||
gopkg.in/yaml.v2 v2.2.8 // indirect
|
||||
)
|
97
vendor/github.com/minio/minio-go/v7/go.sum
generated
vendored
Normal file
97
vendor/github.com/minio/minio-go/v7/go.sum
generated
vendored
Normal file
@ -0,0 +1,97 @@
|
||||
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
|
||||
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs=
|
||||
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
|
||||
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
|
||||
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/sio v0.2.1 h1:NjzKiIMSMcHediVQR0AFVx2tp7Wxh9tKPfDI3kH7aHQ=
|
||||
github.com/minio/sio v0.2.1/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg=
|
||||
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
|
||||
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
85
vendor/github.com/minio/minio-go/v7/hook-reader.go
generated
vendored
Normal file
85
vendor/github.com/minio/minio-go/v7/hook-reader.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// hookReader hooks additional reader in the source stream. It is
|
||||
// useful for making progress bars. Second reader is appropriately
|
||||
// notified about the exact number of bytes read from the primary
|
||||
// source on each Read operation.
|
||||
type hookReader struct {
|
||||
source io.Reader
|
||||
hook io.Reader
|
||||
}
|
||||
|
||||
// Seek implements io.Seeker. Seeks source first, and if necessary
|
||||
// seeks hook if Seek method is appropriately found.
|
||||
func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
|
||||
// Verify for source has embedded Seeker, use it.
|
||||
sourceSeeker, ok := hr.source.(io.Seeker)
|
||||
if ok {
|
||||
n, err = sourceSeeker.Seek(offset, whence)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Verify if hook has embedded Seeker, use it.
|
||||
hookSeeker, ok := hr.hook.(io.Seeker)
|
||||
if ok {
|
||||
var m int64
|
||||
m, err = hookSeeker.Seek(offset, whence)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if n != m {
|
||||
return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Read implements io.Reader. Always reads from the source, the return
|
||||
// value 'n' number of bytes are reported through the hook. Returns
|
||||
// error for all non io.EOF conditions.
|
||||
func (hr *hookReader) Read(b []byte) (n int, err error) {
|
||||
n, err = hr.source.Read(b)
|
||||
if err != nil && err != io.EOF {
|
||||
return n, err
|
||||
}
|
||||
// Progress the hook with the total read bytes from the source.
|
||||
if _, herr := hr.hook.Read(b[:n]); herr != nil {
|
||||
if herr != io.EOF {
|
||||
return n, herr
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// newHook returns a io.ReadSeeker which implements hookReader that
|
||||
// reports the data read from the source to the hook.
|
||||
func newHook(source, hook io.Reader) io.Reader {
|
||||
if hook == nil {
|
||||
return source
|
||||
}
|
||||
return &hookReader{source, hook}
|
||||
}
|
214
vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
generated
vendored
Normal file
214
vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
generated
vendored
Normal file
@ -0,0 +1,214 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/signer"
|
||||
sha256 "github.com/minio/sha256-simd"
|
||||
)
|
||||
|
||||
// AssumeRoleResponse contains the result of successful AssumeRole request.
type AssumeRoleResponse struct {
	XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`

	// Result carries the temporary credentials and assumed-role identity.
	Result AssumeRoleResult `xml:"AssumeRoleResult"`
	// ResponseMetadata echoes the STS request ID, useful when debugging
	// failed requests.
	ResponseMetadata struct {
		RequestID string `xml:"RequestId,omitempty"`
	} `xml:"ResponseMetadata,omitempty"`
}
|
||||
|
||||
// AssumeRoleResult - Contains the response to a successful AssumeRole
// request, including temporary credentials that can be used to make
// MinIO API requests.
type AssumeRoleResult struct {
	// The identifiers for the temporary security credentials that the operation
	// returns.
	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`

	// The temporary security credentials, which include an access key ID, a secret
	// access key, and a security (or session) token.
	//
	// Note: The size of the security token that STS APIs return is not fixed. We
	// strongly recommend that you make no assumptions about the maximum size. As
	// of this writing, the typical size is less than 4096 bytes, but that can vary.
	// Also, future updates to AWS might require larger sizes.
	Credentials struct {
		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
	} `xml:",omitempty"`

	// A percentage value that indicates the size of the policy in packed form.
	// The service rejects any policy with a packed size greater than 100 percent,
	// which means the policy exceeded the allowed space.
	PackedPolicySize int `xml:",omitempty"`
}
|
||||
|
||||
// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSAssumeRole struct {
	// Expiry provides the expiration bookkeeping (SetExpiration/IsExpired)
	// used by Retrieve.
	Expiry

	// Required http Client to use when connecting to MinIO STS service.
	Client *http.Client

	// STS endpoint to fetch STS credentials.
	STSEndpoint string

	// various options for this request.
	Options STSAssumeRoleOptions
}
|
||||
|
||||
// STSAssumeRoleOptions collection of various input options
// to obtain AssumeRole credentials.
type STSAssumeRoleOptions struct {
	// Mandatory inputs.
	AccessKey string
	SecretKey string

	Location string // Optional commonly needed with AWS STS.
	// DurationSeconds is optional and defaults to 1 hour; values at or
	// below the default are raised to it by getAssumeRoleCredentials.
	DurationSeconds int

	// Optional only valid if using with AWS STS
	RoleARN         string
	RoleSessionName string
}
|
||||
|
||||
// NewSTSAssumeRole returns a pointer to a new
|
||||
// Credentials object wrapping the STSAssumeRole.
|
||||
func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
|
||||
if stsEndpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if opts.AccessKey == "" || opts.SecretKey == "" {
|
||||
return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
|
||||
}
|
||||
return New(&STSAssumeRole{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
STSEndpoint: stsEndpoint,
|
||||
Options: opts,
|
||||
}), nil
|
||||
}
|
||||
|
||||
// defaultDurationSeconds is the fallback STS credential lifetime (1 hour);
// getAssumeRoleCredentials uses it as the floor for DurationSeconds.
const defaultDurationSeconds = 3600
||||
|
||||
// closeResponse close non nil response with any response Body.
|
||||
// convenient wrapper to drain any remaining data on response body.
|
||||
//
|
||||
// Subsequently this allows golang http RoundTripper
|
||||
// to re-use the same connection for future requests.
|
||||
func closeResponse(resp *http.Response) {
|
||||
// Callers should close resp.Body when done reading from it.
|
||||
// If resp.Body is not closed, the Client's underlying RoundTripper
|
||||
// (typically Transport) may not be able to re-use a persistent TCP
|
||||
// connection to the server for a subsequent "keep-alive" request.
|
||||
if resp != nil && resp.Body != nil {
|
||||
// Drain any remaining Body and then close the connection.
|
||||
// Without this closing connection would disallow re-using
|
||||
// the same connection for future uses.
|
||||
// - http://stackoverflow.com/a/17961593/4465767
|
||||
io.Copy(ioutil.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) {
|
||||
v := url.Values{}
|
||||
v.Set("Action", "AssumeRole")
|
||||
v.Set("Version", STSVersion)
|
||||
if opts.RoleARN != "" {
|
||||
v.Set("RoleArn", opts.RoleARN)
|
||||
}
|
||||
if opts.RoleSessionName != "" {
|
||||
v.Set("RoleSessionName", opts.RoleSessionName)
|
||||
}
|
||||
if opts.DurationSeconds > defaultDurationSeconds {
|
||||
v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds))
|
||||
} else {
|
||||
v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds))
|
||||
}
|
||||
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return AssumeRoleResponse{}, err
|
||||
}
|
||||
u.Path = "/"
|
||||
|
||||
postBody := strings.NewReader(v.Encode())
|
||||
hash := sha256.New()
|
||||
if _, err = io.Copy(hash, postBody); err != nil {
|
||||
return AssumeRoleResponse{}, err
|
||||
}
|
||||
postBody.Seek(0, 0)
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, u.String(), postBody)
|
||||
if err != nil {
|
||||
return AssumeRoleResponse{}, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
|
||||
req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
|
||||
|
||||
resp, err := clnt.Do(req)
|
||||
if err != nil {
|
||||
return AssumeRoleResponse{}, err
|
||||
}
|
||||
defer closeResponse(resp)
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return AssumeRoleResponse{}, errors.New(resp.Status)
|
||||
}
|
||||
|
||||
a := AssumeRoleResponse{}
|
||||
if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
|
||||
return AssumeRoleResponse{}, err
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSAssumeRole) Retrieve() (Value, error) {
|
||||
a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
// Expiry window is set to 10secs.
|
||||
m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
|
||||
|
||||
return Value{
|
||||
AccessKeyID: a.Result.Credentials.AccessKey,
|
||||
SecretAccessKey: a.Result.Credentials.SecretKey,
|
||||
SessionToken: a.Result.Credentials.SessionToken,
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
89
vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
generated
vendored
Normal file
89
vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
// A Chain will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The Chain provides a way of chaining multiple providers together
// which will pick the first available using priority order of the
// Providers in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
// Retrieve() will return the anonymous (no credentials) value.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again after IsExpired() is true.
//
//     creds := credentials.NewChainCredentials(
//         []credentials.Provider{
//             &credentials.EnvAWSS3{},
//             &credentials.EnvMinio{},
//         })
//
//     // Usage of ChainCredentials.
//     mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
//     if err != nil {
//         log.Fatalln(err)
//     }
//
type Chain struct {
	// Providers are tried in order until one yields non-anonymous credentials.
	Providers []Provider
	// curr caches the provider that most recently returned credentials.
	curr Provider
}
|
||||
|
||||
// NewChainCredentials returns a pointer to a new Credentials object
|
||||
// wrapping a chain of providers.
|
||||
func NewChainCredentials(providers []Provider) *Credentials {
|
||||
return New(&Chain{
|
||||
Providers: append([]Provider{}, providers...),
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials value, returns no credentials(anonymous)
|
||||
// if no credentials provider returned any value.
|
||||
//
|
||||
// If a provider is found with credentials, it will be cached and any calls
|
||||
// to IsExpired() will return the expired state of the cached provider.
|
||||
func (c *Chain) Retrieve() (Value, error) {
|
||||
for _, p := range c.Providers {
|
||||
creds, _ := p.Retrieve()
|
||||
// Always prioritize non-anonymous providers, if any.
|
||||
if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
|
||||
continue
|
||||
}
|
||||
c.curr = p
|
||||
return creds, nil
|
||||
}
|
||||
// At this point we have exhausted all the providers and
|
||||
// are left without any credentials return anonymous.
|
||||
return Value{
|
||||
SignerType: SignatureAnonymous,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired will returned the expired state of the currently cached provider
|
||||
// if there is one. If there is no current provider, true will be returned.
|
||||
func (c *Chain) IsExpired() bool {
|
||||
if c.curr != nil {
|
||||
return c.curr.IsExpired()
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
17
vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
generated
vendored
Normal file
17
vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
{
|
||||
"version": "8",
|
||||
"hosts": {
|
||||
"play": {
|
||||
"url": "https://play.min.io",
|
||||
"accessKey": "Q3AM3UQ867SPQQA43P2F",
|
||||
"secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
|
||||
"api": "S3v2"
|
||||
},
|
||||
"s3": {
|
||||
"url": "https://s3.amazonaws.com",
|
||||
"accessKey": "accessKey",
|
||||
"secretKey": "secret",
|
||||
"api": "S3v4"
|
||||
}
|
||||
}
|
||||
}
|
178
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
generated
vendored
Normal file
178
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// STSVersion sts version string
|
||||
const STSVersion = "2011-06-15"
|
||||
|
||||
// A Value is the AWS credentials value for individual credential fields.
// It is the common currency exchanged between Providers and Credentials.
type Value struct {
	// AWS Access key ID
	AccessKeyID string

	// AWS Secret Access Key
	SecretAccessKey string

	// AWS Session Token (only set for temporary/STS credentials)
	SessionToken string

	// Signature Type.
	SignerType SignatureType
}
|
||||
|
||||
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what to
// be expired means.
type Provider interface {
	// Retrieve returns nil if it successfully retrieved the value.
	// Error is returned if the value were not obtainable, or empty.
	Retrieve() (Value, error)

	// IsExpired returns if the credentials are no longer valid, and need
	// to be retrieved.
	IsExpired() bool
}
|
||||
|
||||
// A Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
// The best method to use this struct is as an anonymous field within the
// provider's struct.
//
// Example:
//	type IAMCredentialProvider struct {
//		Expiry
//		...
//	}
type Expiry struct {
	// The date/time when to expire on.
	// Unexported: set via SetExpiration only.
	expiration time.Time

	// If set will be used by IsExpired to determine the current time.
	// Defaults to time.Now if CurrentTime is not set.
	// Primarily useful for injecting a fake clock in tests.
	CurrentTime func() time.Time
}
|
||||
|
||||
// SetExpiration sets the expiration IsExpired will check when called.
|
||||
//
|
||||
// If window is greater than 0 the expiration time will be reduced by the
|
||||
// window value.
|
||||
//
|
||||
// Using a window is helpful to trigger credentials to expire sooner than
|
||||
// the expiration time given to ensure no requests are made with expired
|
||||
// tokens.
|
||||
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
|
||||
e.expiration = expiration
|
||||
if window > 0 {
|
||||
e.expiration = e.expiration.Add(-window)
|
||||
}
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
|
||||
func (e *Expiry) IsExpired() bool {
|
||||
if e.CurrentTime == nil {
|
||||
e.CurrentTime = time.Now
|
||||
}
|
||||
return e.expiration.Before(e.CurrentTime())
|
||||
}
|
||||
|
||||
// Credentials - A container for synchronous safe retrieval of credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
	// Embedded mutex guards every field below; Credentials must not be
	// copied after first use.
	sync.Mutex

	// creds is the last successfully retrieved credentials value.
	creds Value
	// forceRefresh forces the next Get to call provider.Retrieve
	// regardless of the provider's own expiry state. Set by New and
	// Expire, cleared on a successful refresh.
	forceRefresh bool
	// provider supplies (and re-supplies) the credentials value.
	provider Provider
}
|
||||
|
||||
// New returns a pointer to a new Credentials with the provider set.
|
||||
func New(provider Provider) *Credentials {
|
||||
return &Credentials{
|
||||
provider: provider,
|
||||
forceRefresh: true,
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the credentials value, or error if the credentials Value failed
|
||||
// to be retrieved.
|
||||
//
|
||||
// Will return the cached credentials Value if it has not expired. If the
|
||||
// credentials Value has expired the Provider's Retrieve() will be called
|
||||
// to refresh the credentials.
|
||||
//
|
||||
// If Credentials.Expire() was called the credentials Value will be force
|
||||
// expired, and the next call to Get() will cause them to be refreshed.
|
||||
func (c *Credentials) Get() (Value, error) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if c.isExpired() {
|
||||
creds, err := c.provider.Retrieve()
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
c.creds = creds
|
||||
c.forceRefresh = false
|
||||
}
|
||||
|
||||
return c.creds, nil
|
||||
}
|
||||
|
||||
// Expire expires the credentials and forces them to be retrieved on the
|
||||
// next call to Get().
|
||||
//
|
||||
// This will override the Provider's expired state, and force Credentials
|
||||
// to call the Provider's Retrieve().
|
||||
func (c *Credentials) Expire() {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
c.forceRefresh = true
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
// to be refreshed.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
	// Lock because isExpired reads forceRefresh and calls into the
	// provider, both guarded by the embedded mutex.
	c.Lock()
	defer c.Unlock()

	return c.isExpired()
}
|
||||
|
||||
// isExpired helper method wrapping the definition of expired credentials.
// Callers must hold c's mutex.
func (c *Credentials) isExpired() bool {
	return c.forceRefresh || c.provider.IsExpired()
}
|
12
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
generated
vendored
Normal file
12
vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
[default]
|
||||
aws_access_key_id = accessKey
|
||||
aws_secret_access_key = secret
|
||||
aws_session_token = token
|
||||
|
||||
[no_token]
|
||||
aws_access_key_id = accessKey
|
||||
aws_secret_access_key = secret
|
||||
|
||||
[with_colon]
|
||||
aws_access_key_id: accessKey
|
||||
aws_secret_access_key: secret
|
62
vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
generated
vendored
Normal file
62
vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// Package credentials provides credential retrieval and management
|
||||
// for S3 compatible object storage.
|
||||
//
|
||||
// By default the Credentials.Get() will cache the successful result of a
|
||||
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
|
||||
// point Credentials will call Provider's Retrieve() to get new credential Value.
|
||||
//
|
||||
// The Provider is responsible for determining when credentials have expired.
|
||||
// It is also important to note that Credentials will always call Retrieve the
|
||||
// first time Credentials.Get() is called.
|
||||
//
|
||||
// Example of using the environment variable credentials.
|
||||
//
|
||||
// creds := NewFromEnv()
|
||||
// // Retrieve the credentials value
|
||||
// credValue, err := creds.Get()
|
||||
// if err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
// Example of forcing credentials to expire and be refreshed on the next Get().
|
||||
// This may be helpful to proactively expire credentials and refresh them sooner
|
||||
// than they would naturally expire on their own.
|
||||
//
|
||||
// creds := NewFromIAM("")
|
||||
// creds.Expire()
|
||||
// credsValue, err := creds.Get()
|
||||
// // New credentials will be retrieved instead of from cache.
|
||||
//
|
||||
//
|
||||
// Custom Provider
|
||||
//
|
||||
// Each Provider built into this package also provides a helper method to generate
|
||||
// a Credentials pointer setup with the provider. To use a custom Provider just
|
||||
// create a type which satisfies the Provider interface and pass it to the
|
||||
// NewCredentials method.
|
||||
//
|
||||
// type MyProvider struct{}
|
||||
// func (m *MyProvider) Retrieve() (Value, error) {...}
|
||||
// func (m *MyProvider) IsExpired() bool {...}
|
||||
//
|
||||
// creds := NewCredentials(&MyProvider{})
|
||||
// credValue, err := creds.Get()
|
||||
//
|
||||
package credentials
|
71
vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
generated
vendored
Normal file
71
vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import "os"
|
||||
|
||||
// A EnvAWS retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
// * Secret Token: AWS_SESSION_TOKEN.
type EnvAWS struct {
	// retrieved flips to true once Retrieve has read the environment;
	// it drives IsExpired.
	retrieved bool
}
|
||||
|
||||
// NewEnvAWS returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvAWS() *Credentials {
	return New(&EnvAWS{})
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvAWS) Retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||
if id == "" {
|
||||
id = os.Getenv("AWS_ACCESS_KEY")
|
||||
}
|
||||
|
||||
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||
if secret == "" {
|
||||
secret = os.Getenv("AWS_SECRET_KEY")
|
||||
}
|
||||
|
||||
signerType := SignatureV4
|
||||
if id == "" || secret == "" {
|
||||
signerType = SignatureAnonymous
|
||||
}
|
||||
|
||||
e.retrieved = true
|
||||
return Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
|
||||
SignerType: signerType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials have been retrieved.
// Environment credentials never expire once read.
func (e *EnvAWS) IsExpired() bool {
	return !e.retrieved
}
|
62
vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
generated
vendored
Normal file
62
vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import "os"
|
||||
|
||||
// A EnvMinio retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: MINIO_ACCESS_KEY.
// * Secret Access Key: MINIO_SECRET_KEY.
type EnvMinio struct {
	// retrieved flips to true once Retrieve has read the environment;
	// it drives IsExpired.
	retrieved bool
}
|
||||
|
||||
// NewEnvMinio returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvMinio() *Credentials {
	return New(&EnvMinio{})
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvMinio) Retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("MINIO_ACCESS_KEY")
|
||||
secret := os.Getenv("MINIO_SECRET_KEY")
|
||||
|
||||
signerType := SignatureV4
|
||||
if id == "" || secret == "" {
|
||||
signerType = SignatureAnonymous
|
||||
}
|
||||
|
||||
e.retrieved = true
|
||||
return Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SignerType: signerType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials have been retrieved.
// Environment credentials never expire once read.
func (e *EnvMinio) IsExpired() bool {
	return !e.retrieved
}
|
120
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
generated
vendored
Normal file
120
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
ini "gopkg.in/ini.v1"
|
||||
)
|
||||
|
||||
// A FileAWSCredentials retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type FileAWSCredentials struct {
	// Path to the shared credentials file.
	//
	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
	// env value is empty will default to current user's home directory.
	// Linux/OSX: "$HOME/.aws/credentials"
	// Windows:   "%USERPROFILE%\.aws\credentials"
	Filename string

	// AWS Profile to extract credentials from the shared credentials file. If empty
	// will default to environment variable "AWS_PROFILE" or "default" if
	// environment variable is also not set.
	Profile string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}
|
||||
|
||||
// NewFileAWSCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider. Either argument may be empty, in
// which case Retrieve falls back to environment variables and defaults.
func NewFileAWSCredentials(filename string, profile string) *Credentials {
	return New(&FileAWSCredentials{
		Filename: filename,
		Profile:  profile,
	})
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileAWSCredentials) Retrieve() (Value, error) {
|
||||
if p.Filename == "" {
|
||||
p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
|
||||
if p.Filename == "" {
|
||||
homeDir, err := homedir.Dir()
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
p.Filename = filepath.Join(homeDir, ".aws", "credentials")
|
||||
}
|
||||
}
|
||||
if p.Profile == "" {
|
||||
p.Profile = os.Getenv("AWS_PROFILE")
|
||||
if p.Profile == "" {
|
||||
p.Profile = "default"
|
||||
}
|
||||
}
|
||||
|
||||
p.retrieved = false
|
||||
|
||||
iniProfile, err := loadProfile(p.Filename, p.Profile)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
// Default to empty string if not found.
|
||||
id := iniProfile.Key("aws_access_key_id")
|
||||
// Default to empty string if not found.
|
||||
secret := iniProfile.Key("aws_secret_access_key")
|
||||
// Default to empty string if not found.
|
||||
token := iniProfile.Key("aws_session_token")
|
||||
|
||||
p.retrieved = true
|
||||
return Value{
|
||||
AccessKeyID: id.String(),
|
||||
SecretAccessKey: secret.String(),
|
||||
SessionToken: token.String(),
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the shared credentials have expired.
// File credentials never expire once successfully read.
func (p *FileAWSCredentials) IsExpired() bool {
	return !p.retrieved
}
|
||||
|
||||
// loadProfile loads from the file pointed to by shared credentials filename for profile.
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (*ini.Section, error) {
	config, err := ini.Load(filename)
	if err != nil {
		return nil, err
	}
	iniProfile, err := config.GetSection(profile)
	if err != nil {
		return nil, err
	}
	return iniProfile, nil
}
|
135
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
generated
vendored
Normal file
135
vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
// A FileMinioClient retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Configuration file example: $HOME/.mc/config.json
type FileMinioClient struct {
	// Path to the shared credentials file.
	//
	// If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
	// env value is empty will default to current user's home directory.
	// Linux/OSX: "$HOME/.mc/config.json"
	// Windows:   "%USERALIAS%\mc\config.json"
	Filename string

	// MinIO Alias to extract credentials from the shared credentials file. If empty
	// will default to environment variable "MINIO_ALIAS", or "s3" if the
	// environment variable is also not set (see Retrieve).
	Alias string

	// retrieved states if the credentials have been successfully retrieved.
	retrieved bool
}
|
||||
|
||||
// NewFileMinioClient returns a pointer to a new Credentials object
// wrapping the Alias file provider. Either argument may be empty, in
// which case Retrieve falls back to environment variables and defaults.
func NewFileMinioClient(filename string, alias string) *Credentials {
	return New(&FileMinioClient{
		Filename: filename,
		Alias:    alias,
	})
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileMinioClient) Retrieve() (Value, error) {
|
||||
if p.Filename == "" {
|
||||
if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
|
||||
p.Filename = value
|
||||
} else {
|
||||
homeDir, err := homedir.Dir()
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
p.Filename = filepath.Join(homeDir, ".mc", "config.json")
|
||||
if runtime.GOOS == "windows" {
|
||||
p.Filename = filepath.Join(homeDir, "mc", "config.json")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if p.Alias == "" {
|
||||
p.Alias = os.Getenv("MINIO_ALIAS")
|
||||
if p.Alias == "" {
|
||||
p.Alias = "s3"
|
||||
}
|
||||
}
|
||||
|
||||
p.retrieved = false
|
||||
|
||||
hostCfg, err := loadAlias(p.Filename, p.Alias)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
p.retrieved = true
|
||||
return Value{
|
||||
AccessKeyID: hostCfg.AccessKey,
|
||||
SecretAccessKey: hostCfg.SecretKey,
|
||||
SignerType: parseSignatureType(hostCfg.API),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the shared credentials have expired.
// File credentials never expire once successfully read.
func (p *FileMinioClient) IsExpired() bool {
	return !p.retrieved
}
|
||||
|
||||
// hostConfig configuration of a host — one entry in the "hosts" map of
// the mc config.json file.
type hostConfig struct {
	URL       string `json:"url"`
	AccessKey string `json:"accessKey"`
	SecretKey string `json:"secretKey"`
	API       string `json:"api"` // signature version, e.g. "S3v4" or "S3v2"
}
|
||||
|
||||
// config is the top-level shape of the mc config.json file: a version
// string plus a map of aliases to host configurations.
type config struct {
	Version string                `json:"version"`
	Hosts   map[string]hostConfig `json:"hosts"`
}
|
||||
|
||||
// loadAlias loads from the file pointed to by shared credentials filename for alias.
// The credentials retrieved from the alias will be returned or error. Error will be
// returned if it fails to read from the file.
// NOTE(review): an unknown alias returns the zero hostConfig, not an error.
func loadAlias(filename, alias string) (hostConfig, error) {
	cfg := &config{}
	// jsoniter drop-in keeps encoding/json semantics with less overhead.
	var json = jsoniter.ConfigCompatibleWithStandardLibrary

	configBytes, err := ioutil.ReadFile(filename)
	if err != nil {
		return hostConfig{}, err
	}
	if err = json.Unmarshal(configBytes, cfg); err != nil {
		return hostConfig{}, err
	}
	return cfg.Hosts[alias], nil
}
|
324
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
generated
vendored
Normal file
324
vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
generated
vendored
Normal file
@ -0,0 +1,324 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// DefaultExpiryWindow - Default expiry window.
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing
|
||||
// prior to the credentials actually expiring. This is beneficial
|
||||
// so race conditions with expiring credentials do not cause
|
||||
// request to fail unexpectedly due to ExpiredTokenException exceptions.
|
||||
const DefaultExpiryWindow = time.Second * 10 // 10 secs
|
||||
|
||||
// A IAM retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
type IAM struct {
	// Embedded Expiry provides SetExpiration/IsExpired.
	Expiry

	// Required http Client to use when connecting to IAM metadata service.
	Client *http.Client

	// Custom endpoint to fetch IAM role credentials.
	// When empty, Retrieve picks a default based on the environment.
	Endpoint string
}
|
||||
|
||||
// IAM Roles for Amazon EC2
|
||||
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
|
||||
const (
|
||||
defaultIAMRoleEndpoint = "http://169.254.169.254"
|
||||
defaultECSRoleEndpoint = "http://169.254.170.2"
|
||||
defaultSTSRoleEndpoint = "https://sts.amazonaws.com"
|
||||
defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
|
||||
)
|
||||
|
||||
// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
// Pass an empty endpoint to use the environment-driven defaults.
func NewIAM(endpoint string) *Credentials {
	return New(&IAM{
		Client: &http.Client{
			Transport: http.DefaultTransport,
		},
		Endpoint: endpoint,
	})
}
|
||||
|
||||
// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
//
// The credential source is selected by environment variable, in order:
// web-identity token file, ECS relative URI, ECS full URI, and finally
// the EC2 instance metadata service.
func (m *IAM) Retrieve() (Value, error) {
	var roleCreds ec2RoleCredRespBody
	var err error

	endpoint := m.Endpoint
	switch {
	case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0:
		// STS AssumeRoleWithWebIdentity path. Pick a regional STS
		// endpoint when AWS_REGION is set, else the global one.
		if len(endpoint) == 0 {
			if len(os.Getenv("AWS_REGION")) > 0 {
				endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com"
			} else {
				endpoint = defaultSTSRoleEndpoint
			}
		}

		creds := &STSWebIdentity{
			Client:      m.Client,
			STSEndpoint: endpoint,
			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
				// Token is re-read on each call so a rotated
				// token file is picked up.
				token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
				if err != nil {
					return nil, err
				}

				return &WebIdentityToken{Token: string(token)}, nil
			},
			roleARN:         os.Getenv("AWS_ROLE_ARN"),
			roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
		}

		// This branch returns directly — the shared tail below is
		// only for the metadata/ECS cases.
		stsWebIdentityCreds, err := creds.Retrieve()
		if err == nil {
			m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
		}
		return stsWebIdentityCreds, err

	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0:
		// ECS task role via the fixed link-local credentials endpoint.
		if len(endpoint) == 0 {
			endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint,
				os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"))
		}

		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint)

	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0:
		// ECS task role via a caller-supplied URI; the host must be a
		// loopback address (only validated when Endpoint was not set).
		if len(endpoint) == 0 {
			endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
			var ok bool
			if ok, err = isLoopback(endpoint); !ok {
				if err == nil {
					err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
				}
				break
			}
		}

		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint)

	default:
		// Plain EC2 instance metadata service.
		roleCreds, err = getCredentials(m.Client, endpoint)
	}

	if err != nil {
		return Value{}, err
	}
	// Expiry window is set to 10secs.
	m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)

	return Value{
		AccessKeyID:     roleCreds.AccessKeyID,
		SecretAccessKey: roleCreds.SecretAccessKey,
		SessionToken:    roleCreds.Token,
		SignerType:      SignatureV4,
	}, nil
}
|
||||
|
||||
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses from the EC2/ECS metadata services.
type ec2RoleCredRespBody struct {
	// Success State
	Expiration      time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string

	// Error state
	Code    string
	Message string

	// Unused params.
	LastUpdated time.Time
	Type        string
}
|
||||
|
||||
// Get the final IAM role URL where the request will
|
||||
// be sent to fetch the rolling access credentials.
|
||||
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
|
||||
func getIAMRoleURL(endpoint string) (*url.URL, error) {
|
||||
if endpoint == "" {
|
||||
endpoint = defaultIAMRoleEndpoint
|
||||
}
|
||||
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.Path = defaultIAMSecurityCredsPath
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// listRoleNames lists of credential role names associated
// with the current EC2 service. If there are no credentials,
// or there is an error making or receiving the request.
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, errors.New(resp.Status)
	}

	// The metadata service returns one role name per line.
	credsList := []string{}
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		credsList = append(credsList, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}

	return credsList, nil
}
|
||||
|
||||
// getEcsTaskCredentials fetches task-role credentials from the given ECS
// metadata endpoint and decodes the JSON response body.
func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}

	resp, err := client.Do(req)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ec2RoleCredRespBody{}, errors.New(resp.Status)
	}

	respCreds := ec2RoleCredRespBody{}
	if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
		return ec2RoleCredRespBody{}, err
	}

	return respCreds, nil
}
|
||||
|
||||
// getCredentials - obtains the credentials from the IAM role name associated with
|
||||
// the current EC2 service.
|
||||
//
|
||||
// If the credentials cannot be found, or there is an error
|
||||
// reading the response an error will be returned.
|
||||
func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
|
||||
|
||||
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
|
||||
u, err := getIAMRoleURL(endpoint)
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{}, err
|
||||
}
|
||||
|
||||
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
|
||||
roleNames, err := listRoleNames(client, u)
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{}, err
|
||||
}
|
||||
|
||||
if len(roleNames) == 0 {
|
||||
return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
|
||||
}
|
||||
|
||||
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
|
||||
// - An instance profile can contain only one IAM role. This limit cannot be increased.
|
||||
roleName := roleNames[0]
|
||||
|
||||
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
|
||||
// The following command retrieves the security credentials for an
|
||||
// IAM role named `s3access`.
|
||||
//
|
||||
// $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
|
||||
//
|
||||
u.Path = path.Join(u.Path, roleName)
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{}, err
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{}, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return ec2RoleCredRespBody{}, errors.New(resp.Status)
|
||||
}
|
||||
|
||||
respCreds := ec2RoleCredRespBody{}
|
||||
if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
|
||||
return ec2RoleCredRespBody{}, err
|
||||
}
|
||||
|
||||
if respCreds.Code != "Success" {
|
||||
// If an error code was returned something failed requesting the role.
|
||||
return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
|
||||
}
|
||||
|
||||
return respCreds, nil
|
||||
}
|
||||
|
||||
// isLoopback reports whether the host portion of uri resolves
// exclusively to loopback addresses.
func isLoopback(uri string) (bool, error) {
	parsed, err := url.Parse(uri)
	if err != nil {
		return false, err
	}

	hostname := parsed.Hostname()
	if hostname == "" {
		return false, fmt.Errorf("can't parse host from uri: %s", uri)
	}

	addrs, err := net.LookupHost(hostname)
	if err != nil {
		return false, err
	}

	// Every resolved address must be loopback; a single non-loopback
	// address disqualifies the host.
	for _, addr := range addrs {
		if !net.ParseIP(addr).IsLoopback() {
			return false, nil
		}
	}

	return true, nil
}
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user